mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-02-24 11:38:46 +01:00
Compare commits
55 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fd74756aeb | ||
|
|
46622b0362 | ||
|
|
014ab697fe | ||
|
|
890b6865f3 | ||
|
|
45d38a3c0e | ||
|
|
229830d737 | ||
|
|
b1c6b960a4 | ||
|
|
7cd0a758fe | ||
|
|
a474dddc37 | ||
|
|
cb9052bc88 | ||
|
|
a3b5b80ea7 | ||
|
|
293c05ba3f | ||
|
|
c00695f613 | ||
|
|
b378104777 | ||
|
|
d9fc07a309 | ||
|
|
8cd08af766 | ||
|
|
3ab313b9f1 | ||
|
|
44d8cb8bed | ||
|
|
626f4c7d16 | ||
|
|
b9d3008334 | ||
|
|
b73d20a313 | ||
|
|
191a99c952 | ||
|
|
5fe9514d68 | ||
|
|
aa3f3749f8 | ||
|
|
727e621f1e | ||
|
|
04ed9a4ceb | ||
|
|
0e83eda526 | ||
|
|
4871daae6c | ||
|
|
8db0e57b73 | ||
|
|
80a10d4185 | ||
|
|
29521a9aa4 | ||
|
|
58cccd526e | ||
|
|
265a64409e | ||
|
|
1fe95747c4 | ||
|
|
05e9079233 | ||
|
|
fd42ba766d | ||
|
|
4040dee0ee | ||
|
|
845cfdee6f | ||
|
|
f4d17fff60 | ||
|
|
a9238104c0 | ||
|
|
dc8785ee1b | ||
|
|
ad778e87bb | ||
|
|
273bc30a2a | ||
|
|
38755a4c1e | ||
|
|
1c559fd7ba | ||
|
|
0fc0d10a4e | ||
|
|
5e767a4b9e | ||
|
|
5cee8f84e3 | ||
|
|
93d588c86e | ||
|
|
377eb799ff | ||
|
|
ff11b58816 | ||
|
|
b1625eb8cc | ||
|
|
b8e154f767 | ||
|
|
c4ab0938f9 | ||
|
|
3fe5753f33 |
11
.github/workflows/ci-build.yaml
vendored
11
.github/workflows/ci-build.yaml
vendored
@@ -9,6 +9,7 @@ on:
|
||||
pull_request:
|
||||
branches:
|
||||
- 'master'
|
||||
- 'release-*'
|
||||
|
||||
env:
|
||||
# Golang version to use across CI steps
|
||||
@@ -71,10 +72,10 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v2
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
version: v1.38.0
|
||||
args: --timeout 10m --exclude SA5011
|
||||
version: v1.46.2
|
||||
args: --timeout 10m --exclude SA5011 --verbose
|
||||
|
||||
test-go:
|
||||
name: Run unit tests for Go packages
|
||||
@@ -340,7 +341,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
k3s-version: [v1.21.2, v1.20.2, v1.19.2, v1.18.9, v1.17.11]
|
||||
k3s-version: [v1.21.2, v1.20.2, v1.19.2]
|
||||
needs:
|
||||
- build-go
|
||||
env:
|
||||
@@ -400,7 +401,7 @@ jobs:
|
||||
run: |
|
||||
docker pull quay.io/dexidp/dex:v2.25.0
|
||||
docker pull argoproj/argo-cd-ci-builder:v1.0.0
|
||||
docker pull redis:6.2.6-alpine
|
||||
docker pull redis:6.2.7-alpine
|
||||
- name: Create target directory for binaries in the build-process
|
||||
run: |
|
||||
mkdir -p dist
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
run:
|
||||
timeout: 2m
|
||||
skip-files:
|
||||
- ".*\\.pb\\.go"
|
||||
skip-dirs:
|
||||
- pkg/client/
|
||||
- vendor/
|
||||
linters:
|
||||
enable:
|
||||
- vet
|
||||
- deadcode
|
||||
- goimports
|
||||
- varcheck
|
||||
- structcheck
|
||||
- ineffassign
|
||||
- unconvert
|
||||
- unparam
|
||||
linters-settings:
|
||||
goimports:
|
||||
local-prefixes: github.com/argoproj/argo-cd
|
||||
service:
|
||||
golangci-lint-version: 1.21.0
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:21.04
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
|
||||
2
Procfile
2
Procfile
@@ -1,7 +1,7 @@
|
||||
controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} "
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.2 dex serve /dex.yaml"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.6-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.7-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-/tmp/argo-e2e/app/config/plugin} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} go run ./cmd/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
|
||||
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
|
||||
git-server: test/fixture/testrepos/start-git.sh
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
@@ -63,14 +64,15 @@ func getPauseGenerationOnFailureForRequests() int {
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
parallelismLimit int64
|
||||
listenPort int
|
||||
metricsPort int
|
||||
cacheSrc func() (*reposervercache.Cache, error)
|
||||
tlsConfigCustomizer tls.ConfigCustomizer
|
||||
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
|
||||
redisClient *redis.Client
|
||||
disableTLS bool
|
||||
parallelismLimit int64
|
||||
listenPort int
|
||||
metricsPort int
|
||||
cacheSrc func() (*reposervercache.Cache, error)
|
||||
tlsConfigCustomizer tls.ConfigCustomizer
|
||||
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
|
||||
redisClient *redis.Client
|
||||
disableTLS bool
|
||||
maxCombinedDirectoryManifestsSize string
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -90,13 +92,17 @@ func NewCommand() *cobra.Command {
|
||||
cache, err := cacheSrc()
|
||||
errors.CheckError(err)
|
||||
|
||||
maxCombinedDirectoryManifestsQuantity, err := resource.ParseQuantity(maxCombinedDirectoryManifestsSize)
|
||||
errors.CheckError(err)
|
||||
|
||||
metricsServer := metrics.NewMetricsServer()
|
||||
cacheutil.CollectMetrics(redisClient, metricsServer)
|
||||
server, err := reposerver.NewServer(metricsServer, cache, tlsConfigCustomizer, repository.RepoServerInitConstants{
|
||||
ParallelismLimit: parallelismLimit,
|
||||
ParallelismLimit: parallelismLimit,
|
||||
PauseGenerationAfterFailedGenerationAttempts: getPauseGenerationAfterFailedGenerationAttempts(),
|
||||
PauseGenerationOnFailureForMinutes: getPauseGenerationOnFailureForMinutes(),
|
||||
PauseGenerationOnFailureForRequests: getPauseGenerationOnFailureForRequests(),
|
||||
MaxCombinedDirectoryManifestsSize: maxCombinedDirectoryManifestsQuantity,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -160,6 +166,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&listenPort, "port", common.DefaultPortRepoServer, "Listen on given port for incoming connections")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortRepoServerMetrics, "Start metrics server on given port")
|
||||
command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_TLS", false), "Disable TLS on the gRPC endpoint")
|
||||
command.Flags().StringVar(&maxCombinedDirectoryManifestsSize, "max-combined-directory-manifests-size", env.StringFromEnv("ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE", "10M"), "Max combined size of manifest files in a directory-type Application")
|
||||
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
|
||||
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
|
||||
@@ -609,7 +609,7 @@ func GenerateToken(clusterOpts cmdutil.ClusterOptions, conf *rest.Config) (strin
|
||||
clientset, err := kubernetes.NewForConfig(conf)
|
||||
errors.CheckError(err)
|
||||
|
||||
bearerToken, err := clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount)
|
||||
bearerToken, err := clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount, common.BearerTokenTimeout)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package admin
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -63,7 +64,10 @@ func NewProjectAllowListGenCommand() *cobra.Command {
|
||||
}()
|
||||
}
|
||||
|
||||
globalProj := generateProjectAllowList(clientConfig, clusterRoleFileName, projName)
|
||||
resourceList, err := getResourceList(clientConfig)
|
||||
errors.CheckError(err)
|
||||
globalProj, err := generateProjectAllowList(resourceList, clusterRoleFileName, projName)
|
||||
errors.CheckError(err)
|
||||
|
||||
yamlBytes, err := yaml.Marshal(globalProj)
|
||||
errors.CheckError(err)
|
||||
@@ -78,23 +82,38 @@ func NewProjectAllowListGenCommand() *cobra.Command {
|
||||
return command
|
||||
}
|
||||
|
||||
func generateProjectAllowList(clientConfig clientcmd.ClientConfig, clusterRoleFileName string, projName string) v1alpha1.AppProject {
|
||||
func getResourceList(clientConfig clientcmd.ClientConfig) ([]*metav1.APIResourceList, error) {
|
||||
config, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error while creating client config: %s", err)
|
||||
}
|
||||
disco, err := discovery.NewDiscoveryClientForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error while creating discovery client: %s", err)
|
||||
}
|
||||
serverResources, err := disco.ServerPreferredResources()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error while getting server resources: %s", err)
|
||||
}
|
||||
return serverResources, nil
|
||||
}
|
||||
|
||||
func generateProjectAllowList(serverResources []*metav1.APIResourceList, clusterRoleFileName string, projName string) (*v1alpha1.AppProject, error) {
|
||||
yamlBytes, err := ioutil.ReadFile(clusterRoleFileName)
|
||||
errors.CheckError(err)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading cluster role file: %s", err)
|
||||
}
|
||||
var obj unstructured.Unstructured
|
||||
err = yaml.Unmarshal(yamlBytes, &obj)
|
||||
errors.CheckError(err)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling cluster role file yaml: %s", err)
|
||||
}
|
||||
|
||||
clusterRole := &rbacv1.ClusterRole{}
|
||||
err = scheme.Scheme.Convert(&obj, clusterRole, nil)
|
||||
errors.CheckError(err)
|
||||
|
||||
config, err := clientConfig.ClientConfig()
|
||||
errors.CheckError(err)
|
||||
disco, err := discovery.NewDiscoveryClientForConfig(config)
|
||||
errors.CheckError(err)
|
||||
serverResources, err := disco.ServerPreferredResources()
|
||||
errors.CheckError(err)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error converting cluster role yaml into ClusterRole struct: %s", err)
|
||||
}
|
||||
|
||||
resourceList := make([]metav1.GroupKind, 0)
|
||||
for _, rule := range clusterRole.Rules {
|
||||
@@ -140,5 +159,5 @@ func generateProjectAllowList(clientConfig clientcmd.ClientConfig, clusterRoleFi
|
||||
Spec: v1alpha1.AppProjectSpec{},
|
||||
}
|
||||
globalProj.Spec.NamespaceResourceWhitelist = resourceList
|
||||
return globalProj
|
||||
return &globalProj, nil
|
||||
}
|
||||
|
||||
@@ -1,57 +1,20 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/undefinedlabs/go-mpatch"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/discovery"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
func TestProjectAllowListGen(t *testing.T) {
|
||||
useMock := true
|
||||
rules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
overrides := &clientcmd.ConfigOverrides{}
|
||||
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
|
||||
|
||||
if useMock {
|
||||
var patchClientConfig *mpatch.Patch
|
||||
patchClientConfig, err := mpatch.PatchInstanceMethodByName(reflect.TypeOf(clientConfig), "ClientConfig", func(*clientcmd.DeferredLoadingClientConfig) (*restclient.Config, error) {
|
||||
return nil, nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
patch, err := mpatch.PatchMethod(discovery.NewDiscoveryClientForConfig, func(c *restclient.Config) (*discovery.DiscoveryClient, error) {
|
||||
return &discovery.DiscoveryClient{LegacyPrefix: "/api"}, nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var patchSeverPreferredResources *mpatch.Patch
|
||||
discoClient := &discovery.DiscoveryClient{}
|
||||
patchSeverPreferredResources, err = mpatch.PatchInstanceMethodByName(reflect.TypeOf(discoClient), "ServerPreferredResources", func(*discovery.DiscoveryClient) ([]*metav1.APIResourceList, error) {
|
||||
res := metav1.APIResource{
|
||||
Name: "services",
|
||||
Kind: "Service",
|
||||
}
|
||||
resourceList := []*metav1.APIResourceList{{APIResources: []metav1.APIResource{res}}}
|
||||
return resourceList, nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
err = patchClientConfig.Unpatch()
|
||||
assert.NoError(t, err)
|
||||
err = patch.Unpatch()
|
||||
assert.NoError(t, err)
|
||||
err = patchSeverPreferredResources.Unpatch()
|
||||
err = patch.Unpatch()
|
||||
}()
|
||||
res := metav1.APIResource{
|
||||
Name: "services",
|
||||
Kind: "Service",
|
||||
}
|
||||
resourceList := []*metav1.APIResourceList{{APIResources: []metav1.APIResource{res}}}
|
||||
|
||||
globalProj := generateProjectAllowList(clientConfig, "testdata/test_clusterrole.yaml", "testproj")
|
||||
globalProj, err := generateProjectAllowList(resourceList, "testdata/test_clusterrole.yaml", "testproj")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, len(globalProj.Spec.NamespaceResourceWhitelist) > 0)
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -766,7 +767,7 @@ func getLocalObjectsString(app *argoappv1.Application, local, localRepoRoot, app
|
||||
KubeVersion: kubeVersion,
|
||||
Plugins: configManagementPlugins,
|
||||
TrackingMethod: trackingMethod,
|
||||
}, true)
|
||||
}, true, resource.MustParse("0"))
|
||||
errors.CheckError(err)
|
||||
|
||||
return res.Manifests
|
||||
|
||||
@@ -122,9 +122,17 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
|
||||
clientset, err := kubernetes.NewForConfig(conf)
|
||||
errors.CheckError(err)
|
||||
if clusterOpts.ServiceAccount != "" {
|
||||
managerBearerToken, err = clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount)
|
||||
managerBearerToken, err = clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount, common.BearerTokenTimeout)
|
||||
} else {
|
||||
managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, clusterOpts.SystemNamespace, clusterOpts.Namespaces)
|
||||
isTerminal := isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
|
||||
|
||||
if isTerminal && !skipConfirmation {
|
||||
message := fmt.Sprintf("WARNING: This will create a service account `argocd-manager` on the cluster referenced by context `%s` with full cluster level admin privileges. Do you want to continue [y/N]? ", contextName)
|
||||
if !cli.AskToProceed(message) {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, clusterOpts.SystemNamespace, clusterOpts.Namespaces, common.BearerTokenTimeout)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
@@ -202,7 +202,10 @@ func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCCo
|
||||
// completionChan is to signal flow completed. Non-empty string indicates error
|
||||
completionChan := make(chan string)
|
||||
// stateNonce is an OAuth2 state nonce
|
||||
stateNonce := rand.RandString(10)
|
||||
// According to the spec (https://www.rfc-editor.org/rfc/rfc6749#section-10.10), this must be guessable with
|
||||
// probability <= 2^(-128). The following call generates one of 52^24 random strings, ~= 2^136 possibilities.
|
||||
stateNonce, err := rand.String(24)
|
||||
errors.CheckError(err)
|
||||
var tokenString string
|
||||
var refreshToken string
|
||||
|
||||
@@ -212,7 +215,8 @@ func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCCo
|
||||
}
|
||||
|
||||
// PKCE implementation of https://tools.ietf.org/html/rfc7636
|
||||
codeVerifier := rand.RandStringCharset(43, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~")
|
||||
codeVerifier, err := rand.StringFromCharset(43, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~")
|
||||
errors.CheckError(err)
|
||||
codeChallengeHash := sha256.Sum256([]byte(codeVerifier))
|
||||
codeChallenge := base64.RawURLEncoding.EncodeToString(codeChallengeHash[:])
|
||||
|
||||
@@ -296,7 +300,8 @@ func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCCo
|
||||
opts = append(opts, oauth2.SetAuthURLParam("code_challenge_method", "S256"))
|
||||
url = oauth2conf.AuthCodeURL(stateNonce, opts...)
|
||||
case oidcutil.GrantTypeImplicit:
|
||||
url = oidcutil.ImplicitFlowURL(oauth2conf, stateNonce, opts...)
|
||||
url, err = oidcutil.ImplicitFlowURL(oauth2conf, stateNonce, opts...)
|
||||
errors.CheckError(err)
|
||||
default:
|
||||
log.Fatalf("Unsupported grant type: %v", grantType)
|
||||
}
|
||||
|
||||
@@ -203,6 +203,12 @@ const (
|
||||
CacheVersion = "1.8.3"
|
||||
)
|
||||
|
||||
// Constants used by util/clusterauth package
|
||||
const (
|
||||
ClusterAuthRequestTimeout = 10 * time.Second
|
||||
BearerTokenTimeout = 30 * time.Second
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultGitRetryMaxDuration time.Duration = time.Second * 5 // 5s
|
||||
DefaultGitRetryDuration time.Duration = time.Millisecond * 250 // 0.25s
|
||||
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
apiruntime "k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@@ -408,8 +409,12 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
},
|
||||
})
|
||||
} else {
|
||||
err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) {
|
||||
err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) bool {
|
||||
if !proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination) {
|
||||
return false
|
||||
}
|
||||
nodes = append(nodes, child)
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -419,16 +424,18 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
orphanedNodes := make([]appv1.ResourceNode, 0)
|
||||
for k := range orphanedNodesMap {
|
||||
if k.Namespace != "" && proj.IsGroupKindPermitted(k.GroupKind(), true) && !isKnownOrphanedResourceExclusion(k, proj) {
|
||||
err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, k, func(child appv1.ResourceNode, appName string) {
|
||||
err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, k, func(child appv1.ResourceNode, appName string) bool {
|
||||
belongToAnotherApp := false
|
||||
if appName != "" {
|
||||
if _, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName); exists && err == nil {
|
||||
belongToAnotherApp = true
|
||||
}
|
||||
}
|
||||
if !belongToAnotherApp {
|
||||
orphanedNodes = append(orphanedNodes, child)
|
||||
if belongToAnotherApp || !proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination) {
|
||||
return false
|
||||
}
|
||||
orphanedNodes = append(orphanedNodes, child)
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1258,6 +1265,13 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
app.Status.Sync.Status = appv1.SyncStatusCodeUnknown
|
||||
app.Status.Health.Status = health.HealthStatusUnknown
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
|
||||
if err := ctrl.cache.SetAppResourcesTree(app.Name, &appv1.ApplicationTree{}); err != nil {
|
||||
log.Warnf("failed to set app resource tree: %v", err)
|
||||
}
|
||||
if err := ctrl.cache.SetAppManagedResources(app.Name, nil); err != nil {
|
||||
log.Warnf("failed to set app managed resources tree: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -136,12 +136,12 @@ func newFakeController(data *fakeData) *ApplicationController {
|
||||
mockStateCache.On("GetClusterCache", mock.Anything).Return(&clusterCacheMock, nil)
|
||||
mockStateCache.On("IterateHierarchy", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
|
||||
key := args[1].(kube.ResourceKey)
|
||||
action := args[2].(func(child argoappv1.ResourceNode, appName string))
|
||||
action := args[2].(func(child argoappv1.ResourceNode, appName string) bool)
|
||||
appName := ""
|
||||
if res, ok := data.namespacedResources[key]; ok {
|
||||
appName = res.AppName
|
||||
}
|
||||
action(argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Kind: key.Kind, Group: key.Group, Namespace: key.Namespace, Name: key.Name}}, appName)
|
||||
_ = action(argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Kind: key.Kind, Group: key.Group, Namespace: key.Namespace, Name: key.Name}}, appName)
|
||||
}).Return(nil)
|
||||
return ctrl
|
||||
}
|
||||
|
||||
8
controller/cache/cache.go
vendored
8
controller/cache/cache.go
vendored
@@ -79,7 +79,7 @@ type LiveStateCache interface {
|
||||
// Returns synced cluster cache
|
||||
GetClusterCache(server string) (clustercache.ClusterCache, error)
|
||||
// Executes give callback against resource specified by the key and all its children
|
||||
IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) error
|
||||
IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
|
||||
// Returns state of live nodes which correspond for target nodes of specified application.
|
||||
GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error)
|
||||
// IterateResources iterates all resource stored in cache
|
||||
@@ -397,13 +397,13 @@ func (c *liveStateCache) IsNamespaced(server string, gk schema.GroupKind) (bool,
|
||||
return clusterInfo.IsNamespaced(gk)
|
||||
}
|
||||
|
||||
func (c *liveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) error {
|
||||
func (c *liveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error {
|
||||
clusterInfo, err := c.getSyncedCluster(server)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterInfo.IterateHierarchy(key, func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) {
|
||||
action(asResourceNode(resource), getApp(resource, namespaceResources))
|
||||
clusterInfo.IterateHierarchy(key, func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) bool {
|
||||
return action(asResourceNode(resource), getApp(resource, namespaceResources))
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
4
controller/cache/mocks/LiveStateCache.go
vendored
4
controller/cache/mocks/LiveStateCache.go
vendored
@@ -176,11 +176,11 @@ func (_m *LiveStateCache) IsNamespaced(server string, gk schema.GroupKind) (bool
|
||||
}
|
||||
|
||||
// IterateHierarchy provides a mock function with given fields: server, key, action
|
||||
func (_m *LiveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(v1alpha1.ResourceNode, string)) error {
|
||||
func (_m *LiveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(v1alpha1.ResourceNode, string) bool) error {
|
||||
ret := _m.Called(server, key, action)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, kube.ResourceKey, func(v1alpha1.ResourceNode, string)) error); ok {
|
||||
if rf, ok := ret.Get(0).(func(string, kube.ResourceKey, func(v1alpha1.ResourceNode, string) bool) error); ok {
|
||||
r0 = rf(server, key, action)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
|
||||
@@ -137,7 +137,13 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
}
|
||||
|
||||
atomic.AddUint64(&syncIdPrefix, 1)
|
||||
syncId := fmt.Sprintf("%05d-%s", syncIdPrefix, rand.RandString(5))
|
||||
randSuffix, err := rand.String(5)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("Failed generate random sync ID: %v", err)
|
||||
return
|
||||
}
|
||||
syncId := fmt.Sprintf("%05d-%s", syncIdPrefix, randSuffix)
|
||||
|
||||
logEntry := log.WithFields(log.Fields{"application": app.Name, "syncId": syncId})
|
||||
initialResourcesRes := make([]common.ResourceSyncResult, 0)
|
||||
|
||||
@@ -24,8 +24,7 @@ You will need at least the following things in your toolchain in order to develo
|
||||
|
||||
* A Kubernetes cluster. You won't need a fully blown multi-master, multi-node cluster, but you will need something like K3S, Minikube or microk8s. You will also need a working Kubernetes client (`kubectl`) configuration in your development environment. The configuration must reside in `~/.kube/config` and the API server URL must point to the IP address of your local machine (or VM), and **not** to `localhost` or `127.0.0.1` if you are using the virtualized development toolchain (see below)
|
||||
|
||||
* You will also need a working Docker runtime environment, to be able to build and run images.
|
||||
The Docker version must be fairly recent, and support multi-stage builds. You should not work as root. Make your local user a member of the `docker` group to be able to control the Docker service on your machine.
|
||||
* You will also need a working Docker runtime environment, to be able to build and run images. The Docker version must be 17.05.0 or higher, to support multi-stage builds.
|
||||
|
||||
* Obviously, you will need a `git` client for pulling source code and pushing back your changes.
|
||||
|
||||
|
||||
@@ -242,3 +242,9 @@ data:
|
||||
# published to the repository. Reconciliation by timeout is disabled if timeout is set to 0. Three minutes by default.
|
||||
# > Note: argocd-repo-server deployment must be manually restarted after changing the setting.
|
||||
timeout.reconciliation: 180s
|
||||
|
||||
# oidc.tls.insecure.skip.verify determines whether certificate verification is skipped when verifying tokens with the
|
||||
# configured OIDC provider (either external or the bundled Dex instance). Setting this to "true" will cause JWT
|
||||
# token verification to pass despite the OIDC provider having an invalid certificate. Only set to "true" if you
|
||||
# understand the risks.
|
||||
oidc.tls.insecure.skip.verify: "false"
|
||||
|
||||
@@ -103,4 +103,8 @@ data:
|
||||
reposerver.repo.cache.expiration: "24h0m0s"
|
||||
# Cache expiration default (default 24h0m0s)
|
||||
reposerver.default.cache.expiration: "24h0m0s"
|
||||
|
||||
# Max combined manifest file size for a single directory-type Application. In-memory manifest representation may be as
|
||||
# much as 300x the manifest file size. Limit this to stay within the memory limits of the repo-server while allowing
|
||||
# for 300x memory expansion and N Applications running at the same time.
|
||||
# (example 10M max * 300 expansion * 10 Apps = 30G max theoretical memory usage).
|
||||
reposerver.max.combined.directory.manifests.size: '10M'
|
||||
|
||||
@@ -205,4 +205,45 @@ Argo CD logs payloads of most API requests except request that are considered se
|
||||
can be found in [server/server.go](https://github.com/argoproj/argo-cd/blob/abba8dddce8cd897ba23320e3715690f465b4a95/server/server.go#L516).
|
||||
|
||||
Argo CD does not log IP addresses of clients requesting API endpoints, since the API server is typically behind a proxy. Instead, it is recommended
|
||||
to configure IP addresses logging in the proxy server that sits in front of the API server.
|
||||
to configure IP addresses logging in the proxy server that sits in front of the API server.
|
||||
|
||||
## Limiting Directory App Memory Usage
|
||||
|
||||
> >2.2.10, 2.1.16, >2.3.5
|
||||
|
||||
Directory-type Applications (those whose source is raw JSON or YAML files) can consume significant
|
||||
[repo-server](architecture.md#repository-server) memory, depending on the size and structure of the YAML files.
|
||||
|
||||
To avoid over-using memory in the repo-server (potentially causing a crash and denial of service), set the
|
||||
`reposerver.max.combined.directory.manifests.size` config option in [argocd-cmd-params-cm](argocd-cmd-params-cm.yaml).
|
||||
|
||||
This option limits the combined size of all JSON or YAML files in an individual app. Note that the in-memory
|
||||
representation of a manifest may be as much as 300x the size of the manifest on disk. Also note that the limit is per
|
||||
Application. If manifests are generated for multiple applications at once, memory usage will be higher.
|
||||
|
||||
**Example:**
|
||||
|
||||
Suppose your repo-server has a 10G memory limit, and you have ten Applications which use raw JSON or YAML files. To
|
||||
calculate the max safe combined file size per Application, divide 10G by 300 * 10 Apps (300 being the worst-case memory
|
||||
growth factor for the manifests).
|
||||
|
||||
```
|
||||
10G / 300 * 10 = 3M
|
||||
```
|
||||
|
||||
So a reasonably safe configuration for this setup would be a 3M limit per app.
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cmd-params-cm
|
||||
data:
|
||||
reposerver.max.combined.directory.manifests.size: '3M'
|
||||
```
|
||||
|
||||
The 300x ratio assumes a maliciously-crafted manifest file. If you only want to protect against accidental excessive
|
||||
memory use, it is probably safe to use a smaller ratio.
|
||||
|
||||
Keep in mind that if a malicious user can create additional Applications, they can increase the total memory usage.
|
||||
Grant [App creation privileges](rbac.md) carefully.
|
||||
|
||||
@@ -13,27 +13,28 @@ argocd-repo-server [flags]
|
||||
### Options
|
||||
|
||||
```
|
||||
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
|
||||
--disable-tls Disable TLS on the gRPC endpoint
|
||||
-h, --help help for argocd-repo-server
|
||||
--logformat string Set the logging format. One of: text|json (default "text")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
--metrics-port int Start metrics server on given port (default 8084)
|
||||
      --parallelismlimit int Limit on number of concurrent manifests generate requests. Any value less than 1 means no limit.
|
||||
--port int Listen on given port for incoming connections (default 8081)
|
||||
--redis string Redis server hostname and port (e.g. argocd-redis:6379).
|
||||
--redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
|
||||
--redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-insecure-skip-tls-verify Skip Redis server certificate validation.
|
||||
--redis-use-tls Use TLS when connecting to Redis.
|
||||
--redisdb int Redis database.
|
||||
--repo-cache-expiration duration Cache expiration for repo state, incl. app lists, app details, manifest generation, revision meta-data (default 24h0m0s)
|
||||
--revision-cache-expiration duration Cache expiration for cached revision (default 3m0s)
|
||||
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--tlsciphers string The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. (default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384")
|
||||
--tlsmaxversion string The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3")
|
||||
--tlsminversion string The minimum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.2")
|
||||
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
|
||||
--disable-tls Disable TLS on the gRPC endpoint
|
||||
-h, --help help for argocd-repo-server
|
||||
--logformat string Set the logging format. One of: text|json (default "text")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
--max-combined-directory-manifests-size string Max combined size of manifest files in a directory-type Application (default "10M")
|
||||
--metrics-port int Start metrics server on given port (default 8084)
|
||||
      --parallelismlimit int Limit on number of concurrent manifests generate requests. Any value less than 1 means no limit.
|
||||
--port int Listen on given port for incoming connections (default 8081)
|
||||
--redis string Redis server hostname and port (e.g. argocd-redis:6379).
|
||||
--redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
|
||||
--redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-insecure-skip-tls-verify Skip Redis server certificate validation.
|
||||
--redis-use-tls Use TLS when connecting to Redis.
|
||||
--redisdb int Redis database.
|
||||
--repo-cache-expiration duration Cache expiration for repo state, incl. app lists, app details, manifest generation, revision meta-data (default 24h0m0s)
|
||||
--revision-cache-expiration duration Cache expiration for cached revision (default 3m0s)
|
||||
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--tlsciphers string The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. (default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384")
|
||||
--tlsmaxversion string The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3")
|
||||
--tlsminversion string The minimum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.2")
|
||||
```
|
||||
|
||||
|
||||
@@ -14,3 +14,76 @@ Note that bundled Helm has been upgraded from 3.6.0 to v3.7+. This includes foll
|
||||
- Experimental OCI support has been rewritten.
|
||||
|
||||
More information in the [Helm v3.7.0 release notes](https://github.com/helm/helm/releases/tag/v3.7.0).
|
||||
|
||||
## Support for private repo SSH keys using the SHA-1 signature hash algorithm is removed in 2.2.12
|
||||
|
||||
Argo CD 2.2.12 upgraded its base image from Ubuntu 21.10 to Ubuntu 22.04, which upgraded OpenSSH to 8.9. OpenSSH starting
|
||||
with 8.8 [dropped support for the `ssh-rsa` SHA-1 key signature algorithm](https://www.openssh.com/txt/release-8.8).
|
||||
|
||||
The signature algorithm is _not_ the same as the algorithm used when generating the key. There is no need to update
|
||||
keys.
|
||||
|
||||
The signature algorithm is negotiated with the SSH server when the connection is being set up. The client offers its
|
||||
list of accepted signature algorithms, and if the server has a match, the connection proceeds. For most SSH servers on
|
||||
up-to-date git providers, acceptable algorithms other than `ssh-rsa` should be available.
|
||||
|
||||
Before upgrading to Argo CD 2.2.12, check whether your git provider(s) using SSH authentication support algorithms newer
|
||||
than `ssh-rsa`.
|
||||
|
||||
1. Make sure your version of SSH >= 8.9 (the version used by Argo CD). If not, upgrade it before proceeding.
|
||||
|
||||
```shell
|
||||
ssh -V
|
||||
```
|
||||
|
||||
Example output: `OpenSSH_8.9p1 Ubuntu-3, OpenSSL 3.0.2 15 Mar 2022`
|
||||
|
||||
2. Once you have a recent version of OpenSSH, follow the directions from the [OpenSSH 8.8 release notes](https://www.openssh.com/txt/release-8.8):
|
||||
|
||||
> To check whether a server is using the weak ssh-rsa public key
|
||||
> algorithm, for host authentication, try to connect to it after
|
||||
> removing the ssh-rsa algorithm from ssh(1)'s allowed list:
|
||||
>
|
||||
> ```shell
|
||||
> ssh -oHostKeyAlgorithms=-ssh-rsa user@host
|
||||
> ```
|
||||
>
|
||||
> If the host key verification fails and no other supported host key
|
||||
> types are available, the server software on that host should be
|
||||
> upgraded.
|
||||
|
||||
If the server does not support an acceptable version, you will get an error similar to this:
|
||||
|
||||
```
|
||||
$ ssh -oHostKeyAlgorithms=-ssh-rsa vs-ssh.visualstudio.com
|
||||
Unable to negotiate with 20.42.134.1 port 22: no matching host key type found. Their offer: ssh-rsa
|
||||
```
|
||||
|
||||
This indicates that the server needs to update its supported key signature algorithms, and Argo CD will not connect
|
||||
to it.
|
||||
|
||||
### Workaround
|
||||
|
||||
The [OpenSSH 8.8 release notes](https://www.openssh.com/txt/release-8.8) describe a workaround if you cannot change the
|
||||
server's key signature algorithms configuration.
|
||||
|
||||
> Incompatibility is more likely when connecting to older SSH
|
||||
> implementations that have not been upgraded or have not closely tracked
|
||||
> improvements in the SSH protocol. For these cases, it may be necessary
|
||||
> to selectively re-enable RSA/SHA1 to allow connection and/or user
|
||||
> authentication via the HostkeyAlgorithms and PubkeyAcceptedAlgorithms
|
||||
> options. For example, the following stanza in ~/.ssh/config will enable
|
||||
> RSA/SHA1 for host and user authentication for a single destination host:
|
||||
>
|
||||
> ```
|
||||
> Host old-host
|
||||
> HostkeyAlgorithms +ssh-rsa
|
||||
> PubkeyAcceptedAlgorithms +ssh-rsa
|
||||
> ```
|
||||
>
|
||||
> We recommend enabling RSA/SHA1 only as a stopgap measure until legacy
|
||||
> implementations can be upgraded or reconfigured with another key type
|
||||
> (such as ECDSA or Ed25519).
|
||||
|
||||
To apply this to Argo CD, you could create a ConfigMap with the desired ssh config file and then mount it at
|
||||
`/home/argocd/.ssh/config`.
|
||||
|
||||
@@ -373,6 +373,21 @@ You are not required to specify a logoutRedirectURL as this is automatically gen
|
||||
!!! note
|
||||
The post logout redirect URI may need to be whitelisted against your OIDC provider's client settings for ArgoCD.
|
||||
|
||||
### Configuring a custom root CA certificate for communicating with the OIDC provider
|
||||
|
||||
If your OIDC provider is setup with a certificate which is not signed by one of the well known certificate authorities
|
||||
you can provide a custom certificate which will be used in verifying the OIDC provider's TLS certificate when
|
||||
communicating with it.
|
||||
Add a `rootCA` to your `oidc.config` which contains the PEM encoded root certificate:
|
||||
|
||||
```yaml
|
||||
oidc.config: |
|
||||
...
|
||||
rootCA: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
... encoded certificate data here ...
|
||||
-----END CERTIFICATE-----
|
||||
```
|
||||
|
||||
|
||||
## SSO Further Reading
|
||||
@@ -477,3 +492,20 @@ data:
|
||||
clientSecret: $another-secret:oidc.auth0.clientSecret # Mind the ':'
|
||||
...
|
||||
```
|
||||
|
||||
### Skipping certificate verification on OIDC provider connections
|
||||
|
||||
By default, all connections made by the API server to OIDC providers (either external providers or the bundled Dex
|
||||
instance) must pass certificate validation. These connections occur when getting the OIDC provider's well-known
|
||||
configuration, when getting the OIDC provider's keys, and when exchanging an authorization code or verifying an ID
|
||||
token as part of an OIDC login flow.
|
||||
|
||||
Disabling certificate verification might make sense if:
|
||||
* You are using the bundled Dex instance **and** your Argo CD instance has TLS configured with a self-signed certificate
|
||||
**and** you understand and accept the risks of skipping OIDC provider cert verification.
|
||||
* You are using an external OIDC provider **and** that provider uses an invalid certificate **and** you cannot solve
|
||||
the problem by setting `oidcConfig.rootCA` **and** you understand and accept the risks of skipping OIDC provider cert
|
||||
verification.
|
||||
|
||||
If either of those two applies, then you can disable OIDC provider certificate verification by setting
|
||||
`oidc.tls.insecure.skip.verify` to `"true"` in the `argocd-cm` ConfigMap.
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
mkdocs==1.1.2
|
||||
mkdocs==1.2.3
|
||||
mkdocs-material==7.1.7
|
||||
markdown_include==0.6.0
|
||||
pygments==2.7.4
|
||||
pygments==2.7.4
|
||||
jinja2==3.0.3
|
||||
markdown==3.3.7
|
||||
13
go.mod
13
go.mod
@@ -8,7 +8,7 @@ require (
|
||||
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible
|
||||
github.com/alicebob/miniredis/v2 v2.14.2
|
||||
github.com/argoproj/gitops-engine v0.5.2
|
||||
github.com/argoproj/gitops-engine v0.5.5
|
||||
github.com/argoproj/pkg v0.11.1-0.20211203175135-36c59d8fafe0
|
||||
github.com/bombsimon/logrusr v1.0.0
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.0.2
|
||||
@@ -61,11 +61,10 @@ require (
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/undefinedlabs/go-mpatch v1.0.6
|
||||
github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c
|
||||
@@ -75,6 +74,7 @@ require (
|
||||
k8s.io/api v0.22.2
|
||||
k8s.io/apiextensions-apiserver v0.22.2
|
||||
k8s.io/apimachinery v0.22.2
|
||||
k8s.io/apiserver v0.22.2
|
||||
k8s.io/client-go v0.22.2
|
||||
k8s.io/code-generator v0.22.2
|
||||
k8s.io/component-base v0.22.2
|
||||
@@ -95,6 +95,9 @@ replace (
|
||||
|
||||
google.golang.org/grpc => google.golang.org/grpc v1.15.0
|
||||
|
||||
// Avoid CVE-2022-28948
|
||||
gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
|
||||
|
||||
k8s.io/api => k8s.io/api v0.22.2
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.2
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.22.2
|
||||
|
||||
26
go.sum
26
go.sum
@@ -103,8 +103,8 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/argoproj/gitops-engine v0.5.2 h1:UQ2ajVyUPCSgFyqidzlTXddh/Xf6cE3I0s9uu92BoJg=
|
||||
github.com/argoproj/gitops-engine v0.5.2/go.mod h1:K2RYpGXh11VdFwDksS23SyFTOJaPcsF+MVJ/FHlqEOE=
|
||||
github.com/argoproj/gitops-engine v0.5.5 h1:ac6mKIncPzT/f3CH9+55ETqEsC+Z2lVDDz2Gbtvt8KE=
|
||||
github.com/argoproj/gitops-engine v0.5.5/go.mod h1:K2RYpGXh11VdFwDksS23SyFTOJaPcsF+MVJ/FHlqEOE=
|
||||
github.com/argoproj/pkg v0.11.1-0.20211203175135-36c59d8fafe0 h1:Cfp7rO/HpVxnwlRqJe0jHiBbZ77ZgXhB6HWlYD02Xdc=
|
||||
github.com/argoproj/pkg v0.11.1-0.20211203175135-36c59d8fafe0/go.mod h1:ra+bQPmbVAoEL+gYSKesuigt4m49i3Qa3mE/xQcjCiA=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
@@ -840,8 +840,6 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/undefinedlabs/go-mpatch v1.0.6 h1:h8q5ORH/GaOE1Se1DMhrOyljXZEhRcROO7agMqWXCOY=
|
||||
github.com/undefinedlabs/go-mpatch v1.0.6/go.mod h1:TyJZDQ/5AgyN7FSLiBJ8RO9u2c6wbtRvK827b6AVqY4=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
@@ -942,8 +940,8 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1033,19 +1031,19 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -1337,10 +1335,8 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
set -eux -o pipefail
|
||||
|
||||
GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.38.0
|
||||
GO111MODULE=on go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.2
|
||||
|
||||
@@ -28,7 +28,7 @@ spec:
|
||||
name: dexconfig
|
||||
containers:
|
||||
- name: dex
|
||||
image: ghcr.io/dexidp/dex:v2.30.2
|
||||
image: ghcr.io/dexidp/dex:v2.32.0
|
||||
imagePullPolicy: Always
|
||||
command: [/shared/argocd-dex, rundex]
|
||||
securityContext:
|
||||
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v2.2.6
|
||||
newTag: v2.2.12
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
@@ -21,7 +21,7 @@ spec:
|
||||
serviceAccountName: argocd-redis
|
||||
containers:
|
||||
- name: redis
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
- "--save"
|
||||
|
||||
@@ -98,6 +98,12 @@ spec:
|
||||
name: argocd-cmd-params-cm
|
||||
key: reposerver.default.cache.expiration
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: reposerver.max.combined.directory.manifests.size
|
||||
optional: true
|
||||
- name: HELM_CACHE_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_CONFIG_HOME
|
||||
|
||||
@@ -2890,7 +2890,7 @@ spec:
|
||||
- ""
|
||||
- --appendonly
|
||||
- "no"
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -3012,13 +3012,19 @@ spec:
|
||||
key: reposerver.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.max.combined.directory.manifests.size
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: HELM_CACHE_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_CONFIG_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -3067,7 +3073,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -3232,7 +3238,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
||||
@@ -11,4 +11,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v2.2.6
|
||||
newTag: v2.2.12
|
||||
|
||||
@@ -11,7 +11,7 @@ patchesStrategicMerge:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v2.2.6
|
||||
newTag: v2.2.12
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/dex
|
||||
|
||||
@@ -770,7 +770,7 @@ spec:
|
||||
topologyKey: kubernetes.io/hostname
|
||||
initContainers:
|
||||
- name: config-init
|
||||
image: haproxy:2.0.25-alpine
|
||||
image: haproxy:2.0.29-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
{}
|
||||
@@ -790,7 +790,7 @@ spec:
|
||||
runAsUser: 1000
|
||||
containers:
|
||||
- name: haproxy
|
||||
image: haproxy:2.0.25-alpine
|
||||
image: haproxy:2.0.29-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -878,7 +878,7 @@ spec:
|
||||
automountServiceAccountToken: false
|
||||
initContainers:
|
||||
- name: config-init
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
{}
|
||||
@@ -906,7 +906,7 @@ spec:
|
||||
|
||||
containers:
|
||||
- name: redis
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- redis-server
|
||||
@@ -947,7 +947,7 @@ spec:
|
||||
lifecycle:
|
||||
{}
|
||||
- name: sentinel
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- redis-sentinel
|
||||
|
||||
@@ -9,12 +9,12 @@ redis-ha:
|
||||
haproxy:
|
||||
enabled: true
|
||||
image:
|
||||
tag: 2.0.25-alpine
|
||||
tag: 2.0.29-alpine
|
||||
timeout:
|
||||
server: 6m
|
||||
client: 6m
|
||||
checkInterval: 3s
|
||||
image:
|
||||
tag: 6.2.6-alpine
|
||||
tag: 6.2.7-alpine
|
||||
sentinel:
|
||||
bind: "0.0.0.0"
|
||||
|
||||
@@ -3687,7 +3687,7 @@ spec:
|
||||
- command:
|
||||
- /shared/argocd-dex
|
||||
- rundex
|
||||
image: ghcr.io/dexidp/dex:v2.30.2
|
||||
image: ghcr.io/dexidp/dex:v2.32.0
|
||||
imagePullPolicy: Always
|
||||
name: dex
|
||||
ports:
|
||||
@@ -3709,7 +3709,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -3756,7 +3756,7 @@ spec:
|
||||
app.kubernetes.io/name: argocd-redis-ha-haproxy
|
||||
topologyKey: kubernetes.io/hostname
|
||||
containers:
|
||||
- image: haproxy:2.0.25-alpine
|
||||
- image: haproxy:2.0.29-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -3785,7 +3785,7 @@ spec:
|
||||
- /readonly/haproxy_init.sh
|
||||
command:
|
||||
- sh
|
||||
image: haproxy:2.0.25-alpine
|
||||
image: haproxy:2.0.29-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
volumeMounts:
|
||||
@@ -3920,13 +3920,19 @@ spec:
|
||||
key: reposerver.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.max.combined.directory.manifests.size
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: HELM_CACHE_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_CONFIG_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -3975,7 +3981,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -4202,7 +4208,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -4398,7 +4404,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -4480,7 +4486,7 @@ spec:
|
||||
- /data/conf/redis.conf
|
||||
command:
|
||||
- redis-server
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -4518,7 +4524,7 @@ spec:
|
||||
- /data/conf/sentinel.conf
|
||||
command:
|
||||
- redis-sentinel
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -4564,7 +4570,7 @@ spec:
|
||||
value: 40000915ab58c3fa8fd888fb8b24711944e6cbb4
|
||||
- name: SENTINEL_ID_2
|
||||
value: 2bbec7894d954a8af3bb54d13eaec53cb024e2ca
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
volumeMounts:
|
||||
|
||||
@@ -1046,7 +1046,7 @@ spec:
|
||||
- command:
|
||||
- /shared/argocd-dex
|
||||
- rundex
|
||||
image: ghcr.io/dexidp/dex:v2.30.2
|
||||
image: ghcr.io/dexidp/dex:v2.32.0
|
||||
imagePullPolicy: Always
|
||||
name: dex
|
||||
ports:
|
||||
@@ -1068,7 +1068,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -1115,7 +1115,7 @@ spec:
|
||||
app.kubernetes.io/name: argocd-redis-ha-haproxy
|
||||
topologyKey: kubernetes.io/hostname
|
||||
containers:
|
||||
- image: haproxy:2.0.25-alpine
|
||||
- image: haproxy:2.0.29-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -1144,7 +1144,7 @@ spec:
|
||||
- /readonly/haproxy_init.sh
|
||||
command:
|
||||
- sh
|
||||
image: haproxy:2.0.25-alpine
|
||||
image: haproxy:2.0.29-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
volumeMounts:
|
||||
@@ -1279,13 +1279,19 @@ spec:
|
||||
key: reposerver.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.max.combined.directory.manifests.size
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: HELM_CACHE_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_CONFIG_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1334,7 +1340,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -1561,7 +1567,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -1757,7 +1763,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -1839,7 +1845,7 @@ spec:
|
||||
- /data/conf/redis.conf
|
||||
command:
|
||||
- redis-server
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -1877,7 +1883,7 @@ spec:
|
||||
- /data/conf/sentinel.conf
|
||||
command:
|
||||
- redis-sentinel
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -1923,7 +1929,7 @@ spec:
|
||||
value: 40000915ab58c3fa8fd888fb8b24711944e6cbb4
|
||||
- name: SENTINEL_ID_2
|
||||
value: 2bbec7894d954a8af3bb54d13eaec53cb024e2ca
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
volumeMounts:
|
||||
|
||||
@@ -3057,7 +3057,7 @@ spec:
|
||||
- command:
|
||||
- /shared/argocd-dex
|
||||
- rundex
|
||||
image: ghcr.io/dexidp/dex:v2.30.2
|
||||
image: ghcr.io/dexidp/dex:v2.32.0
|
||||
imagePullPolicy: Always
|
||||
name: dex
|
||||
ports:
|
||||
@@ -3079,7 +3079,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -3132,7 +3132,7 @@ spec:
|
||||
- ""
|
||||
- --appendonly
|
||||
- "no"
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -3254,13 +3254,19 @@ spec:
|
||||
key: reposerver.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.max.combined.directory.manifests.size
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: HELM_CACHE_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_CONFIG_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -3309,7 +3315,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -3532,7 +3538,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3722,7 +3728,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
||||
@@ -416,7 +416,7 @@ spec:
|
||||
- command:
|
||||
- /shared/argocd-dex
|
||||
- rundex
|
||||
image: ghcr.io/dexidp/dex:v2.30.2
|
||||
image: ghcr.io/dexidp/dex:v2.32.0
|
||||
imagePullPolicy: Always
|
||||
name: dex
|
||||
ports:
|
||||
@@ -438,7 +438,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -491,7 +491,7 @@ spec:
|
||||
- ""
|
||||
- --appendonly
|
||||
- "no"
|
||||
image: redis:6.2.6-alpine
|
||||
image: redis:6.2.7-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -613,13 +613,19 @@ spec:
|
||||
key: reposerver.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.max.combined.directory.manifests.size
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: HELM_CACHE_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_CONFIG_HOME
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -668,7 +674,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -891,7 +897,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -1081,7 +1087,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.2.6
|
||||
image: quay.io/argoproj/argocd:v2.2.12
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
||||
@@ -100,7 +100,11 @@ func (c *client) executeRequest(fullMethodName string, msg []byte, md metadata.M
|
||||
}
|
||||
|
||||
func (c *client) startGRPCProxy() (*grpc.Server, net.Listener, error) {
|
||||
serverAddr := fmt.Sprintf("%s/argocd-%s.sock", os.TempDir(), rand.RandString(16))
|
||||
randSuffix, err := rand.String(16)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate random socket filename: %w", err)
|
||||
}
|
||||
serverAddr := fmt.Sprintf("%s/argocd-%s.sock", os.TempDir(), randSuffix)
|
||||
ln, err := net.Listen("unix", serverAddr)
|
||||
|
||||
if err != nil {
|
||||
|
||||
@@ -313,11 +313,15 @@ func (proj AppProject) IsGroupKindPermitted(gk schema.GroupKind, namespaced bool
|
||||
|
||||
// IsLiveResourcePermitted returns whether a live resource found in the cluster is permitted by an AppProject
|
||||
func (proj AppProject) IsLiveResourcePermitted(un *unstructured.Unstructured, server string, name string) bool {
|
||||
if !proj.IsGroupKindPermitted(un.GroupVersionKind().GroupKind(), un.GetNamespace() != "") {
|
||||
return proj.IsResourcePermitted(un.GroupVersionKind().GroupKind(), un.GetNamespace(), ApplicationDestination{Server: server, Name: name})
|
||||
}
|
||||
|
||||
func (proj AppProject) IsResourcePermitted(groupKind schema.GroupKind, namespace string, dest ApplicationDestination) bool {
|
||||
if !proj.IsGroupKindPermitted(groupKind, namespace != "") {
|
||||
return false
|
||||
}
|
||||
if un.GetNamespace() != "" {
|
||||
return proj.IsDestinationPermitted(ApplicationDestination{Server: server, Namespace: un.GetNamespace(), Name: name})
|
||||
if namespace != "" {
|
||||
return proj.IsDestinationPermitted(ApplicationDestination{Server: dest.Server, Name: dest.Name, Namespace: namespace})
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
goio "io"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -16,6 +17,12 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
kubeyaml "k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/io/files"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
|
||||
"github.com/Masterminds/semver"
|
||||
@@ -66,6 +73,8 @@ const (
|
||||
ociPrefix = "oci://"
|
||||
)
|
||||
|
||||
var ErrExceededMaxCombinedManifestFileSize = errors.New("exceeded max combined manifest file size")
|
||||
|
||||
// Service implements ManifestService interface
|
||||
type Service struct {
|
||||
repoLock *repositoryLock
|
||||
@@ -85,6 +94,7 @@ type RepoServerInitConstants struct {
|
||||
PauseGenerationAfterFailedGenerationAttempts int
|
||||
PauseGenerationOnFailureForMinutes int
|
||||
PauseGenerationOnFailureForRequests int
|
||||
MaxCombinedDirectoryManifestsSize resource.Quantity
|
||||
}
|
||||
|
||||
// NewService returns a new instance of the Manifest service
|
||||
@@ -329,7 +339,7 @@ func (s *Service) runManifestGen(repoRoot, commitSHA, cacheKey string, ctxSrc op
|
||||
var manifestGenResult *apiclient.ManifestResponse
|
||||
ctx, err := ctxSrc()
|
||||
if err == nil {
|
||||
manifestGenResult, err = GenerateManifests(ctx.appPath, repoRoot, commitSHA, q, false)
|
||||
manifestGenResult, err = GenerateManifests(ctx.appPath, repoRoot, commitSHA, q, false, s.initConstants.MaxCombinedDirectoryManifestsSize)
|
||||
}
|
||||
if err != nil {
|
||||
|
||||
@@ -704,7 +714,7 @@ func getRepoCredential(repoCredentials []*v1alpha1.RepoCreds, repoURL string) *v
|
||||
}
|
||||
|
||||
// GenerateManifests generates manifests from a path
|
||||
func GenerateManifests(appPath, repoRoot, revision string, q *apiclient.ManifestRequest, isLocal bool) (*apiclient.ManifestResponse, error) {
|
||||
func GenerateManifests(appPath, repoRoot, revision string, q *apiclient.ManifestRequest, isLocal bool, maxCombinedManifestQuantity resource.Quantity) (*apiclient.ManifestResponse, error) {
|
||||
var targetObjs []*unstructured.Unstructured
|
||||
var dest *v1alpha1.ApplicationDestination
|
||||
|
||||
@@ -752,7 +762,8 @@ func GenerateManifests(appPath, repoRoot, revision string, q *apiclient.Manifest
|
||||
if directory = q.ApplicationSource.Directory; directory == nil {
|
||||
directory = &v1alpha1.ApplicationSourceDirectory{}
|
||||
}
|
||||
targetObjs, err = findManifests(appPath, repoRoot, env, *directory)
|
||||
logCtx := log.WithField("application", q.AppName)
|
||||
targetObjs, err = findManifests(logCtx, appPath, repoRoot, env, *directory, maxCombinedManifestQuantity)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -950,44 +961,27 @@ func ksShow(appLabelKey, appPath string, ksonnetOpts *v1alpha1.ApplicationSource
|
||||
var manifestFile = regexp.MustCompile(`^.*\.(yaml|yml|json|jsonnet)$`)
|
||||
|
||||
// findManifests looks at all yaml files in a directory and unmarshals them into a list of unstructured objects
|
||||
func findManifests(appPath string, repoRoot string, env *v1alpha1.Env, directory v1alpha1.ApplicationSourceDirectory) ([]*unstructured.Unstructured, error) {
|
||||
func findManifests(logCtx *log.Entry, appPath string, repoRoot string, env *v1alpha1.Env, directory v1alpha1.ApplicationSourceDirectory, maxCombinedManifestQuantity resource.Quantity) ([]*unstructured.Unstructured, error) {
|
||||
// Validate the directory before loading any manifests to save memory.
|
||||
potentiallyValidManifests, err := getPotentiallyValidManifests(logCtx, appPath, repoRoot, directory.Recurse, directory.Include, directory.Exclude, maxCombinedManifestQuantity)
|
||||
if err != nil {
|
||||
logCtx.Errorf("failed to get potentially valid manifests: %s", err)
|
||||
return nil, fmt.Errorf("failed to get potentially valid manifests: %w", err)
|
||||
}
|
||||
|
||||
var objs []*unstructured.Unstructured
|
||||
err := filepath.Walk(appPath, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if f.IsDir() {
|
||||
if path != appPath && !directory.Recurse {
|
||||
return filepath.SkipDir
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
for _, potentiallyValidManifest := range potentiallyValidManifests {
|
||||
manifestPath := potentiallyValidManifest.path
|
||||
manifestFileInfo := potentiallyValidManifest.fileInfo
|
||||
|
||||
if !manifestFile.MatchString(f.Name()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
relPath, err := filepath.Rel(appPath, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if directory.Exclude != "" && glob.Match(directory.Exclude, relPath) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if directory.Include != "" && !glob.Match(directory.Include, relPath) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if strings.HasSuffix(f.Name(), ".jsonnet") {
|
||||
if strings.HasSuffix(manifestFileInfo.Name(), ".jsonnet") {
|
||||
vm, err := makeJsonnetVm(appPath, repoRoot, directory.Jsonnet, env)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
jsonStr, err := vm.EvaluateFile(path)
|
||||
jsonStr, err := vm.EvaluateFile(manifestPath)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to evaluate jsonnet %q: %v", f.Name(), err)
|
||||
return nil, status.Errorf(codes.FailedPrecondition, "Failed to evaluate jsonnet %q: %v", manifestFileInfo.Name(), err)
|
||||
}
|
||||
|
||||
// attempt to unmarshal either array or single object
|
||||
@@ -999,49 +993,207 @@ func findManifests(appPath string, repoRoot string, env *v1alpha1.Env, directory
|
||||
var jsonObj unstructured.Unstructured
|
||||
err = json.Unmarshal([]byte(jsonStr), &jsonObj)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to unmarshal generated json %q: %v", f.Name(), err)
|
||||
return nil, status.Errorf(codes.FailedPrecondition, "Failed to unmarshal generated json %q: %v", manifestFileInfo.Name(), err)
|
||||
}
|
||||
objs = append(objs, &jsonObj)
|
||||
}
|
||||
} else {
|
||||
out, err := utfutil.ReadFile(path, utfutil.UTF8)
|
||||
err := getObjsFromYAMLOrJson(logCtx, manifestPath, manifestFileInfo.Name(), &objs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.HasSuffix(f.Name(), ".json") {
|
||||
var obj unstructured.Unstructured
|
||||
err = json.Unmarshal(out, &obj)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to unmarshal %q: %v", f.Name(), err)
|
||||
}
|
||||
objs = append(objs, &obj)
|
||||
} else {
|
||||
yamlObjs, err := kube.SplitYAML(out)
|
||||
if err != nil {
|
||||
if len(yamlObjs) > 0 {
|
||||
// If we get here, we had a multiple objects in a single YAML file which had some
|
||||
// valid k8s objects, but errors parsing others (within the same file). It's very
|
||||
// likely the user messed up a portion of the YAML, so report on that.
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to unmarshal %q: %v", f.Name(), err)
|
||||
}
|
||||
// Otherwise, let's see if it looks like a resource, if yes, we return error
|
||||
if bytes.Contains(out, []byte("apiVersion:")) &&
|
||||
bytes.Contains(out, []byte("kind:")) &&
|
||||
bytes.Contains(out, []byte("metadata:")) {
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to unmarshal %q: %v", f.Name(), err)
|
||||
}
|
||||
// Otherwise, it might be a unrelated YAML file which we will ignore
|
||||
return nil
|
||||
}
|
||||
objs = append(objs, yamlObjs...)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return objs, nil
|
||||
}
|
||||
|
||||
// getObjsFromYAMLOrJson unmarshals the given yaml or json file and appends it to the given list of objects.
|
||||
func getObjsFromYAMLOrJson(logCtx *log.Entry, manifestPath string, filename string, objs *[]*unstructured.Unstructured) error {
|
||||
reader, err := utfutil.OpenFile(manifestPath, utfutil.UTF8)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to open %q", manifestPath)
|
||||
}
|
||||
defer func() {
|
||||
err := reader.Close()
|
||||
if err != nil {
|
||||
logCtx.Errorf("failed to close %q - potential memory leak", manifestPath)
|
||||
}
|
||||
}()
|
||||
if strings.HasSuffix(filename, ".json") {
|
||||
var obj unstructured.Unstructured
|
||||
decoder := json.NewDecoder(reader)
|
||||
err = decoder.Decode(&obj)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to unmarshal %q: %v", filename, err)
|
||||
}
|
||||
if decoder.More() {
|
||||
return status.Errorf(codes.FailedPrecondition, "Found multiple objects in %q. Only single objects are allowed in JSON files.", filename)
|
||||
}
|
||||
*objs = append(*objs, &obj)
|
||||
} else {
|
||||
yamlObjs, err := splitYAMLOrJSON(reader)
|
||||
if err != nil {
|
||||
if len(yamlObjs) > 0 {
|
||||
// If we get here, we had a multiple objects in a single YAML file which had some
|
||||
// valid k8s objects, but errors parsing others (within the same file). It's very
|
||||
// likely the user messed up a portion of the YAML, so report on that.
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to unmarshal %q: %v", filename, err)
|
||||
}
|
||||
// Read the whole file to check whether it looks like a manifest.
|
||||
out, err := utfutil.ReadFile(manifestPath, utfutil.UTF8)
|
||||
// Otherwise, let's see if it looks like a resource, if yes, we return error
|
||||
if bytes.Contains(out, []byte("apiVersion:")) &&
|
||||
bytes.Contains(out, []byte("kind:")) &&
|
||||
bytes.Contains(out, []byte("metadata:")) {
|
||||
return status.Errorf(codes.FailedPrecondition, "Failed to unmarshal %q: %v", filename, err)
|
||||
}
|
||||
// Otherwise, it might be an unrelated YAML file which we will ignore
|
||||
}
|
||||
*objs = append(*objs, yamlObjs...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// splitYAMLOrJSON reads a YAML or JSON file and gets each document as an unstructured object. If the unmarshaller
|
||||
// encounters an error, objects read up until the error are returned.
|
||||
func splitYAMLOrJSON(reader goio.Reader) ([]*unstructured.Unstructured, error) {
|
||||
d := kubeyaml.NewYAMLOrJSONDecoder(reader, 4096)
|
||||
var objs []*unstructured.Unstructured
|
||||
for {
|
||||
u := &unstructured.Unstructured{}
|
||||
if err := d.Decode(&u); err != nil {
|
||||
if err == goio.EOF {
|
||||
break
|
||||
}
|
||||
return objs, fmt.Errorf("failed to unmarshal manifest: %v", err)
|
||||
}
|
||||
if u == nil {
|
||||
continue
|
||||
}
|
||||
objs = append(objs, u)
|
||||
}
|
||||
return objs, nil
|
||||
}
|
||||
|
||||
// getPotentiallyValidManifestFile checks whether the given path/FileInfo may be a valid manifest file. Returns a non-nil error if
|
||||
// there was an error that should not be handled by ignoring the file. Returns non-nil realFileInfo if the file is a
|
||||
// potential manifest. Returns a non-empty ignoreMessage if there's a message that should be logged about why the file
|
||||
// was skipped. If realFileInfo is nil and the ignoreMessage is empty, there's no need to log the ignoreMessage; the
|
||||
// file was skipped for a mundane reason.
|
||||
//
|
||||
// The file is still only a "potentially" valid manifest file because it could be invalid JSON or YAML, or it might not
|
||||
// be a valid Kubernetes resource. This function tests everything possible without actually reading the file.
|
||||
//
|
||||
// repoPath must be absolute.
|
||||
func getPotentiallyValidManifestFile(path string, f os.FileInfo, appPath, repoRoot, include, exclude string) (realFileInfo os.FileInfo, warning string, err error) {
|
||||
relPath, err := filepath.Rel(appPath, path)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to get relative path of %q: %w", path, err)
|
||||
}
|
||||
|
||||
if !manifestFile.MatchString(f.Name()) {
|
||||
return nil, "", nil
|
||||
}
|
||||
|
||||
// If the file is a symlink, these will be overridden with the destination file's info.
|
||||
var relRealPath = relPath
|
||||
realFileInfo = f
|
||||
|
||||
if files.IsSymlink(f) {
|
||||
realPath, err := filepath.EvalSymlinks(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, fmt.Sprintf("destination of symlink %q is missing", relPath), nil
|
||||
}
|
||||
return nil, "", fmt.Errorf("failed to evaluate symlink at %q: %w", relPath, err)
|
||||
}
|
||||
if !files.Inbound(realPath, repoRoot) {
|
||||
return nil, "", fmt.Errorf("illegal filepath in symlink at %q", relPath)
|
||||
}
|
||||
realFileInfo, err = os.Stat(realPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// This should have been caught by filepath.EvalSymlinks, but check again since that function's docs
|
||||
// don't promise to return this error.
|
||||
return nil, fmt.Sprintf("destination of symlink %q is missing at %q", relPath, realPath), nil
|
||||
}
|
||||
return nil, "", fmt.Errorf("failed to get file info for symlink at %q to %q: %w", relPath, realPath, err)
|
||||
}
|
||||
relRealPath, err = filepath.Rel(repoRoot, realPath)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to get relative path of %q: %w", realPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
// FileInfo.Size() behavior is platform-specific for non-regular files. Allow only regular files, so we guarantee
|
||||
// accurate file sizes.
|
||||
if !realFileInfo.Mode().IsRegular() {
|
||||
return nil, fmt.Sprintf("ignoring symlink at %q to non-regular file %q", relPath, relRealPath), nil
|
||||
}
|
||||
|
||||
if exclude != "" && glob.Match(exclude, relPath) {
|
||||
return nil, "", nil
|
||||
}
|
||||
|
||||
if include != "" && !glob.Match(include, relPath) {
|
||||
return nil, "", nil
|
||||
}
|
||||
|
||||
return realFileInfo, "", nil
|
||||
}
|
||||
|
||||
type potentiallyValidManifest struct {
|
||||
path string
|
||||
fileInfo os.FileInfo
|
||||
}
|
||||
|
||||
// getPotentiallyValidManifests ensures that 1) there are no errors while checking for potential manifest files in the given dir
|
||||
// and 2) the combined file size of the potentially-valid manifest files does not exceed the limit.
|
||||
func getPotentiallyValidManifests(logCtx *log.Entry, appPath string, repoRoot string, recurse bool, include string, exclude string, maxCombinedManifestQuantity resource.Quantity) ([]potentiallyValidManifest, error) {
|
||||
maxCombinedManifestFileSize := maxCombinedManifestQuantity.Value()
|
||||
var currentCombinedManifestFileSize = int64(0)
|
||||
|
||||
var potentiallyValidManifests []potentiallyValidManifest
|
||||
err := filepath.Walk(appPath, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if f.IsDir() {
|
||||
if path != appPath && !recurse {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
realFileInfo, warning, err := getPotentiallyValidManifestFile(path, f, appPath, repoRoot, include, exclude)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid manifest file %q: %w", path, err)
|
||||
}
|
||||
if realFileInfo == nil {
|
||||
if warning != "" {
|
||||
logCtx.Warnf("skipping manifest file %q: %s", path, warning)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Don't count jsonnet file size against max. It's jsonnet's responsibility to manage memory usage.
|
||||
if !strings.HasSuffix(f.Name(), ".jsonnet") {
|
||||
// We use the realFileInfo size (which is guaranteed to be a regular file instead of a symlink or other
|
||||
// non-regular file) because .Size() behavior is platform-specific for non-regular files.
|
||||
currentCombinedManifestFileSize += realFileInfo.Size()
|
||||
if maxCombinedManifestFileSize != 0 && currentCombinedManifestFileSize > maxCombinedManifestFileSize {
|
||||
return ErrExceededMaxCombinedManifestFileSize
|
||||
}
|
||||
}
|
||||
potentiallyValidManifests = append(potentiallyValidManifests, potentiallyValidManifest{path: path, fileInfo: f})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
// Not wrapping, because this error should be wrapped by the caller.
|
||||
return nil, err
|
||||
}
|
||||
return objs, nil
|
||||
|
||||
return potentiallyValidManifests, nil
|
||||
}
|
||||
|
||||
func makeJsonnetVm(appPath string, repoRoot string, sourceJsonnet v1alpha1.ApplicationSourceJsonnet, env *v1alpha1.Env) (*jsonnet.VM, error) {
|
||||
@@ -1071,7 +1223,8 @@ func makeJsonnetVm(appPath string, repoRoot string, sourceJsonnet v1alpha1.Appli
|
||||
// Jsonnet Imports relative to the repository path
|
||||
jpaths := []string{appPath}
|
||||
for _, p := range sourceJsonnet.Libs {
|
||||
jpath, _, err := pathutil.ResolveFilePath(appPath, repoRoot, p, nil)
|
||||
// the jsonnet library path is relative to the repository root, not application path
|
||||
jpath, _, err := pathutil.ResolveFilePath(repoRoot, repoRoot, p, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1336,8 +1489,12 @@ func populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse, appPath strin
|
||||
return err
|
||||
}
|
||||
|
||||
if err := loadFileIntoIfExists(filepath.Join(appPath, "values.yaml"), &res.Helm.Values); err != nil {
|
||||
return err
|
||||
if resolvedValuesPath, _, err := pathutil.ResolveFilePath(appPath, repoRoot, "values.yaml", []string{}); err == nil {
|
||||
if err := loadFileIntoIfExists(resolvedValuesPath, &res.Helm.Values); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Warnf("Values file %s is not allowed: %v", filepath.Join(appPath, "values.yaml"), err)
|
||||
}
|
||||
var resolvedSelectedValueFiles []pathutil.ResolvedFilePath
|
||||
// drop not allowed values files
|
||||
@@ -1345,10 +1502,10 @@ func populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse, appPath strin
|
||||
if resolvedFile, _, err := pathutil.ResolveFilePath(appPath, repoRoot, file, q.GetValuesFileSchemes()); err == nil {
|
||||
resolvedSelectedValueFiles = append(resolvedSelectedValueFiles, resolvedFile)
|
||||
} else {
|
||||
log.Debugf("Values file %s is not allowed: %v", file, err)
|
||||
log.Warnf("Values file %s is not allowed: %v", file, err)
|
||||
}
|
||||
}
|
||||
params, err := h.GetParameters(resolvedSelectedValueFiles)
|
||||
params, err := h.GetParameters(resolvedSelectedValueFiles, appPath, repoRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1367,15 +1524,16 @@ func populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse, appPath strin
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadFileIntoIfExists(path string, destination *string) error {
|
||||
info, err := os.Stat(path)
|
||||
func loadFileIntoIfExists(path pathutil.ResolvedFilePath, destination *string) error {
|
||||
stringPath := string(path)
|
||||
info, err := os.Stat(stringPath)
|
||||
|
||||
if err == nil && !info.IsDir() {
|
||||
if bytes, err := ioutil.ReadFile(path); err != nil {
|
||||
*destination = string(bytes)
|
||||
} else {
|
||||
bytes, err := ioutil.ReadFile(stringPath);
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*destination = string(bytes)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
// +build !race
|
||||
|
||||
package repository
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
)
|
||||
|
||||
func TestHelmDependencyWithConcurrency(t *testing.T) {
|
||||
|
||||
// !race:
|
||||
// Un-synchronized use of a random source, will be fixed when this is merged:
|
||||
// https://github.com/argoproj/argo-cd/issues/4728
|
||||
|
||||
cleanup := func() {
|
||||
_ = os.Remove(filepath.Join("../../util/helm/testdata/helm2-dependency", helmDepUpMarkerFile))
|
||||
_ = os.RemoveAll(filepath.Join("../../util/helm/testdata/helm2-dependency", "charts"))
|
||||
}
|
||||
cleanup()
|
||||
defer cleanup()
|
||||
|
||||
helmRepo := argoappv1.Repository{Name: "bitnami", Type: "helm", Repo: "https://charts.bitnami.com/bitnami"}
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(3)
|
||||
for i := 0; i < 3; i++ {
|
||||
go func() {
|
||||
res, err := helmTemplate("../../util/helm/testdata/helm2-dependency", "../..", nil, &apiclient.ManifestRequest{
|
||||
ApplicationSource: &argoappv1.ApplicationSource{},
|
||||
Repos: []*argoappv1.Repository{&helmRepo},
|
||||
}, false)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, res)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
@@ -5,16 +5,19 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -29,6 +32,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/cache"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/metrics"
|
||||
fileutil "github.com/argoproj/argo-cd/v2/test/fixture/path"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
"github.com/argoproj/argo-cd/v2/util/git"
|
||||
gitmocks "github.com/argoproj/argo-cd/v2/util/git/mocks"
|
||||
@@ -52,7 +56,7 @@ func newServiceWithMocks(root string, signed bool) (*Service, *gitmocks.Client)
|
||||
return newServiceWithOpt(func(gitClient *gitmocks.Client) {
|
||||
gitClient.On("Init").Return(nil)
|
||||
gitClient.On("Fetch", mock.Anything).Return(nil)
|
||||
gitClient.On("Checkout", mock.Anything).Return(nil)
|
||||
gitClient.On("Checkout", mock.Anything, mock.Anything).Return(nil)
|
||||
gitClient.On("LsRemote", mock.Anything).Return(mock.Anything, nil)
|
||||
gitClient.On("CommitSHA").Return(mock.Anything, nil)
|
||||
gitClient.On("Root").Return(root)
|
||||
@@ -81,7 +85,6 @@ func newServiceWithOpt(cf clientFunc) (*Service, *gitmocks.Client) {
|
||||
}}, nil)
|
||||
helmClient.On("ExtractChart", chart, version).Return("./testdata/my-chart", io.NopCloser, nil)
|
||||
helmClient.On("CleanChartCache", chart, version).Return(nil)
|
||||
|
||||
service.newGitClient = func(rawRepoURL string, creds git.Creds, insecure bool, enableLfs bool, prosy string, opts ...git.ClientOpts) (client git.Client, e error) {
|
||||
return gitClient, nil
|
||||
}
|
||||
@@ -112,7 +115,7 @@ func newServiceWithCommitSHA(root, revision string) *Service {
|
||||
service, gitClient := newServiceWithOpt(func(gitClient *gitmocks.Client) {
|
||||
gitClient.On("Init").Return(nil)
|
||||
gitClient.On("Fetch", mock.Anything).Return(nil)
|
||||
gitClient.On("Checkout", mock.Anything).Return(nil)
|
||||
gitClient.On("Checkout", mock.Anything, mock.Anything).Return(nil)
|
||||
gitClient.On("LsRemote", revision).Return(revision, revisionErr)
|
||||
gitClient.On("CommitSHA").Return("632039659e542ed7de0c170a4fcc1c571b288fc0", nil)
|
||||
gitClient.On("Root").Return(root)
|
||||
@@ -125,6 +128,31 @@ func newServiceWithCommitSHA(root, revision string) *Service {
|
||||
return service
|
||||
}
|
||||
|
||||
// createSymlink creates a symlink with name linkName to file destName in
|
||||
// workingDir
|
||||
func createSymlink(t *testing.T, workingDir, destName, linkName string) error {
|
||||
oldWorkingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if workingDir != "" {
|
||||
err = os.Chdir(workingDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := os.Chdir(oldWorkingDir); err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
err = os.Symlink(destName, linkName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestGenerateYamlManifestInDir(t *testing.T) {
|
||||
service := newService("../..")
|
||||
|
||||
@@ -140,11 +168,81 @@ func TestGenerateYamlManifestInDir(t *testing.T) {
|
||||
assert.Equal(t, countOfManifests, len(res1.Manifests))
|
||||
|
||||
// this will test concatenated manifests to verify we split YAMLs correctly
|
||||
res2, err := GenerateManifests("./testdata/concatenated", "/", "", &q, false)
|
||||
res2, err := GenerateManifests("./testdata/concatenated", "/", "", &q, false, resource.MustParse("0"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 3, len(res2.Manifests))
|
||||
}
|
||||
|
||||
func Test_GenerateManifests_NoOutOfBoundsAccess(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
outOfBoundsFilename string
|
||||
outOfBoundsFileContents string
|
||||
mustNotContain string // Optional string that must not appear in error or manifest output. If empty, use outOfBoundsFileContents.
|
||||
}{
|
||||
{
|
||||
name: "out of bounds JSON file should not appear in error output",
|
||||
outOfBoundsFilename: "test.json",
|
||||
outOfBoundsFileContents: `{"some": "json"}`,
|
||||
},
|
||||
{
|
||||
name: "malformed JSON file contents should not appear in error output",
|
||||
outOfBoundsFilename: "test.json",
|
||||
outOfBoundsFileContents: "$",
|
||||
},
|
||||
{
|
||||
name: "out of bounds JSON manifest should not appear in manifest output",
|
||||
outOfBoundsFilename: "test.json",
|
||||
// JSON marshalling is deterministic. So if there's a leak, exactly this should appear in the manifests.
|
||||
outOfBoundsFileContents: `{"apiVersion":"v1","kind":"Secret","metadata":{"name":"test","namespace":"default"},"type":"Opaque"}`,
|
||||
},
|
||||
{
|
||||
name: "out of bounds YAML manifest should not appear in manifest output",
|
||||
outOfBoundsFilename: "test.yaml",
|
||||
outOfBoundsFileContents: "apiVersion: v1\nkind: Secret\nmetadata:\n name: test\n namespace: default\ntype: Opaque",
|
||||
mustNotContain: `{"apiVersion":"v1","kind":"Secret","metadata":{"name":"test","namespace":"default"},"type":"Opaque"}`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
outOfBoundsDir := t.TempDir()
|
||||
outOfBoundsFile := path.Join(outOfBoundsDir, testCaseCopy.outOfBoundsFilename)
|
||||
err := os.WriteFile(outOfBoundsFile, []byte(testCaseCopy.outOfBoundsFileContents), os.FileMode(0444))
|
||||
require.NoError(t, err)
|
||||
|
||||
repoDir := t.TempDir()
|
||||
err = os.Symlink(outOfBoundsFile, path.Join(repoDir, testCaseCopy.outOfBoundsFilename))
|
||||
require.NoError(t, err)
|
||||
|
||||
var mustNotContain = testCaseCopy.outOfBoundsFileContents
|
||||
if testCaseCopy.mustNotContain != "" {
|
||||
mustNotContain = testCaseCopy.mustNotContain
|
||||
}
|
||||
|
||||
q := apiclient.ManifestRequest{Repo: &argoappv1.Repository{}, ApplicationSource: &argoappv1.ApplicationSource{}}
|
||||
res, err := GenerateManifests(repoDir, "", "", &q, false, resource.MustParse("0"))
|
||||
require.Error(t, err)
|
||||
assert.NotContains(t, err.Error(), mustNotContain)
|
||||
assert.Contains(t, err.Error(), "illegal filepath")
|
||||
assert.Nil(t, res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateManifests_MissingSymlinkDestination(t *testing.T) {
|
||||
repoDir := t.TempDir()
|
||||
err := os.Symlink("/obviously/does/not/exist", path.Join(repoDir, "test.yaml"))
|
||||
require.NoError(t, err)
|
||||
|
||||
q := apiclient.ManifestRequest{Repo: &argoappv1.Repository{}, ApplicationSource: &argoappv1.ApplicationSource{}}
|
||||
_, err = GenerateManifests(repoDir, "", "", &q, false, resource.MustParse("0"))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGenerateManifests_K8SAPIResetCache(t *testing.T) {
|
||||
service := newService("../..")
|
||||
|
||||
@@ -250,7 +348,7 @@ func TestGenerateJsonnetManifestInDir(t *testing.T) {
|
||||
Jsonnet: argoappv1.ApplicationSourceJsonnet{
|
||||
ExtVars: []argoappv1.JsonnetVar{{Name: "extVarString", Value: "extVarString"}, {Name: "extVarCode", Value: "\"extVarCode\"", Code: true}},
|
||||
TLAs: []argoappv1.JsonnetVar{{Name: "tlaString", Value: "tlaString"}, {Name: "tlaCode", Value: "\"tlaCode\"", Code: true}},
|
||||
Libs: []string{"./vendor"},
|
||||
Libs: []string{"testdata/jsonnet/vendor"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -298,29 +396,6 @@ func TestGenerateKsonnetManifest(t *testing.T) {
|
||||
assert.Equal(t, "https://kubernetes.default.svc", res.Server)
|
||||
}
|
||||
|
||||
func TestGenerateHelmChartWithDependencies(t *testing.T) {
|
||||
service := newService("../..")
|
||||
|
||||
cleanup := func() {
|
||||
_ = os.Remove(filepath.Join("../../util/helm/testdata/helm2-dependency", helmDepUpMarkerFile))
|
||||
_ = os.RemoveAll(filepath.Join("../../util/helm/testdata/helm2-dependency", "charts"))
|
||||
}
|
||||
cleanup()
|
||||
defer cleanup()
|
||||
|
||||
helmRepo := argoappv1.Repository{Name: "bitnami", Type: "helm", Repo: "https://charts.bitnami.com/bitnami"}
|
||||
q := apiclient.ManifestRequest{
|
||||
Repo: &argoappv1.Repository{},
|
||||
ApplicationSource: &argoappv1.ApplicationSource{
|
||||
Path: "./util/helm/testdata/helm2-dependency",
|
||||
},
|
||||
Repos: []*argoappv1.Repository{&helmRepo},
|
||||
}
|
||||
res1, err := service.GenerateManifest(context.Background(), &q)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, res1.Manifests, 10)
|
||||
}
|
||||
|
||||
func TestManifestGenErrorCacheByNumRequests(t *testing.T) {
|
||||
|
||||
// Returns the state of the manifest generation cache, by querying the cache for the previously set result
|
||||
@@ -1051,7 +1126,7 @@ func TestGenerateFromUTF16(t *testing.T) {
|
||||
Repo: &argoappv1.Repository{},
|
||||
ApplicationSource: &argoappv1.ApplicationSource{},
|
||||
}
|
||||
res1, err := GenerateManifests("./testdata/utf-16", "/", "", &q, false)
|
||||
res1, err := GenerateManifests("./testdata/utf-16", "/", "", &q, false, resource.MustParse("0"))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(res1.Manifests))
|
||||
}
|
||||
@@ -1067,12 +1142,15 @@ func TestListApps(t *testing.T) {
|
||||
"app-parameters/multi": "Kustomize",
|
||||
"app-parameters/single-app-only": "Kustomize",
|
||||
"app-parameters/single-global": "Kustomize",
|
||||
"in-bounds-values-file-link": "Helm",
|
||||
"invalid-helm": "Helm",
|
||||
"invalid-kustomize": "Kustomize",
|
||||
"kustomization_yaml": "Kustomize",
|
||||
"kustomization_yml": "Kustomize",
|
||||
"my-chart": "Helm",
|
||||
"my-chart-2": "Helm",
|
||||
"out-of-bounds-values-file-link": "Helm",
|
||||
"values-files": "Helm",
|
||||
}
|
||||
assert.Equal(t, expectedApps, res.Apps)
|
||||
}
|
||||
@@ -1411,6 +1489,7 @@ func runWithTempTestdata(t *testing.T, path string, runner func(t *testing.T, pa
|
||||
tempDir := mkTempParameters("./testdata/app-parameters")
|
||||
defer os.RemoveAll(tempDir)
|
||||
runner(t, filepath.Join(tempDir, "app-parameters", path))
|
||||
os.RemoveAll(tempDir)
|
||||
}
|
||||
|
||||
func TestGenerateManifestsWithAppParameterFile(t *testing.T) {
|
||||
@@ -1612,11 +1691,11 @@ func TestFindResources(t *testing.T) {
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
objs, err := findManifests("testdata/app-include-exclude", ".", nil, argoappv1.ApplicationSourceDirectory{
|
||||
objs, err := findManifests(&log.Entry{}, "testdata/app-include-exclude", ".", nil, argoappv1.ApplicationSourceDirectory{
|
||||
Recurse: true,
|
||||
Include: tc.include,
|
||||
Exclude: tc.exclude,
|
||||
})
|
||||
}, resource.MustParse("0"))
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
@@ -1630,10 +1709,10 @@ func TestFindResources(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFindManifests_Exclude(t *testing.T) {
|
||||
objs, err := findManifests("testdata/app-include-exclude", ".", nil, argoappv1.ApplicationSourceDirectory{
|
||||
objs, err := findManifests(&log.Entry{}, "testdata/app-include-exclude", ".", nil, argoappv1.ApplicationSourceDirectory{
|
||||
Recurse: true,
|
||||
Exclude: "subdir/deploymentSub.yaml",
|
||||
})
|
||||
}, resource.MustParse("0"))
|
||||
|
||||
if !assert.NoError(t, err) || !assert.Len(t, objs, 1) {
|
||||
return
|
||||
@@ -1643,10 +1722,10 @@ func TestFindManifests_Exclude(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFindManifests_Exclude_NothingMatches(t *testing.T) {
|
||||
objs, err := findManifests("testdata/app-include-exclude", ".", nil, argoappv1.ApplicationSourceDirectory{
|
||||
objs, err := findManifests(&log.Entry{}, "testdata/app-include-exclude", ".", nil, argoappv1.ApplicationSourceDirectory{
|
||||
Recurse: true,
|
||||
Exclude: "nothing.yaml",
|
||||
})
|
||||
}, resource.MustParse("0"))
|
||||
|
||||
if !assert.NoError(t, err) || !assert.Len(t, objs, 2) {
|
||||
return
|
||||
@@ -1656,6 +1735,479 @@ func TestFindManifests_Exclude_NothingMatches(t *testing.T) {
|
||||
[]string{"nginx-deployment", "nginx-deployment-sub"}, []string{objs[0].GetName(), objs[1].GetName()})
|
||||
}
|
||||
|
||||
func tempDir(t *testing.T) string {
|
||||
dir, err := ioutil.TempDir(".", "")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
err = os.RemoveAll(dir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
absDir, err := filepath.Abs(dir)
|
||||
require.NoError(t, err)
|
||||
return absDir
|
||||
}
|
||||
|
||||
func walkFor(t *testing.T, root string, testPath string, run func(info fs.FileInfo)) {
|
||||
var hitExpectedPath = false
|
||||
err := filepath.Walk(root, func(path string, info fs.FileInfo, err error) error {
|
||||
if path == testPath {
|
||||
require.NoError(t, err)
|
||||
hitExpectedPath = true
|
||||
run(info)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.True(t, hitExpectedPath, "did not hit expected path when walking directory")
|
||||
}
|
||||
|
||||
func Test_getPotentiallyValidManifestFile(t *testing.T) {
|
||||
// These tests use filepath.Walk instead of os.Stat to get file info, because FileInfo from os.Stat does not return
|
||||
// true for IsSymlink like os.Walk does.
|
||||
|
||||
// These tests do not use t.TempDir() because those directories can contain symlinks which cause test to fail
|
||||
// InBound checks.
|
||||
|
||||
t.Run("non-JSON/YAML is skipped with an empty ignore message", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
filePath := filepath.Join(appDir, "not-json-or-yaml")
|
||||
file, err := os.OpenFile(filePath, os.O_RDONLY|os.O_CREATE, 0644)
|
||||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, filePath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(filePath, info, appDir, appDir, "", "")
|
||||
assert.Nil(t, realFileInfo)
|
||||
assert.Empty(t, ignoreMessage)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("circular link should throw an error", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
aPath := filepath.Join(appDir, "a.json")
|
||||
bPath := filepath.Join(appDir, "b.json")
|
||||
err := os.Symlink(bPath, aPath)
|
||||
require.NoError(t, err)
|
||||
err = os.Symlink(aPath, bPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, aPath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(aPath, info, appDir, appDir, "", "")
|
||||
assert.Nil(t, realFileInfo)
|
||||
assert.Empty(t, ignoreMessage)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "too many links")
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("symlink with missing destination should throw an error", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
aPath := filepath.Join(appDir, "a.json")
|
||||
bPath := filepath.Join(appDir, "b.json")
|
||||
err := os.Symlink(bPath, aPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, aPath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(aPath, info, appDir, appDir, "", "")
|
||||
assert.Nil(t, realFileInfo)
|
||||
assert.NotEmpty(t, ignoreMessage)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("out-of-bounds symlink should throw an error", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
linkPath := filepath.Join(appDir, "a.json")
|
||||
err := os.Symlink("..", linkPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, linkPath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(linkPath, info, appDir, appDir, "", "")
|
||||
assert.Nil(t, realFileInfo)
|
||||
assert.Empty(t, ignoreMessage)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "illegal filepath in symlink")
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("symlink to a non-regular file should be skipped with warning", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
dirPath := filepath.Join(appDir, "test.dir")
|
||||
err := os.MkdirAll(dirPath, 0644)
|
||||
require.NoError(t, err)
|
||||
linkPath := filepath.Join(appDir, "test.json")
|
||||
err = os.Symlink(dirPath, linkPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, linkPath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(linkPath, info, appDir, appDir, "", "")
|
||||
assert.Nil(t, realFileInfo)
|
||||
assert.Contains(t, ignoreMessage, "non-regular file")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("non-included file should be skipped with no message", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
filePath := filepath.Join(appDir, "not-included.yaml")
|
||||
file, err := os.OpenFile(filePath, os.O_RDONLY|os.O_CREATE, 0644)
|
||||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, filePath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(filePath, info, appDir, appDir, "*.json", "")
|
||||
assert.Nil(t, realFileInfo)
|
||||
assert.Empty(t, ignoreMessage)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("excluded file should be skipped with no message", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
filePath := filepath.Join(appDir, "excluded.json")
|
||||
file, err := os.OpenFile(filePath, os.O_RDONLY|os.O_CREATE, 0644)
|
||||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, filePath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(filePath, info, appDir, appDir, "", "excluded.*")
|
||||
assert.Nil(t, realFileInfo)
|
||||
assert.Empty(t, ignoreMessage)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("symlink to a regular file is potentially valid", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
filePath := filepath.Join(appDir, "regular-file")
|
||||
file, err := os.OpenFile(filePath, os.O_RDONLY|os.O_CREATE, 0644)
|
||||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
linkPath := filepath.Join(appDir, "link.json")
|
||||
err = os.Symlink(filePath, linkPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, linkPath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(linkPath, info, appDir, appDir, "", "")
|
||||
assert.NotNil(t, realFileInfo)
|
||||
assert.Empty(t, ignoreMessage)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("a regular file is potentially valid", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
filePath := filepath.Join(appDir, "regular-file.json")
|
||||
file, err := os.OpenFile(filePath, os.O_RDONLY|os.O_CREATE, 0644)
|
||||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, filePath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(filePath, info, appDir, appDir, "", "")
|
||||
assert.NotNil(t, realFileInfo)
|
||||
assert.Empty(t, ignoreMessage)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("realFileInfo is for the destination rather than the symlink", func(t *testing.T) {
|
||||
appDir := tempDir(t)
|
||||
|
||||
filePath := filepath.Join(appDir, "regular-file")
|
||||
file, err := os.OpenFile(filePath, os.O_RDONLY|os.O_CREATE, 0644)
|
||||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
linkPath := filepath.Join(appDir, "link.json")
|
||||
err = os.Symlink(filePath, linkPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
walkFor(t, appDir, linkPath, func(info fs.FileInfo) {
|
||||
realFileInfo, ignoreMessage, err := getPotentiallyValidManifestFile(linkPath, info, appDir, appDir, "", "")
|
||||
assert.NotNil(t, realFileInfo)
|
||||
assert.Equal(t, filepath.Base(filePath), realFileInfo.Name())
|
||||
assert.Empty(t, ignoreMessage)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func Test_getPotentiallyValidManifests(t *testing.T) {
|
||||
// Tests which return no manifests and an error check to make sure the directory exists before running. A missing
|
||||
// directory would produce those same results.
|
||||
|
||||
logCtx := log.WithField("test", "test")
|
||||
|
||||
t.Run("unreadable file throws error", func(t *testing.T) {
|
||||
appDir := t.TempDir()
|
||||
unreadablePath := filepath.Join(appDir, "unreadable.json")
|
||||
err := os.WriteFile(unreadablePath, []byte{}, 0666)
|
||||
require.NoError(t, err)
|
||||
err = os.Chmod(appDir, 0000)
|
||||
require.NoError(t, err)
|
||||
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, appDir, appDir, false, "", "", resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
|
||||
// allow cleanup
|
||||
err = os.Chmod(appDir, 0777)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no recursion when recursion is disabled", func(t *testing.T) {
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, "./testdata/recurse", "./testdata/recurse", false, "", "", resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("recursion when recursion is enabled", func(t *testing.T) {
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, "./testdata/recurse", "./testdata/recurse", true, "", "", resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 2)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("non-JSON/YAML is skipped", func(t *testing.T) {
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, "./testdata/non-manifest-file", "./testdata/non-manifest-file", false, "", "", resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("circular link should throw an error", func(t *testing.T) {
|
||||
const testDir = "./testdata/circular-link"
|
||||
require.DirExists(t, testDir)
|
||||
require.NoError(t, createSymlink(t, testDir, "a.json", "b.json"))
|
||||
defer os.Remove(path.Join(testDir, "a.json"))
|
||||
require.NoError(t, createSymlink(t, testDir, "b.json", "a.json"))
|
||||
defer os.Remove(path.Join(testDir, "b.json"))
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, "./testdata/circular-link", "./testdata/circular-link", false, "", "", resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("out-of-bounds symlink should throw an error", func(t *testing.T) {
|
||||
require.DirExists(t, "./testdata/out-of-bounds-link")
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, "./testdata/out-of-bounds-link", "./testdata/out-of-bounds-link", false, "", "", resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("symlink to a regular file works", func(t *testing.T) {
|
||||
repoRoot, err := filepath.Abs("./testdata/in-bounds-link")
|
||||
require.NoError(t, err)
|
||||
appPath, err := filepath.Abs("./testdata/in-bounds-link/app")
|
||||
require.NoError(t, err)
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, appPath, repoRoot, false, "", "", resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("symlink to nowhere should be ignored", func(t *testing.T) {
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, "./testdata/link-to-nowhere", "./testdata/link-to-nowhere", false, "", "", resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("link to over-sized manifest fails", func(t *testing.T) {
|
||||
repoRoot, err := filepath.Abs("./testdata/in-bounds-link")
|
||||
require.NoError(t, err)
|
||||
appPath, err := filepath.Abs("./testdata/in-bounds-link/app")
|
||||
require.NoError(t, err)
|
||||
// The file is 35 bytes.
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, appPath, repoRoot, false, "", "", resource.MustParse("34"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.ErrorIs(t, err, ErrExceededMaxCombinedManifestFileSize)
|
||||
})
|
||||
|
||||
t.Run("group of files should be limited at precisely the sum of their size", func(t *testing.T) {
|
||||
// There is a total of 10 files, ech file being 10 bytes.
|
||||
manifests, err := getPotentiallyValidManifests(logCtx, "./testdata/several-files", "./testdata/several-files", false, "", "", resource.MustParse("365"))
|
||||
assert.Len(t, manifests, 10)
|
||||
assert.NoError(t, err)
|
||||
|
||||
manifests, err = getPotentiallyValidManifests(logCtx, "./testdata/several-files", "./testdata/several-files", false, "", "", resource.MustParse("100"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.ErrorIs(t, err, ErrExceededMaxCombinedManifestFileSize)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_findManifests(t *testing.T) {
|
||||
logCtx := log.WithField("test", "test")
|
||||
noRecurse := argoappv1.ApplicationSourceDirectory{Recurse: false}
|
||||
|
||||
t.Run("unreadable file throws error", func(t *testing.T) {
|
||||
appDir := t.TempDir()
|
||||
unreadablePath := filepath.Join(appDir, "unreadable.json")
|
||||
err := os.WriteFile(unreadablePath, []byte{}, 0666)
|
||||
require.NoError(t, err)
|
||||
err = os.Chmod(appDir, 0000)
|
||||
require.NoError(t, err)
|
||||
|
||||
manifests, err := findManifests(logCtx, appDir, appDir, nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
|
||||
// allow cleanup
|
||||
err = os.Chmod(appDir, 0777)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no recursion when recursion is disabled", func(t *testing.T) {
|
||||
manifests, err := findManifests(logCtx, "./testdata/recurse", "./testdata/recurse", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 2)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("recursion when recursion is enabled", func(t *testing.T) {
|
||||
recurse := argoappv1.ApplicationSourceDirectory{Recurse: true}
|
||||
manifests, err := findManifests(logCtx, "./testdata/recurse", "./testdata/recurse", nil, recurse, resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 4)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("non-JSON/YAML is skipped", func(t *testing.T) {
|
||||
manifests, err := findManifests(logCtx, "./testdata/non-manifest-file", "./testdata/non-manifest-file", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("circular link should throw an error", func(t *testing.T) {
|
||||
const testDir = "./testdata/circular-link"
|
||||
require.DirExists(t, testDir)
|
||||
require.NoError(t, createSymlink(t, testDir, "a.json", "b.json"))
|
||||
defer os.Remove(path.Join(testDir, "a.json"))
|
||||
require.NoError(t, createSymlink(t, testDir, "b.json", "a.json"))
|
||||
defer os.Remove(path.Join(testDir, "b.json"))
|
||||
manifests, err := findManifests(logCtx, "./testdata/circular-link", "./testdata/circular-link", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("out-of-bounds symlink should throw an error", func(t *testing.T) {
|
||||
require.DirExists(t, "./testdata/out-of-bounds-link")
|
||||
manifests, err := findManifests(logCtx, "./testdata/out-of-bounds-link", "./testdata/out-of-bounds-link", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("symlink to a regular file works", func(t *testing.T) {
|
||||
repoRoot, err := filepath.Abs("./testdata/in-bounds-link")
|
||||
require.NoError(t, err)
|
||||
appPath, err := filepath.Abs("./testdata/in-bounds-link/app")
|
||||
require.NoError(t, err)
|
||||
manifests, err := findManifests(logCtx, appPath, repoRoot, nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("symlink to nowhere should be ignored", func(t *testing.T) {
|
||||
manifests, err := findManifests(logCtx, "./testdata/link-to-nowhere", "./testdata/link-to-nowhere", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("link to over-sized manifest fails", func(t *testing.T) {
|
||||
repoRoot, err := filepath.Abs("./testdata/in-bounds-link")
|
||||
require.NoError(t, err)
|
||||
appPath, err := filepath.Abs("./testdata/in-bounds-link/app")
|
||||
require.NoError(t, err)
|
||||
// The file is 35 bytes.
|
||||
manifests, err := findManifests(logCtx, appPath, repoRoot, nil, noRecurse, resource.MustParse("34"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.ErrorIs(t, err, ErrExceededMaxCombinedManifestFileSize)
|
||||
})
|
||||
|
||||
t.Run("group of files should be limited at precisely the sum of their size", func(t *testing.T) {
|
||||
// There is a total of 10 files, each file being 10 bytes.
|
||||
manifests, err := findManifests(logCtx, "./testdata/several-files", "./testdata/several-files", nil, noRecurse, resource.MustParse("365"))
|
||||
assert.Len(t, manifests, 10)
|
||||
assert.NoError(t, err)
|
||||
|
||||
manifests, err = findManifests(logCtx, "./testdata/several-files", "./testdata/several-files", nil, noRecurse, resource.MustParse("364"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.ErrorIs(t, err, ErrExceededMaxCombinedManifestFileSize)
|
||||
})
|
||||
|
||||
t.Run("jsonnet isn't counted against size limit", func(t *testing.T) {
|
||||
// Each file is 36 bytes. Only the 36-byte json file should be counted against the limit.
|
||||
manifests, err := findManifests(logCtx, "./testdata/jsonnet-and-json", "./testdata/jsonnet-and-json", nil, noRecurse, resource.MustParse("36"))
|
||||
assert.Len(t, manifests, 2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
manifests, err = findManifests(logCtx, "./testdata/jsonnet-and-json", "./testdata/jsonnet-and-json", nil, noRecurse, resource.MustParse("35"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.ErrorIs(t, err, ErrExceededMaxCombinedManifestFileSize)
|
||||
})
|
||||
|
||||
t.Run("partially valid YAML file throws an error", func(t *testing.T) {
|
||||
require.DirExists(t, "./testdata/partially-valid-yaml")
|
||||
manifests, err := findManifests(logCtx, "./testdata/partially-valid-yaml", "./testdata/partially-valid-yaml", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("invalid manifest throws an error", func(t *testing.T) {
|
||||
require.DirExists(t, "./testdata/invalid-manifests")
|
||||
manifests, err := findManifests(logCtx, "./testdata/invalid-manifests", "./testdata/invalid-manifests", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("irrelevant YAML gets skipped, relevant YAML gets parsed", func(t *testing.T) {
|
||||
manifests, err := findManifests(logCtx, "./testdata/irrelevant-yaml", "./testdata/irrelevant-yaml", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("multiple JSON objects in one file throws an error", func(t *testing.T) {
|
||||
require.DirExists(t, "./testdata/json-list")
|
||||
manifests, err := findManifests(logCtx, "./testdata/json-list", "./testdata/json-list", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("invalid JSON throws an error", func(t *testing.T) {
|
||||
require.DirExists(t, "./testdata/invalid-json")
|
||||
manifests, err := findManifests(logCtx, "./testdata/invalid-json", "./testdata/invalid-json", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Empty(t, manifests)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("valid JSON returns manifest and no error", func(t *testing.T) {
|
||||
manifests, err := findManifests(logCtx, "./testdata/valid-json", "./testdata/valid-json", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("YAML with an empty document doesn't throw an error", func(t *testing.T) {
|
||||
manifests, err := findManifests(logCtx, "./testdata/yaml-with-empty-document", "./testdata/yaml-with-empty-document", nil, noRecurse, resource.MustParse("0"))
|
||||
assert.Len(t, manifests, 1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTestRepoOCI(t *testing.T) {
|
||||
service := newService(".")
|
||||
_, err := service.TestRepository(context.Background(), &apiclient.TestRepositoryRequest{
|
||||
@@ -1679,3 +2231,52 @@ func Test_getHelmDependencyRepos(t *testing.T) {
|
||||
assert.Equal(t, repos[0].Repo, repo1)
|
||||
assert.Equal(t, repos[1].Repo, repo2)
|
||||
}
|
||||
|
||||
func Test_findHelmValueFilesInPath(t *testing.T) {
|
||||
t.Run("does not exist", func(t *testing.T) {
|
||||
files, err := findHelmValueFilesInPath("/obviously/does/not/exist")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, files)
|
||||
})
|
||||
t.Run("values files", func(t *testing.T) {
|
||||
files, err := findHelmValueFilesInPath("./testdata/values-files")
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, files, 4)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_populateHelmAppDetails(t *testing.T) {
|
||||
res := apiclient.RepoAppDetailsResponse{}
|
||||
q := apiclient.RepoServerAppDetailsQuery{
|
||||
Repo: &argoappv1.Repository{},
|
||||
Source: &argoappv1.ApplicationSource{
|
||||
Helm: &argoappv1.ApplicationSourceHelm{ValueFiles: []string{"exclude.yaml", "has-the-word-values.yaml"}},
|
||||
},
|
||||
}
|
||||
appPath, err := filepath.Abs("./testdata/values-files/")
|
||||
require.NoError(t, err)
|
||||
err = populateHelmAppDetails(&res, appPath, appPath, &q)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.Helm.Parameters, 3)
|
||||
assert.Len(t, res.Helm.ValueFiles, 4)
|
||||
}
|
||||
|
||||
func Test_populateHelmAppDetails_values_symlinks(t *testing.T) {
|
||||
t.Run("inbound", func(t *testing.T) {
|
||||
res := apiclient.RepoAppDetailsResponse{}
|
||||
q := apiclient.RepoServerAppDetailsQuery{Repo: &argoappv1.Repository{}, Source: &argoappv1.ApplicationSource{}}
|
||||
err := populateHelmAppDetails(&res, "./testdata/in-bounds-values-file-link/", "./testdata/in-bounds-values-file-link/", &q)
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, res.Helm.Values)
|
||||
assert.NotEmpty(t, res.Helm.Parameters)
|
||||
})
|
||||
|
||||
t.Run("out of bounds", func(t *testing.T) {
|
||||
res := apiclient.RepoAppDetailsResponse{}
|
||||
q := apiclient.RepoServerAppDetailsQuery{Repo: &argoappv1.Repository{}, Source: &argoappv1.ApplicationSource{}}
|
||||
err := populateHelmAppDetails(&res, "./testdata/out-of-bounds-values-file-link/", "./testdata/out-of-bounds-values-file-link/", &q)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, res.Helm.Values)
|
||||
assert.Empty(t, res.Helm.Parameters)
|
||||
})
|
||||
}
|
||||
|
||||
0
reposerver/repository/testdata/circular-link/.keep
vendored
Normal file
0
reposerver/repository/testdata/circular-link/.keep
vendored
Normal file
1
reposerver/repository/testdata/in-bounds-link/app/cm.link.yaml
vendored
Symbolic link
1
reposerver/repository/testdata/in-bounds-link/app/cm.link.yaml
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../cm.yaml
|
||||
2
reposerver/repository/testdata/in-bounds-link/cm.yaml
vendored
Normal file
2
reposerver/repository/testdata/in-bounds-link/cm.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
2
reposerver/repository/testdata/in-bounds-values-file-link/Chart.yaml
vendored
Normal file
2
reposerver/repository/testdata/in-bounds-values-file-link/Chart.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
name: my-chart
|
||||
version: 1.1.0
|
||||
1
reposerver/repository/testdata/in-bounds-values-file-link/values-2.yaml
vendored
Normal file
1
reposerver/repository/testdata/in-bounds-values-file-link/values-2.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
some: yaml
|
||||
1
reposerver/repository/testdata/in-bounds-values-file-link/values.yaml
vendored
Symbolic link
1
reposerver/repository/testdata/in-bounds-values-file-link/values.yaml
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
values-2.yaml
|
||||
1
reposerver/repository/testdata/invalid-json/invalid.json
vendored
Normal file
1
reposerver/repository/testdata/invalid-json/invalid.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
[
|
||||
1
reposerver/repository/testdata/irrelevant-yaml/irrelevant.yaml
vendored
Normal file
1
reposerver/repository/testdata/irrelevant-yaml/irrelevant.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
some: [irrelevant, yaml]
|
||||
2
reposerver/repository/testdata/irrelevant-yaml/relevant.yaml
vendored
Normal file
2
reposerver/repository/testdata/irrelevant-yaml/relevant.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
2
reposerver/repository/testdata/json-list/list.json
vendored
Normal file
2
reposerver/repository/testdata/json-list/list.json
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
{"apiVersion": "v1", "kind": "ConfigMap"}
|
||||
{"apiVersion": "v1", "kind": "ConfigMap"}
|
||||
1
reposerver/repository/testdata/jsonnet-and-json/test.json
vendored
Normal file
1
reposerver/repository/testdata/jsonnet-and-json/test.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"apiVersion": "v1", "kind": "Pod"}
|
||||
1
reposerver/repository/testdata/jsonnet-and-json/test.jsonnet
vendored
Normal file
1
reposerver/repository/testdata/jsonnet-and-json/test.jsonnet
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"apiVersion": "v1", "kind": "Pod"}
|
||||
1
reposerver/repository/testdata/link-to-nowhere/nowhere.json
vendored
Symbolic link
1
reposerver/repository/testdata/link-to-nowhere/nowhere.json
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
nowhere
|
||||
0
reposerver/repository/testdata/non-manifest-file/not-json-or-yaml
vendored
Normal file
0
reposerver/repository/testdata/non-manifest-file/not-json-or-yaml
vendored
Normal file
1
reposerver/repository/testdata/out-of-bounds-link/out-of-bounds.json
vendored
Symbolic link
1
reposerver/repository/testdata/out-of-bounds-link/out-of-bounds.json
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../out-of-bounds.json
|
||||
2
reposerver/repository/testdata/out-of-bounds-values-file-link/Chart.yaml
vendored
Normal file
2
reposerver/repository/testdata/out-of-bounds-values-file-link/Chart.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
name: my-chart
|
||||
version: 1.1.0
|
||||
1
reposerver/repository/testdata/out-of-bounds-values-file-link/values.yaml
vendored
Symbolic link
1
reposerver/repository/testdata/out-of-bounds-values-file-link/values.yaml
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../out-of-bounds.yaml
|
||||
0
reposerver/repository/testdata/out-of-bounds.json
vendored
Normal file
0
reposerver/repository/testdata/out-of-bounds.json
vendored
Normal file
1
reposerver/repository/testdata/out-of-bounds.yaml
vendored
Normal file
1
reposerver/repository/testdata/out-of-bounds.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
some: yaml
|
||||
4
reposerver/repository/testdata/partially-valid-yaml/partially-valid.yaml
vendored
Normal file
4
reposerver/repository/testdata/partially-valid-yaml/partially-valid.yaml
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
---
|
||||
invalid:
|
||||
1
reposerver/repository/testdata/several-files/0.json
vendored
Normal file
1
reposerver/repository/testdata/several-files/0.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"apiVersion": "v1", "kind": "ConfigMap"}
|
||||
2
reposerver/repository/testdata/several-files/0.yaml
vendored
Normal file
2
reposerver/repository/testdata/several-files/0.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
1
reposerver/repository/testdata/several-files/1.json
vendored
Normal file
1
reposerver/repository/testdata/several-files/1.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"apiVersion": "v1", "kind": "ConfigMap"}
|
||||
2
reposerver/repository/testdata/several-files/1.yaml
vendored
Normal file
2
reposerver/repository/testdata/several-files/1.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
1
reposerver/repository/testdata/several-files/2.json
vendored
Normal file
1
reposerver/repository/testdata/several-files/2.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"apiVersion": "v1", "kind": "ConfigMap"}
|
||||
2
reposerver/repository/testdata/several-files/2.yaml
vendored
Normal file
2
reposerver/repository/testdata/several-files/2.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
1
reposerver/repository/testdata/several-files/3.json
vendored
Normal file
1
reposerver/repository/testdata/several-files/3.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"apiVersion": "v1", "kind": "ConfigMap"}
|
||||
2
reposerver/repository/testdata/several-files/3.yaml
vendored
Normal file
2
reposerver/repository/testdata/several-files/3.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
1
reposerver/repository/testdata/several-files/4.json
vendored
Normal file
1
reposerver/repository/testdata/several-files/4.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"apiVersion": "v1", "kind": "ConfigMap"}
|
||||
2
reposerver/repository/testdata/several-files/4.yaml
vendored
Normal file
2
reposerver/repository/testdata/several-files/4.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
1
reposerver/repository/testdata/several-files/README.md
vendored
Normal file
1
reposerver/repository/testdata/several-files/README.md
vendored
Normal file
@@ -0,0 +1 @@
|
||||
This file shouldn't be counted in the manifest file size limit, because it isn't JSON or YAML.
|
||||
1
reposerver/repository/testdata/valid-json/valid.json
vendored
Normal file
1
reposerver/repository/testdata/valid-json/valid.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"apiVersion": "v1", "kind": "ConfigMap"}
|
||||
2
reposerver/repository/testdata/values-files/Chart.yaml
vendored
Normal file
2
reposerver/repository/testdata/values-files/Chart.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
name: my-chart
|
||||
version: 1.1.0
|
||||
0
reposerver/repository/testdata/values-files/caps-extn-values.YAML
vendored
Normal file
0
reposerver/repository/testdata/values-files/caps-extn-values.YAML
vendored
Normal file
1
reposerver/repository/testdata/values-files/exclude.yaml
vendored
Normal file
1
reposerver/repository/testdata/values-files/exclude.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
exclude: yaml
|
||||
4
reposerver/repository/testdata/values-files/has-the-word-values.yaml
vendored
Normal file
4
reposerver/repository/testdata/values-files/has-the-word-values.yaml
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
has:
|
||||
the:
|
||||
word:
|
||||
values: yaml
|
||||
1
reposerver/repository/testdata/values-files/values.yaml
vendored
Normal file
1
reposerver/repository/testdata/values-files/values.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
values: yaml
|
||||
0
reposerver/repository/testdata/values-files/values.yml
vendored
Normal file
0
reposerver/repository/testdata/values-files/values.yml
vendored
Normal file
4
reposerver/repository/testdata/yaml-with-empty-document/has-empty.yaml
vendored
Normal file
4
reposerver/repository/testdata/yaml-with-empty-document/has-empty.yaml
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
---
|
||||
---
|
||||
@@ -482,6 +482,21 @@ func (s *Server) ListResourceEvents(ctx context.Context, q *application.Applicat
|
||||
"involvedObject.namespace": a.Namespace,
|
||||
}).String()
|
||||
} else {
|
||||
tree, err := s.getAppResources(ctx, a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
found := false
|
||||
for _, n := range append(tree.Nodes, tree.OrphanedNodes...) {
|
||||
if n.ResourceRef.UID == q.ResourceUID && n.ResourceRef.Name == q.ResourceName && n.ResourceRef.Namespace == q.ResourceNamespace {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "%s not found as part of application %s", q.ResourceName, *q.Name)
|
||||
}
|
||||
|
||||
namespace = q.ResourceNamespace
|
||||
var config *rest.Config
|
||||
config, err = s.getApplicationClusterConfig(ctx, a)
|
||||
@@ -923,7 +938,7 @@ func (s *Server) getAppResources(ctx context.Context, a *appv1.Application) (*ap
|
||||
return &tree, err
|
||||
}
|
||||
|
||||
func (s *Server) getAppResource(ctx context.Context, action string, q *application.ApplicationResourceRequest) (*appv1.ResourceNode, *rest.Config, *appv1.Application, error) {
|
||||
func (s *Server) getAppLiveResource(ctx context.Context, action string, q *application.ApplicationResourceRequest) (*appv1.ResourceNode, *rest.Config, *appv1.Application, error) {
|
||||
a, err := s.appLister.Get(*q.Name)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@@ -938,7 +953,7 @@ func (s *Server) getAppResource(ctx context.Context, action string, q *applicati
|
||||
}
|
||||
|
||||
found := tree.FindNode(q.Group, q.Kind, q.Namespace, q.ResourceName)
|
||||
if found == nil {
|
||||
if found == nil || found.ResourceRef.UID == "" {
|
||||
return nil, nil, nil, status.Errorf(codes.InvalidArgument, "%s %s %s not found as part of application %s", q.Kind, q.Group, q.ResourceName, *q.Name)
|
||||
}
|
||||
config, err := s.getApplicationClusterConfig(ctx, a)
|
||||
@@ -949,7 +964,7 @@ func (s *Server) getAppResource(ctx context.Context, action string, q *applicati
|
||||
}
|
||||
|
||||
func (s *Server) GetResource(ctx context.Context, q *application.ApplicationResourceRequest) (*application.ApplicationResourceResponse, error) {
|
||||
res, config, _, err := s.getAppResource(ctx, rbacpolicy.ActionGet, q)
|
||||
res, config, _, err := s.getAppLiveResource(ctx, rbacpolicy.ActionGet, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -994,7 +1009,7 @@ func (s *Server) PatchResource(ctx context.Context, q *application.ApplicationRe
|
||||
Version: q.Version,
|
||||
Group: q.Group,
|
||||
}
|
||||
res, config, a, err := s.getAppResource(ctx, rbacpolicy.ActionUpdate, resourceRequest)
|
||||
res, config, a, err := s.getAppLiveResource(ctx, rbacpolicy.ActionUpdate, resourceRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1034,7 +1049,7 @@ func (s *Server) DeleteResource(ctx context.Context, q *application.ApplicationR
|
||||
Version: q.Version,
|
||||
Group: q.Group,
|
||||
}
|
||||
res, config, a, err := s.getAppResource(ctx, rbacpolicy.ActionDelete, resourceRequest)
|
||||
res, config, a, err := s.getAppLiveResource(ctx, rbacpolicy.ActionDelete, resourceRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1305,7 +1320,7 @@ func getSelectedPods(treeNodes []appv1.ResourceNode, q *application.ApplicationP
|
||||
var pods []appv1.ResourceNode
|
||||
isTheOneMap := make(map[string]bool)
|
||||
for _, treeNode := range treeNodes {
|
||||
if treeNode.Kind == kube.PodKind && treeNode.Group == "" {
|
||||
if treeNode.Kind == kube.PodKind && treeNode.Group == "" && treeNode.UID != "" {
|
||||
if isTheSelectedOne(&treeNode, q, treeNodes, isTheOneMap) {
|
||||
pods = append(pods, treeNode)
|
||||
}
|
||||
@@ -1615,7 +1630,7 @@ func (s *Server) logResourceEvent(res *appv1.ResourceNode, ctx context.Context,
|
||||
}
|
||||
|
||||
func (s *Server) ListResourceActions(ctx context.Context, q *application.ApplicationResourceRequest) (*application.ResourceActionsListResponse, error) {
|
||||
res, config, _, err := s.getAppResource(ctx, rbacpolicy.ActionGet, q)
|
||||
res, config, _, err := s.getAppLiveResource(ctx, rbacpolicy.ActionGet, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1666,7 +1681,7 @@ func (s *Server) RunResourceAction(ctx context.Context, q *application.ResourceA
|
||||
Group: q.Group,
|
||||
}
|
||||
actionRequest := fmt.Sprintf("%s/%s/%s/%s", rbacpolicy.ActionAction, q.Group, q.Kind, q.Action)
|
||||
res, config, a, err := s.getAppResource(ctx, actionRequest, resourceRequest)
|
||||
res, config, a, err := s.getAppLiveResource(ctx, actionRequest, resourceRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -949,6 +949,9 @@ func (a *ArgoCDServer) Authenticate(ctx context.Context) (context.Context, error
|
||||
}
|
||||
if !argoCDSettings.AnonymousUserEnabled {
|
||||
return ctx, claimsErr
|
||||
} else {
|
||||
// nolint:staticcheck
|
||||
ctx = context.WithValue(ctx, "claims", "")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ package server
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -11,6 +13,7 @@ import (
|
||||
"github.com/dgrijalva/jwt-go/v4"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/metadata"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@@ -430,6 +433,383 @@ func TestAuthenticate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func dexMockHandler(t *testing.T, url string) func(http.ResponseWriter, *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
switch r.RequestURI {
|
||||
case "/api/dex/.well-known/openid-configuration":
|
||||
_, err := io.WriteString(w, fmt.Sprintf(`
|
||||
{
|
||||
"issuer": "%[1]s/api/dex",
|
||||
"authorization_endpoint": "%[1]s/api/dex/auth",
|
||||
"token_endpoint": "%[1]s/api/dex/token",
|
||||
"jwks_uri": "%[1]s/api/dex/keys",
|
||||
"userinfo_endpoint": "%[1]s/api/dex/userinfo",
|
||||
"device_authorization_endpoint": "%[1]s/api/dex/device/code",
|
||||
"grant_types_supported": [
|
||||
"authorization_code",
|
||||
"refresh_token",
|
||||
"urn:ietf:params:oauth:grant-type:device_code"
|
||||
],
|
||||
"response_types_supported": [
|
||||
"code"
|
||||
],
|
||||
"subject_types_supported": [
|
||||
"public"
|
||||
],
|
||||
"id_token_signing_alg_values_supported": [
|
||||
"RS256", "HS256"
|
||||
],
|
||||
"code_challenge_methods_supported": [
|
||||
"S256",
|
||||
"plain"
|
||||
],
|
||||
"scopes_supported": [
|
||||
"openid",
|
||||
"email",
|
||||
"groups",
|
||||
"profile",
|
||||
"offline_access"
|
||||
],
|
||||
"token_endpoint_auth_methods_supported": [
|
||||
"client_secret_basic",
|
||||
"client_secret_post"
|
||||
],
|
||||
"claims_supported": [
|
||||
"iss",
|
||||
"sub",
|
||||
"aud",
|
||||
"iat",
|
||||
"exp",
|
||||
"email",
|
||||
"email_verified",
|
||||
"locale",
|
||||
"name",
|
||||
"preferred_username",
|
||||
"at_hash"
|
||||
]
|
||||
}`, url))
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
default:
|
||||
w.WriteHeader(404)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getTestServer(t *testing.T, anonymousEnabled bool, withFakeSSO bool) (argocd *ArgoCDServer, dexURL string) {
|
||||
cm := test.NewFakeConfigMap()
|
||||
if anonymousEnabled {
|
||||
cm.Data["users.anonymous.enabled"] = "true"
|
||||
}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Start with a placeholder. We need the server URL before setting up the real handler.
|
||||
}))
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
dexMockHandler(t, ts.URL)(w, r)
|
||||
})
|
||||
if withFakeSSO {
|
||||
cm.Data["url"] = ts.URL
|
||||
cm.Data["dex.config"] = `
|
||||
connectors:
|
||||
# OIDC
|
||||
- type: OIDC
|
||||
id: oidc
|
||||
name: OIDC
|
||||
config:
|
||||
issuer: https://auth.example.gom
|
||||
clientID: test-client
|
||||
clientSecret: $dex.oidc.clientSecret`
|
||||
}
|
||||
secret := test.NewFakeSecret()
|
||||
kubeclientset := fake.NewSimpleClientset(cm, secret)
|
||||
appClientSet := apps.NewSimpleClientset()
|
||||
argoCDOpts := ArgoCDServerOpts{
|
||||
Namespace: test.FakeArgoCDNamespace,
|
||||
KubeClientset: kubeclientset,
|
||||
AppClientset: appClientSet,
|
||||
}
|
||||
if withFakeSSO {
|
||||
argoCDOpts.DexServerAddr = ts.URL
|
||||
}
|
||||
argocd = NewServer(context.Background(), argoCDOpts)
|
||||
return argocd, ts.URL
|
||||
}
|
||||
|
||||
func TestAuthenticate_3rd_party_JWTs(t *testing.T) {
|
||||
type testData struct {
|
||||
test string
|
||||
anonymousEnabled bool
|
||||
claims jwt.StandardClaims
|
||||
expectedErrorContains string
|
||||
expectedClaims interface{}
|
||||
}
|
||||
var tests = []testData{
|
||||
{
|
||||
test: "anonymous disabled, no audience",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.StandardClaims{},
|
||||
expectedErrorContains: "no audience found in the token",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, no audience",
|
||||
anonymousEnabled: true,
|
||||
claims: jwt.StandardClaims{},
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "anonymous disabled, unexpired token, admin claim",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.StandardClaims{Audience: jwt.ClaimStrings{"test-client"}, Subject: "admin", ExpiresAt: jwt.NewTime(float64(time.Now().Add(time.Hour * 24).Unix()))},
|
||||
expectedErrorContains: "id token signed with unsupported algorithm",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, unexpired token, admin claim",
|
||||
anonymousEnabled: true,
|
||||
claims: jwt.StandardClaims{Audience: jwt.ClaimStrings{"test-client"}, Subject: "admin", ExpiresAt: jwt.NewTime(float64(time.Now().Add(time.Hour * 24).Unix()))},
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "anonymous disabled, expired token, admin claim",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.StandardClaims{Audience: jwt.ClaimStrings{"test-client"}, Subject: "admin", ExpiresAt: jwt.NewTime(float64(time.Now().Unix()))},
|
||||
expectedErrorContains: "token is expired",
|
||||
expectedClaims: jwt.StandardClaims{Issuer: "sso"},
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, expired token, admin claim",
|
||||
anonymousEnabled: true,
|
||||
claims: jwt.StandardClaims{Audience: jwt.ClaimStrings{"test-client"}, Subject: "admin", ExpiresAt: jwt.NewTime(float64(time.Now().Unix()))},
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testData := range tests {
|
||||
testDataCopy := testData
|
||||
|
||||
t.Run(testDataCopy.test, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argocd, dexURL := getTestServer(t, testDataCopy.anonymousEnabled, true)
|
||||
testDataCopy.claims.Issuer = fmt.Sprintf("%s/api/dex", dexURL)
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodHS256, testDataCopy.claims)
|
||||
tokenString, err := token.SignedString([]byte("key"))
|
||||
require.NoError(t, err)
|
||||
ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs(apiclient.MetaDataTokenKey, tokenString))
|
||||
|
||||
ctx, err = argocd.Authenticate(ctx)
|
||||
claims := ctx.Value("claims")
|
||||
if testDataCopy.expectedClaims == nil {
|
||||
assert.Nil(t, claims)
|
||||
} else {
|
||||
assert.Equal(t, testDataCopy.expectedClaims, claims)
|
||||
}
|
||||
if testDataCopy.expectedErrorContains != "" {
|
||||
assert.Contains(t, err.Error(), testDataCopy.expectedErrorContains, "Authenticate should have thrown an error and blocked the request")
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthenticate_no_request_metadata(t *testing.T) {
|
||||
type testData struct {
|
||||
test string
|
||||
anonymousEnabled bool
|
||||
expectedErrorContains string
|
||||
expectedClaims interface{}
|
||||
}
|
||||
var tests = []testData{
|
||||
{
|
||||
test: "anonymous disabled",
|
||||
anonymousEnabled: false,
|
||||
expectedErrorContains: "no session information",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled",
|
||||
anonymousEnabled: true,
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testData := range tests {
|
||||
testDataCopy := testData
|
||||
|
||||
t.Run(testDataCopy.test, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argocd, _ := getTestServer(t, testDataCopy.anonymousEnabled, true)
|
||||
ctx := context.Background()
|
||||
|
||||
ctx, err := argocd.Authenticate(ctx)
|
||||
claims := ctx.Value("claims")
|
||||
assert.Equal(t, testDataCopy.expectedClaims, claims)
|
||||
if testDataCopy.expectedErrorContains != "" {
|
||||
assert.Contains(t, err.Error(), testDataCopy.expectedErrorContains, "Authenticate should have thrown an error and blocked the request")
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthenticate_no_SSO(t *testing.T) {
|
||||
type testData struct {
|
||||
test string
|
||||
anonymousEnabled bool
|
||||
expectedErrorMessage string
|
||||
expectedClaims interface{}
|
||||
}
|
||||
var tests = []testData{
|
||||
{
|
||||
test: "anonymous disabled",
|
||||
anonymousEnabled: false,
|
||||
expectedErrorMessage: "SSO is not configured",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled",
|
||||
anonymousEnabled: true,
|
||||
expectedErrorMessage: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testData := range tests {
|
||||
testDataCopy := testData
|
||||
|
||||
t.Run(testDataCopy.test, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argocd, dexURL := getTestServer(t, testDataCopy.anonymousEnabled, false)
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.StandardClaims{Issuer: fmt.Sprintf("%s/api/dex", dexURL)})
|
||||
tokenString, err := token.SignedString([]byte("key"))
|
||||
require.NoError(t, err)
|
||||
ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs(apiclient.MetaDataTokenKey, tokenString))
|
||||
|
||||
ctx, err = argocd.Authenticate(ctx)
|
||||
claims := ctx.Value("claims")
|
||||
assert.Equal(t, testDataCopy.expectedClaims, claims)
|
||||
if testDataCopy.expectedErrorMessage != "" {
|
||||
assert.Contains(t, err.Error(), testDataCopy.expectedErrorMessage, "Authenticate should have thrown an error and blocked the request")
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthenticate_bad_request_metadata(t *testing.T) {
|
||||
type testData struct {
|
||||
test string
|
||||
anonymousEnabled bool
|
||||
metadata metadata.MD
|
||||
expectedErrorMessage string
|
||||
expectedClaims interface{}
|
||||
}
|
||||
var tests = []testData{
|
||||
{
|
||||
test: "anonymous disabled, empty metadata",
|
||||
anonymousEnabled: false,
|
||||
metadata: metadata.MD{},
|
||||
expectedErrorMessage: "no session information",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, empty metadata",
|
||||
anonymousEnabled: true,
|
||||
metadata: metadata.MD{},
|
||||
expectedErrorMessage: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "anonymous disabled, empty tokens",
|
||||
anonymousEnabled: false,
|
||||
metadata: metadata.MD{apiclient.MetaDataTokenKey: []string{}},
|
||||
expectedErrorMessage: "no session information",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, empty tokens",
|
||||
anonymousEnabled: true,
|
||||
metadata: metadata.MD{apiclient.MetaDataTokenKey: []string{}},
|
||||
expectedErrorMessage: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "anonymous disabled, bad tokens",
|
||||
anonymousEnabled: false,
|
||||
metadata: metadata.Pairs(apiclient.MetaDataTokenKey, "bad"),
|
||||
expectedErrorMessage: "token contains an invalid number of segments",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, bad tokens",
|
||||
anonymousEnabled: true,
|
||||
metadata: metadata.Pairs(apiclient.MetaDataTokenKey, "bad"),
|
||||
expectedErrorMessage: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "anonymous disabled, bad auth header",
|
||||
anonymousEnabled: false,
|
||||
metadata: metadata.MD{"authorization": []string{"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiJ9.TGGTTHuuGpEU8WgobXxkrBtW3NiR3dgw5LR-1DEW3BQ"}},
|
||||
expectedErrorMessage: "no audience found in the token",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, bad auth header",
|
||||
anonymousEnabled: true,
|
||||
metadata: metadata.MD{"authorization": []string{"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiJ9.TGGTTHuuGpEU8WgobXxkrBtW3NiR3dgw5LR-1DEW3BQ"}},
|
||||
expectedErrorMessage: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "anonymous disabled, bad auth cookie",
|
||||
anonymousEnabled: false,
|
||||
metadata: metadata.MD{"grpcgateway-cookie": []string{"argocd.token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiJ9.TGGTTHuuGpEU8WgobXxkrBtW3NiR3dgw5LR-1DEW3BQ"}},
|
||||
expectedErrorMessage: "no audience found in the token",
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, bad auth cookie",
|
||||
anonymousEnabled: true,
|
||||
metadata: metadata.MD{"grpcgateway-cookie": []string{"argocd.token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiJ9.TGGTTHuuGpEU8WgobXxkrBtW3NiR3dgw5LR-1DEW3BQ"}},
|
||||
expectedErrorMessage: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testData := range tests {
|
||||
testDataCopy := testData
|
||||
|
||||
t.Run(testDataCopy.test, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argocd, _ := getTestServer(t, testDataCopy.anonymousEnabled, true)
|
||||
ctx := metadata.NewIncomingContext(context.Background(), testDataCopy.metadata)
|
||||
|
||||
ctx, err := argocd.Authenticate(ctx)
|
||||
claims := ctx.Value("claims")
|
||||
assert.Equal(t, testDataCopy.expectedClaims, claims)
|
||||
if testDataCopy.expectedErrorMessage != "" {
|
||||
assert.Contains(t, err.Error(), testDataCopy.expectedErrorMessage, "Authenticate should have thrown an error and blocked the request")
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getToken(t *testing.T) {
|
||||
token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c"
|
||||
t.Run("Empty", func(t *testing.T) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM redis:6.2.6-alpine as redis
|
||||
FROM redis:6.2.7-alpine as redis
|
||||
|
||||
FROM node:12.18.4 as node
|
||||
|
||||
@@ -6,7 +6,7 @@ FROM golang:1.16.11 as golang
|
||||
|
||||
FROM registry:2.7.1 as registry
|
||||
|
||||
FROM ubuntu:21.04
|
||||
FROM ubuntu:22.04
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt-get update && apt-get install --fix-missing -y \
|
||||
@@ -64,6 +64,11 @@ COPY ./test/fixture/testrepos/ssh_host_*_key* /etc/ssh/
|
||||
# Copy redis binaries to the image
|
||||
COPY --from=redis /usr/local/bin/* /usr/local/bin/
|
||||
|
||||
# Copy redis dependencies/shared libraries
|
||||
# Ubuntu 22.04+ has moved to OpenSSL3 and no longer provides these libraries
|
||||
COPY --from=redis /usr/lib/x86_64-linux-gnu/libssl.so.1.1 /usr/lib/x86_64-linux-gnu/
|
||||
COPY --from=redis /usr/lib/x86_64-linux-gnu/libcrypto.so.1.1 /usr/lib/x86_64-linux-gnu/
|
||||
|
||||
# Copy registry binaries to the image
|
||||
COPY --from=registry /bin/registry /usr/local/bin/
|
||||
COPY --from=registry /etc/docker/registry/config.yml /etc/docker/registry/config.yml
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1beta "k8s.io/api/networking/v1beta1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
@@ -564,7 +564,7 @@ func TestKnownTypesInCRDDiffing(t *testing.T) {
|
||||
When().
|
||||
And(func() {
|
||||
dummyResIf := DynamicClientset.Resource(dummiesGVR).Namespace(DeploymentNamespace())
|
||||
patchData := []byte(`{"spec":{"requests": {"cpu": "2"}}}`)
|
||||
patchData := []byte(`{"spec": {"cpu": "2"}}`)
|
||||
FailOnErr(dummyResIf.Patch(context.Background(), "dummy-crd-instance", types.MergePatchType, patchData, metav1.PatchOptions{}))
|
||||
}).Refresh(RefreshTypeNormal).
|
||||
Then().
|
||||
@@ -574,7 +574,7 @@ func TestKnownTypesInCRDDiffing(t *testing.T) {
|
||||
SetResourceOverrides(map[string]ResourceOverride{
|
||||
"argoproj.io/Dummy": {
|
||||
KnownTypeFields: []KnownTypeField{{
|
||||
Field: "spec.requests",
|
||||
Field: "spec",
|
||||
Type: "core/v1/ResourceList",
|
||||
}},
|
||||
},
|
||||
@@ -823,64 +823,125 @@ func TestSyncAsync(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced))
|
||||
}
|
||||
|
||||
func TestPermissions(t *testing.T) {
|
||||
EnsureCleanState(t)
|
||||
appName := Name()
|
||||
_, err := RunCli("proj", "create", "test")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// make sure app cannot be created without permissions in project
|
||||
_, err = RunCli("app", "create", appName, "--repo", RepoURL(RepoURLTypeFile),
|
||||
"--path", guestbookPath, "--project", "test", "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
|
||||
assert.Error(t, err)
|
||||
sourceError := fmt.Sprintf("application repo %s is not permitted in project 'test'", RepoURL(RepoURLTypeFile))
|
||||
destinationError := fmt.Sprintf("application destination {%s %s} is not permitted in project 'test'", KubernetesInternalAPIServerAddr, DeploymentNamespace())
|
||||
|
||||
assert.Contains(t, err.Error(), sourceError)
|
||||
assert.Contains(t, err.Error(), destinationError)
|
||||
|
||||
proj, err := AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Get(context.Background(), "test", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
proj.Spec.Destinations = []ApplicationDestination{{Server: "*", Namespace: "*"}}
|
||||
proj.Spec.SourceRepos = []string{"*"}
|
||||
proj, err = AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Update(context.Background(), proj, metav1.UpdateOptions{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// make sure controller report permissions issues in conditions
|
||||
_, err = RunCli("app", "create", appName, "--repo", RepoURL(RepoURLTypeFile),
|
||||
"--path", guestbookPath, "--project", "test", "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err = AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Delete(context.Background(), appName, metav1.DeleteOptions{})
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
proj.Spec.Destinations = []ApplicationDestination{}
|
||||
proj.Spec.SourceRepos = []string{}
|
||||
_, err = AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Update(context.Background(), proj, metav1.UpdateOptions{})
|
||||
assert.NoError(t, err)
|
||||
time.Sleep(1 * time.Second)
|
||||
closer, client, err := ArgoCDClientset.NewApplicationClient()
|
||||
assert.NoError(t, err)
|
||||
defer io.Close(closer)
|
||||
|
||||
refresh := string(RefreshTypeNormal)
|
||||
app, err := client.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: &refresh})
|
||||
assert.NoError(t, err)
|
||||
|
||||
destinationErrorExist := false
|
||||
sourceErrorExist := false
|
||||
for i := range app.Status.Conditions {
|
||||
if strings.Contains(app.Status.Conditions[i].Message, destinationError) {
|
||||
destinationErrorExist = true
|
||||
}
|
||||
if strings.Contains(app.Status.Conditions[i].Message, sourceError) {
|
||||
sourceErrorExist = true
|
||||
// assertResourceActions verifies if view/modify resource actions are successful/failing for given application
|
||||
func assertResourceActions(t *testing.T, appName string, successful bool) {
|
||||
assertError := func(err error, message string) {
|
||||
if successful {
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), message)
|
||||
}
|
||||
}
|
||||
}
|
||||
assert.True(t, destinationErrorExist)
|
||||
assert.True(t, sourceErrorExist)
|
||||
|
||||
closer, cdClient := ArgoCDClientset.NewApplicationClientOrDie()
|
||||
defer io.Close(closer)
|
||||
|
||||
deploymentResource, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Get(context.Background(), "guestbook-ui", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
logs, err := cdClient.PodLogs(context.Background(), &applicationpkg.ApplicationPodLogsQuery{
|
||||
Group: pointer.String("apps"), Kind: pointer.String("Deployment"), Name: &appName, Namespace: DeploymentNamespace(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = logs.Recv()
|
||||
assertError(err, "EOF")
|
||||
|
||||
expectedError := fmt.Sprintf("Deployment apps guestbook-ui not found as part of application %s", appName)
|
||||
|
||||
_, err = cdClient.ListResourceEvents(context.Background(), &applicationpkg.ApplicationResourceEventsQuery{
|
||||
Name: &appName, ResourceName: "guestbook-ui", ResourceNamespace: DeploymentNamespace(), ResourceUID: string(deploymentResource.UID)})
|
||||
assertError(err, fmt.Sprintf("%s not found as part of application %s", "guestbook-ui", appName))
|
||||
|
||||
_, err = cdClient.GetResource(context.Background(), &applicationpkg.ApplicationResourceRequest{
|
||||
Name: &appName, ResourceName: "guestbook-ui", Namespace: DeploymentNamespace(), Version: "v1", Group: "apps", Kind: "Deployment"})
|
||||
assertError(err, expectedError)
|
||||
|
||||
_, err = cdClient.DeleteResource(context.Background(), &applicationpkg.ApplicationResourceDeleteRequest{
|
||||
Name: &appName, ResourceName: "guestbook-ui", Namespace: DeploymentNamespace(), Version: "v1", Group: "apps", Kind: "Deployment",
|
||||
})
|
||||
assertError(err, expectedError)
|
||||
|
||||
_, err = cdClient.RunResourceAction(context.Background(), &applicationpkg.ResourceActionRunRequest{
|
||||
Name: &appName, ResourceName: "guestbook-ui", Namespace: DeploymentNamespace(), Version: "v1", Group: "apps", Kind: "Deployment", Action: "restart",
|
||||
})
|
||||
assertError(err, expectedError)
|
||||
}
|
||||
|
||||
func TestPermissions(t *testing.T) {
|
||||
appCtx := Given(t)
|
||||
projName := "argo-project"
|
||||
projActions := projectFixture.
|
||||
Given(t).
|
||||
Name(projName).
|
||||
When().
|
||||
Create()
|
||||
|
||||
sourceError := fmt.Sprintf("application repo %s is not permitted in project 'argo-project'", RepoURL(RepoURLTypeFile))
|
||||
destinationError := fmt.Sprintf("application destination {%s %s} is not permitted in project 'argo-project'", KubernetesInternalAPIServerAddr, DeploymentNamespace())
|
||||
|
||||
appCtx.
|
||||
Path("guestbook-logs").
|
||||
Project(projName).
|
||||
When().
|
||||
IgnoreErrors().
|
||||
// ensure app is not created if project permissions are missing
|
||||
Create().
|
||||
Then().
|
||||
Expect(Error("", sourceError)).
|
||||
Expect(Error("", destinationError)).
|
||||
When().
|
||||
DoNotIgnoreErrors().
|
||||
// add missing permissions, create and sync app
|
||||
And(func() {
|
||||
projActions.AddDestination("*", "*")
|
||||
projActions.AddSource("*")
|
||||
}).
|
||||
Create().
|
||||
Sync().
|
||||
Then().
|
||||
// make sure application resource actiions are successful
|
||||
And(func(app *Application) {
|
||||
assertResourceActions(t, app.Name, true)
|
||||
}).
|
||||
When().
|
||||
// remove projet permissions and "refresh" app
|
||||
And(func() {
|
||||
projActions.UpdateProject(func(proj *AppProject) {
|
||||
proj.Spec.Destinations = nil
|
||||
proj.Spec.SourceRepos = nil
|
||||
})
|
||||
}).
|
||||
Refresh(RefreshTypeNormal).
|
||||
Then().
|
||||
// ensure app resource tree is empty when source/destination permissions are missing
|
||||
Expect(Condition(ApplicationConditionInvalidSpecError, destinationError)).
|
||||
Expect(Condition(ApplicationConditionInvalidSpecError, sourceError)).
|
||||
And(func(app *Application) {
|
||||
closer, cdClient := ArgoCDClientset.NewApplicationClientOrDie()
|
||||
defer io.Close(closer)
|
||||
tree, err := cdClient.ResourceTree(context.Background(), &applicationpkg.ResourcesQuery{ApplicationName: &app.Name})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, tree.Nodes, 0)
|
||||
assert.Len(t, tree.OrphanedNodes, 0)
|
||||
}).
|
||||
When().
|
||||
// add missing permissions but deny management of Deployment kind
|
||||
And(func() {
|
||||
projActions.
|
||||
AddDestination("*", "*").
|
||||
AddSource("*").
|
||||
UpdateProject(func(proj *AppProject) {
|
||||
proj.Spec.NamespaceResourceBlacklist = []metav1.GroupKind{{Group: "*", Kind: "Deployment"}}
|
||||
})
|
||||
}).
|
||||
Refresh(RefreshTypeNormal).
|
||||
Then().
|
||||
// make sure application resource actiions are failing
|
||||
And(func(app *Application) {
|
||||
assertResourceActions(t, "test-permissions", false)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPermissionWithScopedRepo(t *testing.T) {
|
||||
@@ -1194,23 +1255,27 @@ func TestOrphanedResource(t *testing.T) {
|
||||
func TestNotPermittedResources(t *testing.T) {
|
||||
ctx := Given(t)
|
||||
|
||||
ingress := &networkingv1beta.Ingress{
|
||||
pathType := networkingv1.PathTypePrefix
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sample-ingress",
|
||||
Labels: map[string]string{
|
||||
common.LabelKeyAppInstance: ctx.GetName(),
|
||||
},
|
||||
},
|
||||
Spec: networkingv1beta.IngressSpec{
|
||||
Rules: []networkingv1beta.IngressRule{{
|
||||
IngressRuleValue: networkingv1beta.IngressRuleValue{
|
||||
HTTP: &networkingv1beta.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1beta.HTTPIngressPath{{
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{{
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{{
|
||||
Path: "/",
|
||||
Backend: networkingv1beta.IngressBackend{
|
||||
ServiceName: "guestbook-ui",
|
||||
ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80},
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "guestbook-ui",
|
||||
Port: networkingv1.ServiceBackendPort{Number: 80},
|
||||
},
|
||||
},
|
||||
PathType: &pathType,
|
||||
}},
|
||||
},
|
||||
},
|
||||
@@ -1219,7 +1284,7 @@ func TestNotPermittedResources(t *testing.T) {
|
||||
}
|
||||
defer func() {
|
||||
log.Infof("Ingress 'sample-ingress' deleted from %s", ArgoCDNamespace)
|
||||
CheckError(KubeClientset.NetworkingV1beta1().Ingresses(ArgoCDNamespace).Delete(context.Background(), "sample-ingress", metav1.DeleteOptions{}))
|
||||
CheckError(KubeClientset.NetworkingV1().Ingresses(ArgoCDNamespace).Delete(context.Background(), "sample-ingress", metav1.DeleteOptions{}))
|
||||
}()
|
||||
|
||||
svc := &v1.Service{
|
||||
@@ -1247,7 +1312,7 @@ func TestNotPermittedResources(t *testing.T) {
|
||||
{Group: "", Kind: "Service"},
|
||||
}}).
|
||||
And(func() {
|
||||
FailOnErr(KubeClientset.NetworkingV1beta1().Ingresses(ArgoCDNamespace).Create(context.Background(), ingress, metav1.CreateOptions{}))
|
||||
FailOnErr(KubeClientset.NetworkingV1().Ingresses(ArgoCDNamespace).Create(context.Background(), ingress, metav1.CreateOptions{}))
|
||||
FailOnErr(KubeClientset.CoreV1().Services(DeploymentNamespace()).Create(context.Background(), svc, metav1.CreateOptions{}))
|
||||
}).
|
||||
Path(guestbookPath).
|
||||
@@ -1273,7 +1338,7 @@ func TestNotPermittedResources(t *testing.T) {
|
||||
Expect(DoesNotExist())
|
||||
|
||||
// Make sure prohibited resources are not deleted during application deletion
|
||||
FailOnErr(KubeClientset.NetworkingV1beta1().Ingresses(ArgoCDNamespace).Get(context.Background(), "sample-ingress", metav1.GetOptions{}))
|
||||
FailOnErr(KubeClientset.NetworkingV1().Ingresses(ArgoCDNamespace).Get(context.Background(), "sample-ingress", metav1.GetOptions{}))
|
||||
FailOnErr(KubeClientset.CoreV1().Services(DeploymentNamespace()).Get(context.Background(), "guestbook-ui", metav1.GetOptions{}))
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,11 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/clusterauth"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
clusterpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster"
|
||||
"github.com/argoproj/argo-cd/v2/test/e2e/fixture"
|
||||
@@ -63,6 +67,30 @@ func (a *Actions) Create(args ...string) *Actions {
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) CreateWithRBAC(args ...string) *Actions {
|
||||
pathOpts := clientcmd.NewDefaultPathOptions()
|
||||
config, err := pathOpts.GetStartingConfig()
|
||||
if err != nil {
|
||||
a.lastError = err
|
||||
return a
|
||||
}
|
||||
clientConfig := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{})
|
||||
conf, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
a.lastError = err
|
||||
return a
|
||||
}
|
||||
client := kubernetes.NewForConfigOrDie(conf)
|
||||
|
||||
_, err = clusterauth.InstallClusterManagerRBAC(client, "kube-system", []string{}, common.BearerTokenTimeout)
|
||||
if err != nil {
|
||||
a.lastError = err
|
||||
return a
|
||||
}
|
||||
|
||||
return a.Create()
|
||||
}
|
||||
|
||||
func (a *Actions) List() *Actions {
|
||||
a.context.t.Helper()
|
||||
a.runCli("cluster", "list")
|
||||
@@ -75,6 +103,20 @@ func (a *Actions) Get() *Actions {
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) DeleteByName() *Actions {
|
||||
a.context.t.Helper()
|
||||
|
||||
a.runCli("cluster", "rm", a.context.name)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) DeleteByServer() *Actions {
|
||||
a.context.t.Helper()
|
||||
|
||||
a.runCli("cluster", "rm", a.context.server)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) Then() *Consequences {
|
||||
a.context.t.Helper()
|
||||
return &Consequences{a.context, a}
|
||||
|
||||
@@ -543,7 +543,9 @@ func EnsureCleanState(t *testing.T) {
|
||||
FailOnErr(Run("", "mkdir", "-p", TmpDir))
|
||||
|
||||
// random id - unique across test runs
|
||||
postFix := "-" + strings.ToLower(rand.RandString(5))
|
||||
randString, err := rand.String(5)
|
||||
CheckError(err)
|
||||
postFix := "-" + strings.ToLower(randString)
|
||||
id = t.Name() + postFix
|
||||
name = DnsFriendly(t.Name(), "")
|
||||
deploymentNamespace = DnsFriendly(fmt.Sprintf("argocd-e2e-%s", t.Name()), postFix)
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
package project
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/test/e2e/fixture"
|
||||
)
|
||||
|
||||
@@ -34,6 +40,25 @@ func (a *Actions) Create(args ...string) *Actions {
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AddDestination(cluster string, namespace string) *Actions {
|
||||
a.runCli("proj", "add-destination", a.context.name, cluster, namespace)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AddSource(repo string) *Actions {
|
||||
a.runCli("proj", "add-source", a.context.name, repo)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) UpdateProject(updater func(project *v1alpha1.AppProject)) *Actions {
|
||||
proj, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.ArgoCDNamespace).Get(context.TODO(), a.context.name, v1.GetOptions{})
|
||||
require.NoError(a.context.t, err)
|
||||
updater(proj)
|
||||
_, err = fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.ArgoCDNamespace).Update(context.TODO(), proj, v1.UpdateOptions{})
|
||||
require.NoError(a.context.t, err)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) Name(name string) *Actions {
|
||||
a.context.name = name
|
||||
return a
|
||||
@@ -72,4 +97,7 @@ func (a *Actions) Then() *Consequences {
|
||||
func (a *Actions) runCli(args ...string) {
|
||||
a.context.t.Helper()
|
||||
a.lastOutput, a.lastError = fixture.RunCli(args...)
|
||||
if !a.ignoreErrors {
|
||||
require.Empty(a.context.t, a.lastError)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
. "github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
. "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/test/e2e/fixture"
|
||||
@@ -110,7 +111,9 @@ func TestSelectiveSyncWithNamespace(t *testing.T) {
|
||||
}
|
||||
|
||||
func getNewNamespace(t *testing.T) string {
|
||||
postFix := "-" + strings.ToLower(rand.RandString(5))
|
||||
randStr, err := rand.String(5)
|
||||
require.NoError(t, err)
|
||||
postFix := "-" + strings.ToLower(randStr)
|
||||
name := fixture.DnsFriendly(t.Name(), "")
|
||||
return fixture.DnsFriendly(fmt.Sprintf("argocd-e2e-%s", name), postFix)
|
||||
}
|
||||
|
||||
@@ -4,9 +4,8 @@ kind: Dummy
|
||||
metadata:
|
||||
name: dummy-crd-instance
|
||||
spec:
|
||||
requests:
|
||||
cpu: 2000m
|
||||
memory: 32Mi
|
||||
cpu: 2000m
|
||||
memory: 32Mi
|
||||
---
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Dummy
|
||||
|
||||
57
test/e2e/testdata/crd-creation/crd.yaml
vendored
57
test/e2e/testdata/crd-creation/crd.yaml
vendored
@@ -1,24 +1,69 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: dummies.argoproj.io
|
||||
spec:
|
||||
conversion:
|
||||
strategy: None
|
||||
group: argoproj.io
|
||||
version: v1alpha1
|
||||
scope: Namespaced
|
||||
names:
|
||||
kind: Dummy
|
||||
listKind: DummyList
|
||||
plural: dummies
|
||||
singular: dummy
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
cpu:
|
||||
type: string
|
||||
memory:
|
||||
type: string
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterdummies.argoproj.io
|
||||
spec:
|
||||
conversion:
|
||||
strategy: None
|
||||
group: argoproj.io
|
||||
version: v1alpha1
|
||||
scope: Cluster
|
||||
names:
|
||||
kind: ClusterDummy
|
||||
listKind: ClusterDummyList
|
||||
plural: clusterdummies
|
||||
singular: clusterdummy
|
||||
scope: Cluster
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
|
||||
79
test/e2e/testdata/crd-subresource/crd.yaml
vendored
79
test/e2e/testdata/crd-subresource/crd.yaml
vendored
@@ -1,25 +1,88 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: statussubresources.argoproj.io
|
||||
spec:
|
||||
conversion:
|
||||
strategy: None
|
||||
group: argoproj.io
|
||||
version: v1alpha1
|
||||
scope: Namespaced
|
||||
names:
|
||||
kind: StatusSubResource
|
||||
listKind: StatusSubResourceList
|
||||
plural: statussubresources
|
||||
subresources:
|
||||
status: {}
|
||||
singular: statussubresource
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
properties:
|
||||
foo:
|
||||
type: string
|
||||
status:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
properties:
|
||||
bar:
|
||||
type: string
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: nonstatussubresources.argoproj.io
|
||||
spec:
|
||||
conversion:
|
||||
strategy: None
|
||||
group: argoproj.io
|
||||
version: v1alpha1
|
||||
scope: Namespaced
|
||||
names:
|
||||
kind: NonStatusSubResource
|
||||
listKind: NonStatusSubResourceList
|
||||
plural: nonstatussubresources
|
||||
singular: nonstatussubresource
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
properties:
|
||||
foo:
|
||||
type: string
|
||||
status:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
properties:
|
||||
bar:
|
||||
type: string
|
||||
|
||||
@@ -16,7 +16,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:1.17.4-alpine
|
||||
image: quay.io/argoprojlabs/argocd-e2e-container:0.1
|
||||
ports:
|
||||
- containerPort: "80"
|
||||
imagePullPolicy: IfNotPresent
|
||||
@@ -1,16 +1,36 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterdummies.argoproj.io
|
||||
spec:
|
||||
conversion:
|
||||
strategy: None
|
||||
group: argoproj.io
|
||||
version: v1alpha1
|
||||
scope: Cluster
|
||||
names:
|
||||
kind: ClusterDummy
|
||||
listKind: ClusterDummyList
|
||||
plural: clusterdummies
|
||||
|
||||
singular: clusterdummy
|
||||
scope: Cluster
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
|
||||
---
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user