Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 09:38:49 +01:00)

Compare commits: 45 commits (release-3. ... v2.2.3)
| Author | SHA1 | Date |
|---|---|---|
| | 987f6659b8 | |
| | e099a6a851 | |
| | afbd59ba63 | |
| | b1e3a07d92 | |
| | c3144c0059 | |
| | 33547f149b | |
| | 06dc9aa836 | |
| | 03b17e0233 | |
| | d5909f7168 | |
| | 7d0d665747 | |
| | 834a102c09 | |
| | 4bcd8cf733 | |
| | a069c602dc | |
| | e309ceebac | |
| | 4a7f0bbfd8 | |
| | 28a54bf2a2 | |
| | e209426a7e | |
| | 06a95f86ce | |
| | 122ecefc3a | |
| | 004d73ce92 | |
| | 81e1a58328 | |
| | 84f949ff17 | |
| | a7e7f32a0f | |
| | 6da92a8e81 | |
| | d5368f5714 | |
| | 25cfb27d51 | |
| | 47d23e1f07 | |
| | 1dc14dc172 | |
| | 5c06333914 | |
| | 656bee1402 | |
| | 2a30c92a7e | |
| | 6a1fec9d33 | |
| | 0faeeb843d | |
| | 48bdabad1a | |
| | c3fd7f5f2d | |
| | 3f75a7faa3 | |
| | 0f14657301 | |
| | 02768367b5 | |
| | 3b628b3af8 | |
| | d8d2920eff | |
| | 1a72853ca3 | |
| | 5354e7d823 | |
| | 34d8f12c99 | |
| | 8840688e6e | |
| | c081ef0b00 | |
4  .github/workflows/ci-build.yaml (vendored)

@@ -12,7 +12,7 @@ on:

env:
# Golang version to use across CI steps
GOLANG_VERSION: '1.16.5'
GOLANG_VERSION: '1.16.11'

jobs:
build-docker:

@@ -400,7 +400,7 @@ jobs:
run: |
docker pull quay.io/dexidp/dex:v2.25.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:6.2.4-alpine
docker pull redis:6.2.6-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
2  .github/workflows/image.yaml (vendored)

@@ -6,7 +6,7 @@ on:
- master

env:
GOLANG_VERSION: '1.16.5'
GOLANG_VERSION: '1.16.11'

jobs:
publish:
2  .github/workflows/release.yaml (vendored)

@@ -12,7 +12,7 @@ on:
- '!release-v0*'

env:
GOLANG_VERSION: '1.16.5'
GOLANG_VERSION: '1.16.11'

jobs:
prepare-release:
Dockerfile

@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:21.04
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.16.5 as builder
FROM docker.io/library/golang:1.16.11 as builder

RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list

@@ -101,7 +101,7 @@ RUN NODE_ENV='production' NODE_ONLINE_ENV='online' yarn build
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM golang:1.16.5 as argocd-build
FROM docker.io/library/golang:1.16.11 as argocd-build

WORKDIR /go/src/github.com/argoproj/argo-cd
2  Procfile

@@ -1,7 +1,7 @@
controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} "
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.0 serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.4-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.6-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-/tmp/argo-e2e/app/config/plugin} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} go run ./cmd/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
git-server: test/fixture/testrepos/start-git.sh
@@ -18,6 +18,7 @@ import (

type forwardCacheClient struct {
namespace string
context string
init sync.Once
client cache.CacheClient
err error

@@ -25,7 +26,9 @@ type forwardCacheClient struct {

func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error) error {
c.init.Do(func() {
overrides := clientcmd.ConfigOverrides{}
overrides := clientcmd.ConfigOverrides{
CurrentContext: c.context,
}
redisPort, err := kubeutil.PortForward(6379, c.namespace, &overrides,
"app.kubernetes.io/name=argocd-redis-ha-haproxy", "app.kubernetes.io/name=argocd-redis")
if err != nil {

@@ -74,6 +77,7 @@ func (c *forwardCacheClient) NotifyUpdated(key string) error {

type forwardRepoClientset struct {
namespace string
context string
init sync.Once
repoClientset repoapiclient.Clientset
err error

@@ -81,7 +85,9 @@ type forwardRepoClientset struct {

func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.RepoServerServiceClient, error) {
c.init.Do(func() {
overrides := clientcmd.ConfigOverrides{}
overrides := clientcmd.ConfigOverrides{
CurrentContext: c.context,
}
repoServerPort, err := kubeutil.PortForward(8081, c.namespace, &overrides, "app.kubernetes.io/name=argocd-repo-server")
if err != nil {
c.err = err
@@ -27,6 +27,8 @@ import (
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/localconfig"

flag "github.com/spf13/pflag"
)

func testAPI(clientOpts *argoapi.ClientOptions) error {

@@ -43,6 +45,13 @@ func testAPI(clientOpts *argoapi.ClientOptions) error {
return err
}

func retrieveContextIfChanged(contextFlag *flag.Flag) string {
if contextFlag != nil && contextFlag.Changed {
return contextFlag.Value.String()
}
return ""
}

// InitCommand allows executing command in a headless mode: on the fly starts Argo CD API server and
// changes provided client options to use started API server port
func InitCommand(cmd *cobra.Command, clientOpts *argoapi.ClientOptions, port *int) *cobra.Command {

@@ -108,12 +117,14 @@ func InitCommand(cmd *cobra.Command, clientOpts *argoapi.ClientOptions, port *in
return err
}

context := retrieveContextIfChanged(cmd.Flag("context"))

mr, err := miniredis.Run()
if err != nil {
return err
}

appstateCache := appstatecache.NewCache(cacheutil.NewCache(&forwardCacheClient{namespace: namespace}), time.Hour)
appstateCache := appstatecache.NewCache(cacheutil.NewCache(&forwardCacheClient{namespace: namespace, context: context}), time.Hour)
srv := server.NewServer(ctx, server.ArgoCDServerOpts{
EnableGZip: false,
Namespace: namespace,

@@ -125,7 +136,7 @@ func InitCommand(cmd *cobra.Command, clientOpts *argoapi.ClientOptions, port *in
KubeClientset: kubeClientset,
Insecure: true,
ListenHost: "localhost",
RepoClientset: &forwardRepoClientset{namespace: namespace},
RepoClientset: &forwardRepoClientset{namespace: namespace, context: context},
})

go srv.Run(ctx, *port, 0)
80  cmd/argocd/commands/headless/headless_test.go (Normal file)

@@ -0,0 +1,80 @@
package headless

import (
"testing"

flag "github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
)

type StringFlag struct {
// The exact value provided on the flag
value string
}

func (f StringFlag) String() string {
return f.value
}

func (f *StringFlag) Set(value string) error {
f.value = value
return nil
}

func (f *StringFlag) Type() string {
return "string"
}

func Test_FlagContextNotChanged(t *testing.T) {
res := retrieveContextIfChanged(&flag.Flag{
Name: "",
Shorthand: "",
Usage: "",
Value: &StringFlag{value: "test"},
DefValue: "",
Changed: false,
NoOptDefVal: "",
Deprecated: "",
Hidden: false,
ShorthandDeprecated: "",
Annotations: nil,
})

assert.Equal(t, "", res)
}

func Test_FlagContextChanged(t *testing.T) {
res := retrieveContextIfChanged(&flag.Flag{
Name: "",
Shorthand: "",
Usage: "",
Value: &StringFlag{value: "test"},
DefValue: "",
Changed: true,
NoOptDefVal: "",
Deprecated: "",
Hidden: false,
ShorthandDeprecated: "",
Annotations: nil,
})

assert.Equal(t, "test", res)
}

func Test_FlagContextNil(t *testing.T) {
res := retrieveContextIfChanged(&flag.Flag{
Name: "",
Shorthand: "",
Usage: "",
Value: nil,
DefValue: "",
Changed: false,
NoOptDefVal: "",
Deprecated: "",
Hidden: false,
ShorthandDeprecated: "",
Annotations: nil,
})

assert.Equal(t, "", res)
}
@@ -166,19 +166,26 @@ func NewApplicationController(
AddFunc: func(obj interface{}) {
if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
ctrl.projectRefreshQueue.Add(key)
ctrl.InvalidateProjectsCache()
if projMeta, ok := obj.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
}

}
},
UpdateFunc: func(old, new interface{}) {
if key, err := cache.MetaNamespaceKeyFunc(new); err == nil {
ctrl.projectRefreshQueue.Add(key)
ctrl.InvalidateProjectsCache()
if projMeta, ok := new.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
}
}
},
DeleteFunc: func(obj interface{}) {
if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
ctrl.projectRefreshQueue.Add(key)
ctrl.InvalidateProjectsCache()
if projMeta, ok := obj.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
}
}
},
})

@@ -207,11 +214,17 @@ func NewApplicationController(
return &ctrl, nil
}

func (ctrl *ApplicationController) InvalidateProjectsCache() {
ctrl.projByNameCache.Range(func(key, _ interface{}) bool {
ctrl.projByNameCache.Delete(key)
return true
})
func (ctrl *ApplicationController) InvalidateProjectsCache(names ...string) {
if len(names) > 0 {
for _, name := range names {
ctrl.projByNameCache.Delete(name)
}
} else {
ctrl.projByNameCache.Range(func(key, _ interface{}) bool {
ctrl.projByNameCache.Delete(key)
return true
})
}
}

func (ctrl *ApplicationController) GetMetricsServer() *metrics.MetricsServer {

@@ -284,12 +297,8 @@ func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]b
if !ok {
continue
}
// exclude resource unless it is permitted in the app project. If project is not permitted then it is not controlled by the user and there is no point showing the warning.
if proj, err := ctrl.getAppProj(app); err == nil && proj.IsGroupKindPermitted(ref.GroupVersionKind().GroupKind(), true) &&
!isKnownOrphanedResourceExclusion(kube.NewResourceKey(ref.GroupVersionKind().Group, ref.GroupVersionKind().Kind, ref.Namespace, ref.Name), proj) {

managedByApp[app.Name] = false
}
managedByApp[app.Name] = true
}
}
}

@@ -360,7 +369,7 @@ func isKnownOrphanedResourceExclusion(key kube.ResourceKey, proj *appv1.AppProje
func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managedResources []*appv1.ResourceDiff) (*appv1.ApplicationTree, error) {
nodes := make([]appv1.ResourceNode, 0)

proj, err := argo.GetAppProject(&a.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace, ctrl.settingsMgr, ctrl.db, context.TODO())
proj, err := ctrl.getAppProj(a)
if err != nil {
return nil, err
}

@@ -1093,7 +1102,7 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
}

ctrl.setOperationState(app, state)
if state.Phase.Completed() && !app.Operation.Sync.DryRun {
if state.Phase.Completed() && (app.Operation.Sync != nil && !app.Operation.Sync.DryRun) {
// if we just completed an operation, force a refresh so that UI will report up-to-date
// sync/health information
if _, err := cache.MetaNamespaceKeyFunc(app); err == nil {

@@ -1638,7 +1647,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
return nil, nil
}

proj, err := ctrl.getAppProj(app)
proj, err := applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()).AppProjects(ctrl.namespace).Get(app.Spec.GetProject())
if err != nil {
return nil, nil
}

@@ -785,11 +785,11 @@ func TestHandleOrphanedResourceUpdated(t *testing.T) {

isRequested, level := ctrl.isRefreshRequested(app1.Name)
assert.True(t, isRequested)
assert.Equal(t, ComparisonWithNothing, level)
assert.Equal(t, CompareWithRecent, level)

isRequested, level = ctrl.isRefreshRequested(app2.Name)
assert.True(t, isRequested)
assert.Equal(t, ComparisonWithNothing, level)
assert.Equal(t, CompareWithRecent, level)
}

func TestGetResourceTree_HasOrphanedResources(t *testing.T) {
73  controller/cache/cache.go (vendored)

@@ -3,7 +3,7 @@ package cache
import (
"context"
"fmt"
"os"
"math"
"reflect"
"sync"
"time"

@@ -24,6 +24,7 @@ import (
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/env"
logutils "github.com/argoproj/argo-cd/v2/util/log"
"github.com/argoproj/argo-cd/v2/util/lua"
"github.com/argoproj/argo-cd/v2/util/settings"

@@ -32,20 +33,42 @@ import (
const (
// EnvClusterCacheResyncDuration is the env variable that holds cluster cache re-sync duration
EnvClusterCacheResyncDuration = "ARGOCD_CLUSTER_CACHE_RESYNC_DURATION"

// EnvClusterCacheWatchResyncDuration is the env variable that holds cluster cache watch re-sync duration
EnvClusterCacheWatchResyncDuration = "ARGOCD_CLUSTER_CACHE_WATCH_RESYNC_DURATION"

// EnvClusterCacheListPageSize is the env variable to control size of the list page size when making K8s queries
EnvClusterCacheListPageSize = "ARGOCD_CLUSTER_CACHE_LIST_PAGE_SIZE"

// EnvClusterCacheListSemaphore is the env variable to control size of the list semaphore
// This is used to limit the number of concurrent memory consuming operations on the
// k8s list queries results across all clusters to avoid memory spikes during cache initialization.
EnvClusterCacheListSemaphore = "ARGOCD_CLUSTER_CACHE_LIST_SEMAPHORE"
)

// GitOps engine cluster cache tuning options
var (
// K8SClusterResyncDuration controls the duration of cluster cache refresh
K8SClusterResyncDuration = 12 * time.Hour
// clusterCacheResyncDuration controls the duration of cluster cache refresh.
// NOTE: this differs from gitops-engine default of 24h
clusterCacheResyncDuration = 12 * time.Hour

// clusterCacheWatchResyncDuration controls the maximum duration that group/kind watches are allowed to run
// for before relisting & restarting the watch
clusterCacheWatchResyncDuration = 10 * time.Minute

// The default limit of 50 is chosen based on experiments.
clusterCacheListSemaphoreSize int64 = 50

// clusterCacheListPageSize is the page size when performing K8s list requests.
// 500 is equal to kubectl's size
clusterCacheListPageSize int64 = 500
)

func init() {

if clusterResyncDurationStr := os.Getenv(EnvClusterCacheResyncDuration); clusterResyncDurationStr != "" {
if duration, err := time.ParseDuration(clusterResyncDurationStr); err == nil {
K8SClusterResyncDuration = duration
}
}
clusterCacheResyncDuration = env.ParseDurationFromEnv(EnvClusterCacheResyncDuration, clusterCacheResyncDuration, 0, math.MaxInt64)
clusterCacheWatchResyncDuration = env.ParseDurationFromEnv(EnvClusterCacheWatchResyncDuration, clusterCacheWatchResyncDuration, 0, math.MaxInt64)
clusterCacheListPageSize = env.ParseInt64FromEnv(EnvClusterCacheListPageSize, clusterCacheListPageSize, 0, math.MaxInt64)
clusterCacheListSemaphoreSize = env.ParseInt64FromEnv(EnvClusterCacheListSemaphore, clusterCacheListSemaphoreSize, 0, math.MaxInt64)
}

type LiveStateCache interface {

@@ -109,15 +132,13 @@ func NewLiveStateCache(
resourceTracking argo.ResourceTracking) LiveStateCache {

return &liveStateCache{
appInformer: appInformer,
db: db,
clusters: make(map[string]clustercache.ClusterCache),
onObjectUpdated: onObjectUpdated,
kubectl: kubectl,
settingsMgr: settingsMgr,
metricsServer: metricsServer,
// The default limit of 50 is chosen based on experiments.
listSemaphore: semaphore.NewWeighted(50),
appInformer: appInformer,
db: db,
clusters: make(map[string]clustercache.ClusterCache),
onObjectUpdated: onObjectUpdated,
kubectl: kubectl,
settingsMgr: settingsMgr,
metricsServer: metricsServer,
clusterFilter: clusterFilter,
resourceTracking: resourceTracking,
}

@@ -138,10 +159,6 @@ type liveStateCache struct {
clusterFilter func(cluster *appv1.Cluster) bool
resourceTracking argo.ResourceTracking

// listSemaphore is used to limit the number of concurrent memory consuming operations on the
// k8s list queries results across all clusters to avoid memory spikes during cache initialization.
listSemaphore *semaphore.Weighted

clusters map[string]clustercache.ClusterCache
cacheSettings cacheSettings
lock sync.RWMutex

@@ -289,9 +306,11 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
}

trackingMethod := argo.GetTrackingMethod(c.settingsMgr)
clusterCache = clustercache.NewClusterCache(cluster.RESTConfig(),
clustercache.SetListSemaphore(c.listSemaphore),
clustercache.SetResyncTimeout(K8SClusterResyncDuration),
clusterCacheOpts := []clustercache.UpdateSettingsFunc{
clustercache.SetListSemaphore(semaphore.NewWeighted(clusterCacheListSemaphoreSize)),
clustercache.SetListPageSize(clusterCacheListPageSize),
clustercache.SetWatchResyncTimeout(clusterCacheWatchResyncDuration),
clustercache.SetResyncTimeout(clusterCacheResyncDuration),
clustercache.SetSettings(cacheSettings.clusterSettings),
clustercache.SetNamespaces(cluster.Namespaces),
clustercache.SetClusterResources(cluster.ClusterResources),

@@ -311,7 +330,9 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
return res, res.AppName != "" || gvk.Kind == kube.CustomResourceDefinitionKind
}),
clustercache.SetLogr(logutils.NewLogrusLogger(log.WithField("server", cluster.Server))),
)
}

clusterCache = clustercache.NewClusterCache(cluster.RESTConfig(), clusterCacheOpts...)

_ = clusterCache.OnResourceUpdated(func(newRes *clustercache.Resource, oldRes *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) {
toNotify := make(map[string]bool)
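The cluster cache knobs introduced above are read from environment variables on the application controller at startup. A minimal sketch of how an operator might override them, assuming the controller runs as the default `argocd-application-controller` StatefulSet and using illustrative values (not part of this diff):

```yaml
# Sketch only: setting the ARGOCD_CLUSTER_CACHE_* tuning variables described above.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: argocd-application-controller
spec:
  template:
    spec:
      containers:
      - name: argocd-application-controller
        env:
        - name: ARGOCD_CLUSTER_CACHE_RESYNC_DURATION
          value: "24h"   # code default above is 12h
        - name: ARGOCD_CLUSTER_CACHE_WATCH_RESYNC_DURATION
          value: "10m"
        - name: ARGOCD_CLUSTER_CACHE_LIST_PAGE_SIZE
          value: "500"
        - name: ARGOCD_CLUSTER_CACHE_LIST_SEMAPHORE
          value: "50"
```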
@@ -14,3 +14,5 @@ data:
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
@@ -406,6 +406,8 @@ data:
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
```
!!! note
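The two new `github.com` host keys shown above land in the `ssh_known_hosts` data of the SSH known-hosts ConfigMap (the manifest hunks later in this diff apply the same change; note the scalar style also changes from `|` to `|-`, dropping the trailing newline). A minimal sketch of that ConfigMap's shape, assuming the default `argocd-ssh-known-hosts-cm` name and showing only the short ed25519 entry; all other host entries from the hunks above are elided here:

```yaml
# Sketch only: where the new github.com host keys are stored.
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-ssh-known-hosts-cm
  namespace: argocd
data:
  ssh_known_hosts: |-
    github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
```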
@@ -1,26 +1,26 @@
# Roadmap

- [Roadmap](#roadmap)
- [v2.2](#v22)
- [Config Management Tools Integrations (proposal)](#config-management-tools-integrations-proposal)
- [Argo CD Extensions (proposal)](#argo-cd-extensions-proposal)
- [Project scoped repository and clusters (proposal)](#project-scoped-repository-and-clusters-proposal)
- [v2.3 and beyond](#v23-and-beyond)
- [Input Forms UI Refresh](#input-forms-ui-refresh)
- [Merge ApplicationSet controller into Argo CD](#merge-applicationset-controller-into-argo-cd)
- [v2.3](#v23)
- [Merge Argo CD Notifications into Argo CD](#merge-argo-cd-notifications-into-argo-cd)
- [Merge Argo CD Image Updater into Argo CD](#merge-argo-cd-image-updater-into-argo-cd)
- [Compact Resources Tree](#compact-resources-tree)
- [Input Forms UI Refresh](#input-forms-ui-refresh)
- [Compact resources tree](#compact-resources-tree)
- [Maintain difference in cluster and git values for specific fields](#maintain-difference-in-cluster-and-git-values-for-specific-fields)
- [Web Shell](#web-shell)
- [Helm values from external repo](#helm-values-from-external-repo)
- [v2.4 and beyond](#v24-and-beyond)
- [Merge ApplicationSet controller into Argo CD](#merge-applicationset-controller-into-argo-cd)
- [Merge Argo CD Image Updater into Argo CD](#merge-argo-cd-image-updater-into-argo-cd)
- [Config Management Tools Integrations UI/CLI](#config-management-tools-integrations-uicli)
- [Allow specifying parent/child relationships in config](#allow-specifying-parentchild-relationships-in-config)
- [Dependencies between applications](#dependencies-between-applications)
- [Maintain difference in cluster and git values for specific fields](#maintain-difference-in-cluster-and-git-values-for-specific-fields)
- [Multi-tenancy improvements](#multi-tenancy-improvements)
- [GitOps Engine Enhancements](#gitops-engine-enhancements)
- [Completed](#completed)
- [✅ Core Argo CD (proposal)](#core-argo-cd-aka-gitops-agent-proposal)
- [✅ Config Management Tools Integrations (proposal)](#-config-management-tools-integrations-proposal)
- [✅ Argo CD Extensions (proposal)](#-argo-cd-extensions-proposal)
- [✅ Project scoped repository and clusters (proposal)](#-project-scoped-repository-and-clusters-proposal)
- [✅ Core Argo CD (proposal)](#-core-argo-cd-proposal)
- [✅ Core Functionality Bug Fixes](#-core-functionality-bug-fixes)
- [✅ Performance](#-performance)
- [✅ ApplicationSet](#-applicationset)

@@ -30,50 +30,24 @@
- [✅ Automated Registry Monitoring](#-automated-registry-monitoring)
- [✅ Projects Enhancements](#-projects-enhancements)

## v2.2

### Config Management Tools Integrations ([proposal](https://github.com/argoproj/argo-cd/pull/5927))

The community likes the first class support of Helm and Kustomize and keeps requesting support for more tools.
Argo CD provides a mechanism to integrate with any config management tool. We need to investigate why
it is not enough and implement the missing features.

### Argo CD Extensions ([proposal](https://github.com/argoproj/argo-cd/pull/6240))

Argo CD supports customizing handling of Kubernetes resources via diffing customizations,
health checks, and custom actions. The Argo CD Extensions proposal takes it to the next
level and allows delivering the resource customizations along with custom visualizations in Argo CD
via a Git repository.

### Project scoped repository and clusters ([proposal](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/project-repos-and-clusters.md))

The feature streamlines the process of adding repositories and clusters to the project and makes it self-service.
Instead of asking an administrator to change Argo CD settings, end users can perform the change independently.

## v2.3 and beyond

### Input Forms UI Refresh

Improved design of the input forms in Argo CD Web UI: https://www.figma.com/file/IIlsFqqmM5UhqMVul9fQNq/Argo-CD?node-id=0%3A1

### Merge ApplicationSet controller into Argo CD

The ApplicationSet functionality is available in Argo CD out-of-the-box ([#7351](https://github.com/argoproj/argo-cd/issues/7351)).
The Argo CD UI/CLI/API allows managing ApplicationSet resources the same as Argo CD Applications ([#7352](https://github.com/argoproj/argo-cd/issues/7352)).
## v2.3

### Merge Argo CD Notifications into Argo CD

The [Argo CD Notifications](https://github.com/argoproj-labs/argocd-notifications) should be merged into Argo CD and available out-of-the-box: [#7350](https://github.com/argoproj/argo-cd/issues/7350)

### Merge Argo CD Image Updater into Argo CD
### Input Forms UI Refresh

The [Argo CD Image Updater](https://github.com/argoproj-labs/argocd-image-updater) should be merged into Argo CD and available out-of-the-box: [#7385](https://github.com/argoproj/argo-cd/issues/7385)
Improved design of the input forms in Argo CD Web UI: https://www.figma.com/file/IIlsFqqmM5UhqMVul9fQNq/Argo-CD?node-id=0%3A1

### Compact resources tree

An ability to collapse leaf resources in the tree to improve visualization of very large applications: [#7349](https://github.com/argoproj/argo-cd/issues/7349)

### Maintain difference in cluster and git values for specific fields

The feature allows avoiding updates to fields excluded from diffing ([#2913](https://github.com/argoproj/argo-cd/issues/2913)).

### Web Shell

Exec into the Kubernetes Pod right from Argo CD Web UI! [#4351](https://github.com/argoproj/argo-cd/issues/4351)

@@ -82,6 +56,21 @@ Exec into the Kubernetes Pod right from Argo CD Web UI! [#4351](https://github.c

The feature allows combining an off-the-shelf Helm chart and a value file in a Git repository ([#2789](https://github.com/argoproj/argo-cd/issues/2789))

## v2.4 and beyond

### Merge ApplicationSet controller into Argo CD

The ApplicationSet functionality is available in Argo CD out-of-the-box ([#7351](https://github.com/argoproj/argo-cd/issues/7351)).
The Argo CD UI/CLI/API allows managing ApplicationSet resources the same as Argo CD Applications ([#7352](https://github.com/argoproj/argo-cd/issues/7352)).

### Merge Argo CD Image Updater into Argo CD

The [Argo CD Image Updater](https://github.com/argoproj-labs/argocd-image-updater) should be merged into Argo CD and available out-of-the-box: [#7385](https://github.com/argoproj/argo-cd/issues/7385)

### Config Management Tools Integrations UI/CLI

The continuation of the Config Management Tools [proposal](https://github.com/argoproj/argo-cd/pull/5927). The Argo CD UI/CLI

@@ -96,9 +85,6 @@ visualize custom resources that don't have owner references.

The feature allows specifying dependencies between applications that allow orchestrating synchronization of multiple applications. [#3517](https://github.com/argoproj/argo-cd/issues/3517)

### Maintain difference in cluster and git values for specific fields

The feature allows avoiding updates to fields excluded from diffing ([#2913](https://github.com/argoproj/argo-cd/issues/2913)).

### Multi-tenancy improvements

@@ -119,6 +105,25 @@ A lot of Argo CD features are still not available in GitOps engine. The followin

## Completed

### ✅ Config Management Tools Integrations ([proposal](https://github.com/argoproj/argo-cd/pull/5927))

The community likes the first class support of Helm and Kustomize and keeps requesting support for more tools.
Argo CD provides a mechanism to integrate with any config management tool. We need to investigate why
it is not enough and implement the missing features.

### ✅ Argo CD Extensions ([proposal](https://github.com/argoproj/argo-cd/pull/6240))

Argo CD supports customizing handling of Kubernetes resources via diffing customizations,
health checks, and custom actions. The Argo CD Extensions proposal takes it to the next
level and allows delivering the resource customizations along with custom visualizations in Argo CD
via a Git repository.

### ✅ Project scoped repository and clusters ([proposal](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/project-repos-and-clusters.md))

The feature streamlines the process of adding repositories and clusters to the project and makes it self-service.
Instead of asking an administrator to change Argo CD settings, end users can perform the change independently.

### ✅ Core Argo CD ([proposal](https://github.com/argoproj/argo-cd/pull/6385))

Core Argo CD allows installation and use of a lightweight Argo CD that includes only the backend, without exposing the API or UI.
@@ -51,7 +51,7 @@ more with other Kubernetes tools and Argo CD is never confused about the owner o

## Choosing a tracking method

To actually select your preferred tracking method edit the `resourceTrackingMethod` value contained inside the `argo-cm` configmap.
To actually select your preferred tracking method edit the `resourceTrackingMethod` value contained inside the `argocd-cm` configmap.

```yaml
apiVersion: v1
```
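The YAML snippet above is cut off by the diff context. For reference, a minimal sketch of what the `argocd-cm` edit might look like; the `application.resourceTrackingMethod` key and the `annotation` value are assumptions based on the Argo CD resource tracking documentation, not part of this diff:

```yaml
# Sketch only: selecting annotation-based resource tracking via argocd-cm.
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
data:
  application.resourceTrackingMethod: annotation
```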
7  go.mod

@@ -3,15 +3,16 @@ module github.com/argoproj/argo-cd/v2
go 1.16

require (
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible
github.com/Masterminds/semver v1.5.0
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/alicebob/miniredis/v2 v2.14.2
github.com/argoproj/gitops-engine v0.4.1-0.20211103220110-c7bab2eeca22
github.com/argoproj/pkg v0.9.1
github.com/argoproj/gitops-engine v0.5.2
github.com/argoproj/pkg v0.11.1-0.20211203175135-36c59d8fafe0
github.com/bombsimon/logrusr v1.0.0
github.com/bradleyfalzon/ghinstallation/v2 v2.0.2
github.com/casbin/casbin v1.9.1
github.com/casbin/casbin/v2 v2.39.1
github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1 // indirect
github.com/coreos/go-oidc v2.1.0+incompatible
github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1
12
go.sum
12
go.sum
@@ -103,10 +103,10 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/argoproj/gitops-engine v0.4.1-0.20211103220110-c7bab2eeca22 h1:2i8r5XiuBDf7uYP4R5ZuSNsPebPw15g/LePNIX7YYi8=
|
||||
github.com/argoproj/gitops-engine v0.4.1-0.20211103220110-c7bab2eeca22/go.mod h1:K2RYpGXh11VdFwDksS23SyFTOJaPcsF+MVJ/FHlqEOE=
|
||||
github.com/argoproj/pkg v0.9.1 h1:osfOS3QkzfRf+W43lbCZb0o0bzrBweQhL+U3rgEg+5M=
|
||||
github.com/argoproj/pkg v0.9.1/go.mod h1:ra+bQPmbVAoEL+gYSKesuigt4m49i3Qa3mE/xQcjCiA=
|
||||
github.com/argoproj/gitops-engine v0.5.2 h1:UQ2ajVyUPCSgFyqidzlTXddh/Xf6cE3I0s9uu92BoJg=
|
||||
github.com/argoproj/gitops-engine v0.5.2/go.mod h1:K2RYpGXh11VdFwDksS23SyFTOJaPcsF+MVJ/FHlqEOE=
|
||||
github.com/argoproj/pkg v0.11.1-0.20211203175135-36c59d8fafe0 h1:Cfp7rO/HpVxnwlRqJe0jHiBbZ77ZgXhB6HWlYD02Xdc=
|
||||
github.com/argoproj/pkg v0.11.1-0.20211203175135-36c59d8fafe0/go.mod h1:ra+bQPmbVAoEL+gYSKesuigt4m49i3Qa3mE/xQcjCiA=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
@@ -140,9 +140,9 @@ github.com/bombsimon/logrusr v1.0.0 h1:CTCkURYAt5nhCCnKH9eLShYayj2/8Kn/4Qg3QfiU+
|
||||
github.com/bombsimon/logrusr v1.0.0/go.mod h1:Jq0nHtvxabKE5EMwAAdgTaz7dfWE8C4i11NOltxGQpc=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.0.2 h1:VdhctVU4Kag+Yo5iuvEvFx4HNpLEI99Cm41UnE7y1WE=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.0.2/go.mod h1:GhRUp70E+QFvNemlFd4unyHZ8ryBiMQkJm6KgdilpUo=
|
||||
github.com/casbin/casbin v1.9.1 h1:ucjbS5zTrmSLtH4XogqOG920Poe6QatdXtz1FEbApeM=
|
||||
github.com/casbin/casbin v1.9.1/go.mod h1:z8uPsfBJGUsnkagrt3G8QvjgTKFMBJ32UP8HpZllfog=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/casbin/casbin/v2 v2.39.1 h1:TatfPL1hByffzPs610HL8+gBjCisAtEhjVhpIsbZ+ws=
|
||||
github.com/casbin/casbin/v2 v2.39.1/go.mod h1:sEL80qBYTbd+BPeL4iyvwYzFT3qwLaESq5aFKVLbLfA=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
|
||||
@@ -14,3 +14,5 @@ data:
|
||||
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
|
||||
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
|
||||
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.2.3
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
@@ -21,7 +21,7 @@ spec:
|
||||
serviceAccountName: argocd-redis
|
||||
containers:
|
||||
- name: redis
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
- "--save"
|
||||
|
||||
@@ -2763,7 +2763,7 @@ metadata:
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
ssh_known_hosts: |
|
||||
ssh_known_hosts: |-
|
||||
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
|
||||
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
|
||||
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
|
||||
@@ -2771,6 +2771,8 @@ data:
|
||||
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
|
||||
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
|
||||
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
@@ -2888,7 +2890,7 @@ spec:
|
||||
- ""
|
||||
- --appendonly
|
||||
- "no"
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -3016,7 +3018,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -3065,7 +3067,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -3230,7 +3232,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
||||
@@ -11,4 +11,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.2.3
|
||||
|
||||
@@ -11,7 +11,7 @@ patchesStrategicMerge:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.2.3
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/dex
|
||||
|
||||
@@ -878,7 +878,7 @@ spec:
|
||||
automountServiceAccountToken: false
|
||||
initContainers:
|
||||
- name: config-init
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
{}
|
||||
@@ -906,7 +906,7 @@ spec:
|
||||
|
||||
containers:
|
||||
- name: redis
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- redis-server
|
||||
@@ -947,7 +947,7 @@ spec:
|
||||
lifecycle:
|
||||
{}
|
||||
- name: sentinel
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- redis-sentinel
|
||||
|
||||
@@ -15,6 +15,6 @@ redis-ha:
|
||||
client: 6m
|
||||
checkInterval: 3s
|
||||
image:
|
||||
tag: 6.2.4-alpine
|
||||
tag: 6.2.6-alpine
|
||||
sentinel:
|
||||
bind: "0.0.0.0"
|
||||
|
||||
@@ -3400,7 +3400,7 @@ metadata:
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
ssh_known_hosts: |
|
||||
ssh_known_hosts: |-
|
||||
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
|
||||
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
|
||||
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
|
||||
@@ -3408,6 +3408,8 @@ data:
|
||||
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
|
||||
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
|
||||
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
@@ -3707,7 +3709,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -3924,7 +3926,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -3973,7 +3975,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -4200,7 +4202,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -4396,7 +4398,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -4478,7 +4480,7 @@ spec:
|
||||
- /data/conf/redis.conf
|
||||
command:
|
||||
- redis-server
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -4516,7 +4518,7 @@ spec:
|
||||
- /data/conf/sentinel.conf
|
||||
command:
|
||||
- redis-sentinel
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -4562,7 +4564,7 @@ spec:
|
||||
value: 40000915ab58c3fa8fd888fb8b24711944e6cbb4
|
||||
- name: SENTINEL_ID_2
|
||||
value: 2bbec7894d954a8af3bb54d13eaec53cb024e2ca
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
volumeMounts:
|
||||
|
||||
@@ -759,7 +759,7 @@ metadata:
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
ssh_known_hosts: |
|
||||
ssh_known_hosts: |-
|
||||
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
|
||||
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
|
||||
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
|
||||
@@ -767,6 +767,8 @@ data:
|
||||
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
|
||||
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
|
||||
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
@@ -1066,7 +1068,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -1283,7 +1285,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1332,7 +1334,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -1559,7 +1561,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -1755,7 +1757,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -1837,7 +1839,7 @@ spec:
|
||||
- /data/conf/redis.conf
|
||||
command:
|
||||
- redis-server
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -1875,7 +1877,7 @@ spec:
|
||||
- /data/conf/sentinel.conf
|
||||
command:
|
||||
- redis-sentinel
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
@@ -1921,7 +1923,7 @@ spec:
|
||||
value: 40000915ab58c3fa8fd888fb8b24711944e6cbb4
|
||||
- name: SENTINEL_ID_2
|
||||
value: 2bbec7894d954a8af3bb54d13eaec53cb024e2ca
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
volumeMounts:
|
||||
|
||||
@@ -2875,7 +2875,7 @@ metadata:
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
ssh_known_hosts: |
|
||||
ssh_known_hosts: |-
|
||||
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
|
||||
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
|
||||
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
|
||||
@@ -2883,6 +2883,8 @@ data:
|
||||
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
|
||||
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
|
||||
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
@@ -3077,7 +3079,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -3130,7 +3132,7 @@ spec:
|
||||
- ""
|
||||
- --appendonly
|
||||
- "no"
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -3258,7 +3260,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -3307,7 +3309,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -3530,7 +3532,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3720,7 +3722,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
||||
@@ -234,7 +234,7 @@ metadata:
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
ssh_known_hosts: |
|
||||
ssh_known_hosts: |-
|
||||
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
|
||||
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
|
||||
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
|
||||
@@ -242,6 +242,8 @@ data:
|
||||
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
|
||||
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
|
||||
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
|
||||
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
@@ -436,7 +438,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -489,7 +491,7 @@ spec:
|
||||
- ""
|
||||
- --appendonly
|
||||
- "no"
|
||||
image: redis:6.2.4-alpine
|
||||
image: redis:6.2.6-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -617,7 +619,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -666,7 +668,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -889,7 +891,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -1079,7 +1081,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
||||
@@ -36,12 +36,12 @@ var (

func init() {
if envQPS := os.Getenv(EnvK8sClientQPS); envQPS != "" {
if qps, err := strconv.ParseFloat(envQPS, 32); err != nil {
if qps, err := strconv.ParseFloat(envQPS, 32); err == nil {
K8sClientConfigQPS = float32(qps)
}
}
if envBurst := os.Getenv(EnvK8sClientBurst); envBurst != "" {
if burst, err := strconv.Atoi(envBurst); err != nil {
if burst, err := strconv.Atoi(envBurst); err == nil {
K8sClientConfigBurst = burst
}
} else {
@@ -49,7 +49,7 @@ func init() {
}

if envMaxConn := os.Getenv(EnvK8sClientMaxIdleConnections); envMaxConn != "" {
if maxConn, err := strconv.Atoi(envMaxConn); err != nil {
if maxConn, err := strconv.Atoi(envMaxConn); err == nil {
K8sMaxIdleConnections = maxConn
}
}

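The hunk above fixes inverted error checks: with err != nil the override only fired when parsing failed, assigning a zero value, while a valid setting was ignored. A minimal standalone sketch of the corrected pattern; the variable and env names below are illustrative, not the repository's:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Compiled-in defaults; stand-ins for K8sClientConfigQPS / K8sClientConfigBurst.
var clientQPS float32 = 50
var clientBurst = 100

func applyEnvOverrides() {
	if v := os.Getenv("EXAMPLE_CLIENT_QPS"); v != "" {
		// Only override when parsing succeeds (err == nil); otherwise keep the default.
		if qps, err := strconv.ParseFloat(v, 32); err == nil {
			clientQPS = float32(qps)
		}
	}
	if v := os.Getenv("EXAMPLE_CLIENT_BURST"); v != "" {
		if burst, err := strconv.Atoi(v); err == nil {
			clientBurst = burst
		}
	}
}

func main() {
	applyEnvOverrides()
	fmt.Println(clientQPS, clientBurst)
}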
@@ -1569,8 +1569,8 @@ func validatePolicy(proj string, role string, policy string) error {
}
// resource
resource := strings.Trim(policyComponents[2], " ")
if resource != "applications" {
return status.Errorf(codes.InvalidArgument, "invalid policy rule '%s': project resource must be: 'applications', not '%s'", policy, resource)
if resource != "applications" && resource != "repositories" && resource != "clusters" {
return status.Errorf(codes.InvalidArgument, "invalid policy rule '%s': project resource must be: 'applications', 'repositories' or 'clusters', not '%s'", policy, resource)
}
// action
action := strings.Trim(policyComponents[3], " ")
@@ -1579,7 +1579,7 @@ func validatePolicy(proj string, role string, policy string) error {
}
// object
object := strings.Trim(policyComponents[4], " ")
objectRegexp, err := regexp.Compile(fmt.Sprintf(`^%s/[*\w-.]+$`, proj))
objectRegexp, err := regexp.Compile(fmt.Sprintf(`^%s/[*\w-.]+$`, regexp.QuoteMeta(proj)))
if err != nil || !objectRegexp.MatchString(object) {
return status.Errorf(codes.InvalidArgument, "invalid policy rule '%s': object must be of form '%s/*' or '%s/<APPNAME>', not '%s'", policy, proj, proj, object)
}

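The QuoteMeta change matters because the project name is interpolated into a regular expression: without escaping, a project named "some.project" would also match objects under "someXproject". A small sketch of the escaped check; the helper below is illustrative, not the repository's validatePolicy:

package main

import (
	"fmt"
	"regexp"
)

// objectMatches reports whether object matches "<proj>/<app name or *>",
// escaping the project name so "." is treated literally.
func objectMatches(proj, object string) bool {
	re := regexp.MustCompile(fmt.Sprintf(`^%s/[*\w-.]+$`, regexp.QuoteMeta(proj)))
	return re.MatchString(object)
}

func main() {
	fmt.Println(objectMatches("some.project", "some.project/guestbook")) // true
	fmt.Println(objectMatches("some.project", "some-project/guestbook")) // false: "." no longer acts as a wildcard
}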
@@ -2561,3 +2561,22 @@ func TestOrphanedResourcesMonitorSettings_IsWarn(t *testing.T) {
settings.Warn = pointer.BoolPtr(true)
assert.True(t, settings.IsWarn())
}

func Test_validatePolicy_projIsNotRegex(t *testing.T) {
// Make sure the "." in "some.project" isn't treated as the regex wildcard.
err := validatePolicy("some.project", "org-admin", "p, proj:some.project:org-admin, applications, *, some-project/*, allow")
assert.Error(t, err)

err = validatePolicy("some.project", "org-admin", "p, proj:some.project:org-admin, applications, *, some.project/*, allow")
assert.NoError(t, err)

err = validatePolicy("some-project", "org-admin", "p, proj:some-project:org-admin, applications, *, some-project/*, allow")
assert.NoError(t, err)
}

func Test_validatePolicy_ValidResource(t *testing.T) {
err := validatePolicy("some-project", "org-admin", "p, proj:some-project:org-admin, repositories, *, some-project/*, allow")
assert.NoError(t, err)
err = validatePolicy("some-project", "org-admin", "p, proj:some-project:org-admin, clusters, *, some-project/*, allow")
assert.NoError(t, err)
}

26
reposerver/cache/cache.go
vendored
@@ -10,8 +10,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/text"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-redis/redis/v8"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -19,6 +18,7 @@ import (
|
||||
|
||||
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
"github.com/argoproj/argo-cd/v2/util/hash"
|
||||
@@ -138,12 +138,16 @@ func (c *Cache) GetGitReferences(repo string, references *[]*plumbing.Reference)
|
||||
return nil
|
||||
}
|
||||
|
||||
func manifestCacheKey(revision string, appSrc *appv1.ApplicationSource, namespace string, appLabelKey string, appName string, info ClusterRuntimeInfo) string {
|
||||
return fmt.Sprintf("mfst|%s|%s|%s|%s|%d", appLabelKey, appName, revision, namespace, appSourceKey(appSrc)+clusterRuntimeInfoKey(info))
|
||||
func manifestCacheKey(revision string, appSrc *appv1.ApplicationSource, namespace string, trackingMethod string, appLabelKey string, appName string, info ClusterRuntimeInfo) string {
|
||||
trackingKey := appLabelKey
|
||||
if text.FirstNonEmpty(trackingMethod, string(argo.TrackingMethodLabel)) != string(argo.TrackingMethodLabel) {
|
||||
trackingKey = trackingMethod + ":" + trackingKey
|
||||
}
|
||||
return fmt.Sprintf("mfst|%s|%s|%s|%s|%d", trackingKey, appName, revision, namespace, appSourceKey(appSrc)+clusterRuntimeInfoKey(info))
|
||||
}
|
||||
|
||||
func (c *Cache) GetManifests(revision string, appSrc *appv1.ApplicationSource, clusterInfo ClusterRuntimeInfo, namespace string, appLabelKey string, appName string, res *CachedManifestResponse) error {
|
||||
err := c.cache.GetItem(manifestCacheKey(revision, appSrc, namespace, appLabelKey, appName, clusterInfo), res)
|
||||
func (c *Cache) GetManifests(revision string, appSrc *appv1.ApplicationSource, clusterInfo ClusterRuntimeInfo, namespace string, trackingMethod string, appLabelKey string, appName string, res *CachedManifestResponse) error {
|
||||
err := c.cache.GetItem(manifestCacheKey(revision, appSrc, namespace, trackingMethod, appLabelKey, appName, clusterInfo), res)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -158,7 +162,7 @@ func (c *Cache) GetManifests(revision string, appSrc *appv1.ApplicationSource, c
|
||||
if hash != res.CacheEntryHash || res.ManifestResponse == nil && res.MostRecentError == "" {
|
||||
log.Warnf("Manifest hash did not match expected value or cached manifests response is empty, treating as a cache miss: %s", appName)
|
||||
|
||||
err = c.DeleteManifests(revision, appSrc, clusterInfo, namespace, appLabelKey, appName)
|
||||
err = c.DeleteManifests(revision, appSrc, clusterInfo, namespace, trackingMethod, appLabelKey, appName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to delete manifest after hash mismatch, %v", err)
|
||||
}
|
||||
@@ -173,7 +177,7 @@ func (c *Cache) GetManifests(revision string, appSrc *appv1.ApplicationSource, c
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) SetManifests(revision string, appSrc *appv1.ApplicationSource, clusterInfo ClusterRuntimeInfo, namespace string, appLabelKey string, appName string, res *CachedManifestResponse) error {
|
||||
func (c *Cache) SetManifests(revision string, appSrc *appv1.ApplicationSource, clusterInfo ClusterRuntimeInfo, namespace string, trackingMethod string, appLabelKey string, appName string, res *CachedManifestResponse) error {
|
||||
|
||||
// Generate and apply the cache entry hash, before writing
|
||||
if res != nil {
|
||||
@@ -185,11 +189,11 @@ func (c *Cache) SetManifests(revision string, appSrc *appv1.ApplicationSource, c
|
||||
res.CacheEntryHash = hash
|
||||
}
|
||||
|
||||
return c.cache.SetItem(manifestCacheKey(revision, appSrc, namespace, appLabelKey, appName, clusterInfo), res, c.repoCacheExpiration, res == nil)
|
||||
return c.cache.SetItem(manifestCacheKey(revision, appSrc, namespace, trackingMethod, appLabelKey, appName, clusterInfo), res, c.repoCacheExpiration, res == nil)
|
||||
}
|
||||
|
||||
func (c *Cache) DeleteManifests(revision string, appSrc *appv1.ApplicationSource, clusterInfo ClusterRuntimeInfo, namespace string, appLabelKey string, appName string) error {
|
||||
return c.cache.SetItem(manifestCacheKey(revision, appSrc, namespace, appLabelKey, appName, clusterInfo), "", c.repoCacheExpiration, true)
|
||||
func (c *Cache) DeleteManifests(revision string, appSrc *appv1.ApplicationSource, clusterInfo ClusterRuntimeInfo, namespace string, trackingMethod string, appLabelKey string, appName string) error {
|
||||
return c.cache.SetItem(manifestCacheKey(revision, appSrc, namespace, trackingMethod, appLabelKey, appName, clusterInfo), "", c.repoCacheExpiration, true)
|
||||
}
|
||||
|
||||
func appDetailsCacheKey(revision string, appSrc *appv1.ApplicationSource, trackingMethod appv1.TrackingMethod) string {
|
||||
|
||||
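The cache.go change above folds the application's tracking method into the manifest cache key, so manifests generated under label-based and annotation-based tracking are cached under different keys. A rough sketch of how such a composite key can be built; the field and constant names below are illustrative, not the repository's:

package main

import "fmt"

const trackingMethodLabel = "label" // stand-in for argo.TrackingMethodLabel

// manifestKey mimics the idea: the tracking method is prefixed onto the key only
// when it differs from the default label-based method, keeping existing keys stable.
func manifestKey(revision, namespace, appLabelKey, appName, trackingMethod string) string {
	trackingKey := appLabelKey
	if trackingMethod != "" && trackingMethod != trackingMethodLabel {
		trackingKey = trackingMethod + ":" + trackingKey
	}
	return fmt.Sprintf("mfst|%s|%s|%s|%s", trackingKey, appName, revision, namespace)
}

func main() {
	fmt.Println(manifestKey("abc123", "default", "app.kubernetes.io/instance", "guestbook", ""))
	fmt.Println(manifestKey("abc123", "default", "app.kubernetes.io/instance", "guestbook", "annotation"))
}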
22
reposerver/cache/cache_test.go
vendored
@@ -73,29 +73,29 @@ func TestCache_GetManifests(t *testing.T) {
|
||||
// cache miss
|
||||
q := &apiclient.ManifestRequest{}
|
||||
value := &CachedManifestResponse{}
|
||||
err := cache.GetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "my-app-label-key", "my-app-label-value", value)
|
||||
err := cache.GetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "", "my-app-label-key", "my-app-label-value", value)
|
||||
assert.Equal(t, ErrCacheMiss, err)
|
||||
// populate cache
|
||||
res := &CachedManifestResponse{ManifestResponse: &apiclient.ManifestResponse{SourceType: "my-source-type"}}
|
||||
err = cache.SetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "my-app-label-key", "my-app-label-value", res)
|
||||
err = cache.SetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "", "my-app-label-key", "my-app-label-value", res)
|
||||
assert.NoError(t, err)
|
||||
// cache miss
|
||||
err = cache.GetManifests("other-revision", &ApplicationSource{}, q, "my-namespace", "my-app-label-key", "my-app-label-value", value)
|
||||
err = cache.GetManifests("other-revision", &ApplicationSource{}, q, "my-namespace", "", "my-app-label-key", "my-app-label-value", value)
|
||||
assert.Equal(t, ErrCacheMiss, err)
|
||||
// cache miss
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{Path: "other-path"}, q, "my-namespace", "my-app-label-key", "my-app-label-value", value)
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{Path: "other-path"}, q, "my-namespace", "", "my-app-label-key", "my-app-label-value", value)
|
||||
assert.Equal(t, ErrCacheMiss, err)
|
||||
// cache miss
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{}, q, "other-namespace", "my-app-label-key", "my-app-label-value", value)
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{}, q, "other-namespace", "", "my-app-label-key", "my-app-label-value", value)
|
||||
assert.Equal(t, ErrCacheMiss, err)
|
||||
// cache miss
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "other-app-label-key", "my-app-label-value", value)
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "", "other-app-label-key", "my-app-label-value", value)
|
||||
assert.Equal(t, ErrCacheMiss, err)
|
||||
// cache miss
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "my-app-label-key", "other-app-label-value", value)
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "", "my-app-label-key", "other-app-label-value", value)
|
||||
assert.Equal(t, ErrCacheMiss, err)
|
||||
// cache hit
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "my-app-label-key", "my-app-label-value", value)
|
||||
err = cache.GetManifests("my-revision", &ApplicationSource{}, q, "my-namespace", "", "my-app-label-key", "my-app-label-value", value)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, &CachedManifestResponse{ManifestResponse: &apiclient.ManifestResponse{SourceType: "my-source-type"}}, value)
|
||||
}
|
||||
@@ -154,7 +154,7 @@ func TestCachedManifestResponse_HashBehavior(t *testing.T) {
|
||||
NumberOfCachedResponsesReturned: 0,
|
||||
NumberOfConsecutiveFailures: 0,
|
||||
}
|
||||
err := repoCache.SetManifests(response.Revision, appSrc, &apiclient.ManifestRequest{}, response.Namespace, appKey, appValue, store)
|
||||
err := repoCache.SetManifests(response.Revision, appSrc, &apiclient.ManifestRequest{}, response.Namespace, "", appKey, appValue, store)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -185,7 +185,7 @@ func TestCachedManifestResponse_HashBehavior(t *testing.T) {
|
||||
|
||||
// Retrieve the value using 'GetManifests' and confirm it works
|
||||
retrievedVal := &CachedManifestResponse{}
|
||||
err = repoCache.GetManifests(response.Revision, appSrc, &apiclient.ManifestRequest{}, response.Namespace, appKey, appValue, retrievedVal)
|
||||
err = repoCache.GetManifests(response.Revision, appSrc, &apiclient.ManifestRequest{}, response.Namespace, "", appKey, appValue, retrievedVal)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -208,7 +208,7 @@ func TestCachedManifestResponse_HashBehavior(t *testing.T) {
|
||||
|
||||
// Retrieve the value using GetManifests and confirm it returns a cache miss
|
||||
retrievedVal = &CachedManifestResponse{}
|
||||
err = repoCache.GetManifests(response.Revision, appSrc, &apiclient.ManifestRequest{}, response.Namespace, appKey, appValue, retrievedVal)
|
||||
err = repoCache.GetManifests(response.Revision, appSrc, &apiclient.ManifestRequest{}, response.Namespace, "", appKey, appValue, retrievedVal)
|
||||
|
||||
assert.True(t, err == cacheutil.ErrCacheMiss)
|
||||
|
||||
|
||||
@@ -338,7 +338,7 @@ func (s *Service) runManifestGen(repoRoot, commitSHA, cacheKey string, ctxSrc op
|
||||
// Retrieve a new copy (if available) of the cached response: this ensures we are updating the latest copy of the cache,
|
||||
// rather than a copy of the cache that occurred before (a potentially lengthy) manifest generation.
|
||||
innerRes := &cache.CachedManifestResponse{}
|
||||
cacheErr := s.cache.GetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.AppLabelKey, q.AppName, innerRes)
|
||||
cacheErr := s.cache.GetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.TrackingMethod, q.AppLabelKey, q.AppName, innerRes)
|
||||
if cacheErr != nil && cacheErr != reposervercache.ErrCacheMiss {
|
||||
log.Warnf("manifest cache set error %s: %v", q.ApplicationSource.String(), cacheErr)
|
||||
return nil, cacheErr
|
||||
@@ -353,7 +353,7 @@ func (s *Service) runManifestGen(repoRoot, commitSHA, cacheKey string, ctxSrc op
|
||||
// Update the cache to include failure information
|
||||
innerRes.NumberOfConsecutiveFailures++
|
||||
innerRes.MostRecentError = err.Error()
|
||||
cacheErr = s.cache.SetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.AppLabelKey, q.AppName, innerRes)
|
||||
cacheErr = s.cache.SetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.TrackingMethod, q.AppLabelKey, q.AppName, innerRes)
|
||||
if cacheErr != nil {
|
||||
log.Warnf("manifest cache set error %s: %v", q.ApplicationSource.String(), cacheErr)
|
||||
return nil, cacheErr
|
||||
@@ -372,7 +372,7 @@ func (s *Service) runManifestGen(repoRoot, commitSHA, cacheKey string, ctxSrc op
|
||||
}
|
||||
manifestGenResult.Revision = commitSHA
|
||||
manifestGenResult.VerifyResult = ctx.verificationResult
|
||||
err = s.cache.SetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.AppLabelKey, q.AppName, &manifestGenCacheEntry)
|
||||
err = s.cache.SetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.TrackingMethod, q.AppLabelKey, q.AppName, &manifestGenCacheEntry)
|
||||
if err != nil {
|
||||
log.Warnf("manifest cache set error %s/%s: %v", q.ApplicationSource.String(), cacheKey, err)
|
||||
}
|
||||
@@ -387,7 +387,7 @@ func (s *Service) runManifestGen(repoRoot, commitSHA, cacheKey string, ctxSrc op
|
||||
// If true is returned, either the second or third parameter (but not both) will contain a value from the cache (a ManifestResponse, or error, respectively)
|
||||
func (s *Service) getManifestCacheEntry(cacheKey string, q *apiclient.ManifestRequest, firstInvocation bool) (bool, *apiclient.ManifestResponse, error) {
|
||||
res := cache.CachedManifestResponse{}
|
||||
err := s.cache.GetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.AppLabelKey, q.AppName, &res)
|
||||
err := s.cache.GetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.TrackingMethod, q.AppLabelKey, q.AppName, &res)
|
||||
if err == nil {
|
||||
|
||||
// The cache contains an existing value
|
||||
@@ -406,7 +406,7 @@ func (s *Service) getManifestCacheEntry(cacheKey string, q *apiclient.ManifestRe
|
||||
// After X minutes, reset the cache and retry the operation (e.g. perhaps the error is ephemeral and has passed)
|
||||
if elapsedTimeInMinutes >= s.initConstants.PauseGenerationOnFailureForMinutes {
|
||||
// We can now try again, so reset the cache state and run the operation below
|
||||
err = s.cache.DeleteManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.AppLabelKey, q.AppName)
|
||||
err = s.cache.DeleteManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.TrackingMethod, q.AppLabelKey, q.AppName)
|
||||
if err != nil {
|
||||
log.Warnf("manifest cache set error %s/%s: %v", q.ApplicationSource.String(), cacheKey, err)
|
||||
}
|
||||
@@ -420,7 +420,7 @@ func (s *Service) getManifestCacheEntry(cacheKey string, q *apiclient.ManifestRe
|
||||
|
||||
if res.NumberOfCachedResponsesReturned >= s.initConstants.PauseGenerationOnFailureForRequests {
|
||||
// We can now try again, so reset the error cache state and run the operation below
|
||||
err = s.cache.DeleteManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.AppLabelKey, q.AppName)
|
||||
err = s.cache.DeleteManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.TrackingMethod, q.AppLabelKey, q.AppName)
|
||||
if err != nil {
|
||||
log.Warnf("manifest cache set error %s/%s: %v", q.ApplicationSource.String(), cacheKey, err)
|
||||
}
|
||||
@@ -438,7 +438,7 @@ func (s *Service) getManifestCacheEntry(cacheKey string, q *apiclient.ManifestRe
|
||||
// Increment the number of returned cached responses and push that new value to the cache
|
||||
// (if we have not already done so previously in this function)
|
||||
res.NumberOfCachedResponsesReturned++
|
||||
err = s.cache.SetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.AppLabelKey, q.AppName, &res)
|
||||
err = s.cache.SetManifests(cacheKey, q.ApplicationSource, q, q.Namespace, q.TrackingMethod, q.AppLabelKey, q.AppName, &res)
|
||||
if err != nil {
|
||||
log.Warnf("manifest cache set error %s/%s: %v", q.ApplicationSource.String(), cacheKey, err)
|
||||
}
|
||||
|
||||
@@ -156,7 +156,7 @@ func TestGenerateManifests_K8SAPIResetCache(t *testing.T) {
|
||||
|
||||
cachedFakeResponse := &apiclient.ManifestResponse{Manifests: []string{"Fake"}}
|
||||
|
||||
err := service.cache.SetManifests(mock.Anything, &src, &q, "", "", "", &cache.CachedManifestResponse{ManifestResponse: cachedFakeResponse})
|
||||
err := service.cache.SetManifests(mock.Anything, &src, &q, "", "", "", "", &cache.CachedManifestResponse{ManifestResponse: cachedFakeResponse})
|
||||
assert.NoError(t, err)
|
||||
|
||||
res, err := service.GenerateManifest(context.Background(), &q)
|
||||
@@ -178,7 +178,7 @@ func TestGenerateManifests_EmptyCache(t *testing.T) {
|
||||
Repo: &argoappv1.Repository{}, ApplicationSource: &src,
|
||||
}
|
||||
|
||||
err := service.cache.SetManifests(mock.Anything, &src, &q, "", "", "", &cache.CachedManifestResponse{ManifestResponse: nil})
|
||||
err := service.cache.SetManifests(mock.Anything, &src, &q, "", "", "", "", &cache.CachedManifestResponse{ManifestResponse: nil})
|
||||
assert.NoError(t, err)
|
||||
|
||||
res, err := service.GenerateManifest(context.Background(), &q)
|
||||
@@ -310,7 +310,7 @@ func TestManifestGenErrorCacheByNumRequests(t *testing.T) {
|
||||
assert.NotNil(t, manifestRequest)
|
||||
|
||||
cachedManifestResponse := &cache.CachedManifestResponse{}
|
||||
err := service.cache.GetManifests(mock.Anything, manifestRequest.ApplicationSource, manifestRequest, manifestRequest.Namespace, manifestRequest.AppLabelKey, manifestRequest.AppName, cachedManifestResponse)
|
||||
err := service.cache.GetManifests(mock.Anything, manifestRequest.ApplicationSource, manifestRequest, manifestRequest.Namespace, "", manifestRequest.AppLabelKey, manifestRequest.AppName, cachedManifestResponse)
|
||||
assert.Nil(t, err)
|
||||
return cachedManifestResponse
|
||||
}
|
||||
|
||||
@@ -2,10 +2,10 @@ health_status = {}
|
||||
if obj.status ~= nil then
|
||||
if obj.status.ingress ~= nil then
|
||||
numIngressRules = 0
|
||||
numTrue = 0
|
||||
numFalse = 0
|
||||
for _, ingressRules in pairs(obj.status.ingress) do
|
||||
numIngressRules = numIngressRules + 1
|
||||
numTrue = 0
|
||||
numFalse = 0
|
||||
if obj.status.ingress ~= nil then
|
||||
for _, condition in pairs(ingressRules.conditions) do
|
||||
if condition.type == "Admitted" and condition.status == "True" then
|
||||
|
||||
@@ -213,7 +213,7 @@ func (s *Server) Create(ctx context.Context, q *application.ApplicationCreateReq
|
||||
return existing, nil
|
||||
}
|
||||
if q.Upsert == nil || !*q.Upsert {
|
||||
return nil, status.Errorf(codes.InvalidArgument, argo.GenerateSpecIsDifferentErrorMessage("application", existing.Spec, a.Spec))
|
||||
return nil, status.Errorf(codes.InvalidArgument, "existing application spec is different, use upsert flag to force update")
|
||||
}
|
||||
if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, appRBACName(a)); err != nil {
|
||||
return nil, err
|
||||
@@ -489,7 +489,6 @@ func (s *Server) ListResourceEvents(ctx context.Context, q *application.Applicat
|
||||
"involvedObject.namespace": namespace,
|
||||
}).String()
|
||||
}
|
||||
|
||||
log.Infof("Querying for resource events with field selector: %s", fieldSelector)
|
||||
opts := metav1.ListOptions{FieldSelector: fieldSelector}
|
||||
return kubeClientset.CoreV1().Events(namespace).List(ctx, opts)
|
||||
|
||||
@@ -3,20 +3,20 @@ package cluster
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster"
|
||||
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
servercache "github.com/argoproj/argo-cd/v2/server/cache"
|
||||
"github.com/argoproj/argo-cd/v2/server/rbacpolicy"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/clusterauth"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
"github.com/argoproj/argo-cd/v2/util/rbac"
|
||||
@@ -181,6 +181,15 @@ var clusterFieldsByPath = map[string]func(updated *appv1.Cluster, existing *appv
|
||||
"clusterResources": func(updated *appv1.Cluster, existing *appv1.Cluster) {
|
||||
updated.ClusterResources = existing.ClusterResources
|
||||
},
|
||||
"labels": func(updated *appv1.Cluster, existing *appv1.Cluster) {
|
||||
updated.Labels = existing.Labels
|
||||
},
|
||||
"annotations": func(updated *appv1.Cluster, existing *appv1.Cluster) {
|
||||
updated.Annotations = existing.Annotations
|
||||
},
|
||||
"project": func(updated *appv1.Cluster, existing *appv1.Cluster) {
|
||||
updated.Project = existing.Project
|
||||
},
|
||||
}
|
||||
|
||||
// Update updates a cluster
|
||||
@@ -197,9 +206,12 @@ func (s *Server) Update(ctx context.Context, q *cluster.ClusterUpdateRequest) (*
|
||||
if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionUpdate, createRBACObject(c.Project, q.Cluster.Server)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// verify that user can do update inside project where cluster will be located
|
||||
if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionUpdate, createRBACObject(q.Cluster.Project, q.Cluster.Server)); err != nil {
|
||||
return nil, err
|
||||
|
||||
if len(q.UpdatedFields) == 0 || sets.NewString(q.UpdatedFields...).Has("project") {
|
||||
// verify that user can do update inside project where cluster will be located
|
||||
if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionUpdate, createRBACObject(q.Cluster.Project, q.Cluster.Server)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(q.UpdatedFields) != 0 {
|
||||
|
||||
@@ -109,4 +109,52 @@ func TestUpdateCluster_FieldsPathSet(t *testing.T) {
|
||||
assert.Equal(t, updated.Name, "minikube")
|
||||
assert.Equal(t, updated.Namespaces, []string{"default", "kube-system"})
|
||||
assert.Equal(t, *updated.Shard, int64(1))
|
||||
|
||||
labelEnv := map[string]string{
|
||||
"env": "qa",
|
||||
}
|
||||
_, err = server.Update(context.Background(), &clusterapi.ClusterUpdateRequest{
|
||||
Cluster: &v1alpha1.Cluster{
|
||||
Server: "https://127.0.0.1",
|
||||
Labels: labelEnv,
|
||||
},
|
||||
UpdatedFields: []string{"labels"},
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, updated.Name, "minikube")
|
||||
assert.Equal(t, updated.Namespaces, []string{"default", "kube-system"})
|
||||
assert.Equal(t, updated.Labels, labelEnv)
|
||||
|
||||
annotationEnv := map[string]string{
|
||||
"env": "qa",
|
||||
}
|
||||
_, err = server.Update(context.Background(), &clusterapi.ClusterUpdateRequest{
|
||||
Cluster: &v1alpha1.Cluster{
|
||||
Server: "https://127.0.0.1",
|
||||
Annotations: annotationEnv,
|
||||
},
|
||||
UpdatedFields: []string{"annotations"},
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, updated.Name, "minikube")
|
||||
assert.Equal(t, updated.Namespaces, []string{"default", "kube-system"})
|
||||
assert.Equal(t, updated.Annotations, annotationEnv)
|
||||
|
||||
_, err = server.Update(context.Background(), &clusterapi.ClusterUpdateRequest{
|
||||
Cluster: &v1alpha1.Cluster{
|
||||
Server: "https://127.0.0.1",
|
||||
Project: "new-project",
|
||||
},
|
||||
UpdatedFields: []string{"project"},
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, updated.Name, "minikube")
|
||||
assert.Equal(t, updated.Namespaces, []string{"default", "kube-system"})
|
||||
assert.Equal(t, updated.Project, "new-project")
|
||||
}
|
||||
|
||||
@@ -167,8 +167,8 @@ func (p *RBACPolicyEnforcer) getProjectFromRequest(rvals ...interface{}) *v1alph
if res, ok := rvals[1].(string); ok {
if obj, ok := rvals[3].(string); ok {
switch res {
case ResourceApplications:
if objSplit := strings.Split(obj, "/"); len(objSplit) == 2 {
case ResourceApplications, ResourceRepositories, ResourceClusters:
if objSplit := strings.Split(obj, "/"); len(objSplit) >= 2 {
return getProjectByName(objSplit[0])
}
case ResourceProjects:

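The switch to len(objSplit) >= 2 is needed because repository and cluster RBAC objects embed URLs, e.g. my-project/https://github.com/argoproj/argocd-example-apps, which contain more than one "/". A minimal sketch of pulling the project out of such an object; this is a standalone helper, not the enforcer itself:

package main

import (
	"fmt"
	"strings"
)

// projectOf returns the project component of an RBAC object like
// "<project>/<app name>" or "<project>/<repo or cluster URL>".
func projectOf(obj string) (string, bool) {
	parts := strings.Split(obj, "/")
	if len(parts) >= 2 { // URLs add extra "/" segments, so >= 2 rather than == 2
		return parts[0], true
	}
	return "", false
}

func main() {
	fmt.Println(projectOf("my-project/https://github.com/argoproj/argocd-example-apps"))
	fmt.Println(projectOf("my-project/guestbook"))
}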
@@ -149,3 +149,13 @@ func TestGetScopes_CustomScopes(t *testing.T) {
|
||||
scopes := rbacEnforcer.GetScopes()
|
||||
assert.Equal(t, scopes, customScopes)
|
||||
}
|
||||
|
||||
func Test_getProjectFromRequest(t *testing.T) {
|
||||
fp := newFakeProj()
|
||||
projLister := test.NewFakeProjLister(fp)
|
||||
|
||||
rbacEnforcer := NewRBACPolicyEnforcer(nil, projLister)
|
||||
project := rbacEnforcer.getProjectFromRequest("", "repositories", "create", fp.Name+"/https://github.com/argoproj/argocd-example-apps")
|
||||
|
||||
assert.Equal(t, project.Name, fp.Name)
|
||||
}
|
||||
|
||||
@@ -318,8 +318,8 @@ func (s *Server) CreateRepository(ctx context.Context, q *repositorypkg.RepoCrea
var repo *appsv1.Repository
var err error

// check we can connect to the repo, copying any existing creds
{
// check we can connect to the repo, copying any existing creds (not supported for project scoped repositories)
if q.Repo.Project == "" {
repo := q.Repo.DeepCopy()
if !repo.HasCredentials() {
creds, err := s.db.GetRepositoryCredentials(ctx, repo.Repo)

@@ -1,8 +1,8 @@
FROM redis:6.2.4 as redis
FROM redis:6.2.6-alpine as redis

FROM node:12.18.4 as node

FROM golang:1.16.5 as golang
FROM golang:1.16.11 as golang

FROM registry:2.7.1 as registry


@@ -1,4 +1,4 @@
FROM golang:1.16.5 AS go
FROM golang:1.16.11 AS go

RUN go get github.com/mattn/goreman && \
go get github.com/kisielk/godepgraph

@@ -31,4 +31,11 @@
|
||||
margin-right: 2px;
|
||||
line-height: 14px;
|
||||
}
|
||||
|
||||
&__checkboxes {
|
||||
text-align: right;
|
||||
label {
|
||||
padding-right: 2em;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import {DataLoader, Tab, Tabs} from 'argo-ui';
|
||||
import {Checkbox, DataLoader, Tab, Tabs} from 'argo-ui';
|
||||
import * as deepMerge from 'deepmerge';
|
||||
import * as moment from 'moment';
|
||||
import * as React from 'react';
|
||||
|
||||
@@ -93,11 +94,38 @@ export const ApplicationNodeInfo = (props: {
|
||||
key: 'manifest',
|
||||
title: 'Live Manifest',
|
||||
content: (
|
||||
<YamlEditor
|
||||
input={props.live}
|
||||
hideModeButtons={!props.live}
|
||||
onSave={(patch, patchType) => services.applications.patchResource(props.application.metadata.name, props.node, patch, patchType)}
|
||||
/>
|
||||
<DataLoader load={() => services.viewPreferences.getPreferences()}>
|
||||
{pref => {
|
||||
const live = deepMerge(props.live, {}) as any;
|
||||
if (live?.metadata?.managedFields && pref.appDetails.hideManagedFields) {
|
||||
delete live.metadata.managedFields;
|
||||
}
|
||||
return (
|
||||
<>
|
||||
<div className='application-node-info__checkboxes'>
|
||||
<Checkbox
|
||||
id='hideManagedFields'
|
||||
checked={!!pref.appDetails.hideManagedFields}
|
||||
onChange={() =>
|
||||
services.viewPreferences.updatePreferences({
|
||||
appDetails: {
|
||||
...pref.appDetails,
|
||||
hideManagedFields: !pref.appDetails.hideManagedFields
|
||||
}
|
||||
})
|
||||
}
|
||||
/>
|
||||
<label htmlFor='hideManagedFields'>Hide Managed Fields</label>
|
||||
</div>
|
||||
<YamlEditor
|
||||
input={live}
|
||||
hideModeButtons={!live}
|
||||
onSave={(patch, patchType) => services.applications.patchResource(props.application.metadata.name, props.node, patch, patchType)}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
}}
|
||||
</DataLoader>
|
||||
)
|
||||
}
|
||||
];
|
||||
|
||||
@@ -27,11 +27,11 @@ const retryOptions: Array<(formApi: FormApi) => React.ReactNode> = [
|
||||
];
|
||||
|
||||
const defaultInitialValues = {
|
||||
limit: '',
|
||||
limit: 2,
|
||||
backoff: {
|
||||
duration: '',
|
||||
maxDuration: '',
|
||||
factor: ''
|
||||
duration: '5s',
|
||||
maxDuration: '3m0s',
|
||||
factor: 2
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -17,10 +17,10 @@ const retryOptionsView: Array<(initData: models.RetryStrategy) => React.ReactNod
|
||||
initData => buildRetryOptionView('Limit', initData?.limit),
|
||||
initData => buildRetryOptionView('Duration', initData?.backoff?.duration),
|
||||
initData => buildRetryOptionView('Max Duration', initData?.backoff?.maxDuration),
|
||||
initData => buildRetryOptionView('Factor', initData?.backoff.factor)
|
||||
initData => buildRetryOptionView('Factor', initData?.backoff?.factor)
|
||||
];
|
||||
|
||||
export const ApplicationRetryView = ({initValues}: {initValues?: models.RetryStrategy}) => {
|
||||
const result = !initValues ? 'Retry not installed' : retryOptionsView.map((render, i) => render(initValues));
|
||||
const result = !initValues ? 'Retry disabled' : retryOptionsView.map((render, i) => render(initValues));
|
||||
return <div className='application-retry-option-view-list'>{result}</div>;
|
||||
};
|
||||
|
||||
@@ -176,8 +176,10 @@ export const ApplicationSyncPanel = ({application, selectedResource, hide}: {app
|
||||
let contentEnd = resKey.substr(-Math.floor(resKey.length / 2));
|
||||
// We want the ellipsis to be in the middle of our text, so we use RTL layout to put it there.
|
||||
// Unfortunately, strong LTR characters get jumbled around, so make sure that the last character isn't strong.
|
||||
const indexOfFirstLetter = /[a-z]/i.exec(contentEnd).index;
|
||||
contentEnd = contentEnd.slice(indexOfFirstLetter);
|
||||
const firstLetter = /[a-z]/i.exec(contentEnd);
|
||||
if (firstLetter) {
|
||||
contentEnd = contentEnd.slice(firstLetter.index);
|
||||
}
|
||||
const isLongLabel = resKey.length > 68;
|
||||
return (
|
||||
<div key={resKey} className='application-sync-panel__resource'>
|
||||
|
||||
@@ -116,8 +116,7 @@ export const Filter = (props: FilterProps) => {
|
||||
{props.label || 'FILTER'}
|
||||
{(props.selected || []).length > 0 || (props.field && Object.keys(values).length > 0) ? (
|
||||
<button
|
||||
className='argo-button argo-button--base argo-button--sm'
|
||||
style={{marginLeft: 'auto'}}
|
||||
className='argo-button argo-button--base argo-button--sm argo-button--right'
|
||||
onClick={() => {
|
||||
setValues({} as {[label: string]: boolean});
|
||||
setInput('');
|
||||
|
||||
@@ -31,7 +31,10 @@ const MonacoEditorLazy = React.lazy(() =>
|
||||
|
||||
return (
|
||||
<div
|
||||
style={{height: `${Math.max(props.minHeight || 0, height)}px`}}
|
||||
style={{
|
||||
height: `${Math.max(props.minHeight || 0, height + 100)}px`,
|
||||
overflowY: 'hidden'
|
||||
}}
|
||||
ref={el => {
|
||||
if (el) {
|
||||
const container = el as {
|
||||
@@ -40,7 +43,16 @@ const MonacoEditorLazy = React.lazy(() =>
|
||||
};
|
||||
if (props.editor) {
|
||||
if (!container.editorApi) {
|
||||
container.editorApi = monaco.editor.create(el, props.editor.options);
|
||||
const editor = monaco.editor.create(el, {
|
||||
...props.editor.options,
|
||||
scrollBeyondLastLine: false,
|
||||
scrollbar: {
|
||||
handleMouseWheel: false,
|
||||
vertical: 'hidden'
|
||||
}
|
||||
});
|
||||
|
||||
container.editorApi = editor;
|
||||
}
|
||||
|
||||
const model = monaco.editor.createModel(props.editor.input.text, props.editor.input.language);
|
||||
|
||||
@@ -9,6 +9,7 @@ export interface AppDetailsPreferences {
|
||||
resourceView: 'manifest' | 'diff' | 'desiredManifest';
|
||||
inlineDiff: boolean;
|
||||
compactDiff: boolean;
|
||||
hideManagedFields?: boolean;
|
||||
orphanedResources: boolean;
|
||||
podView: PodViewPreferences;
|
||||
darkMode: boolean;
|
||||
|
||||
@@ -396,34 +396,6 @@ func APIResourcesToStrings(resources []kube.APIResourceInfo, includeKinds bool)
|
||||
return res
|
||||
}
|
||||
|
||||
func retrieveScopedRepositories(name string, db db.ArgoDB, ctx context.Context) []*argoappv1.Repository {
|
||||
var repositories []*argoappv1.Repository
|
||||
allRepos, err := db.ListRepositories(ctx)
|
||||
if err != nil {
|
||||
return repositories
|
||||
}
|
||||
for _, repo := range allRepos {
|
||||
if repo.Project == name {
|
||||
repositories = append(repositories, repo)
|
||||
}
|
||||
}
|
||||
return repositories
|
||||
}
|
||||
|
||||
func retrieveScopedClusters(name string, db db.ArgoDB, ctx context.Context) []*argoappv1.Cluster {
|
||||
var clusters []*argoappv1.Cluster
|
||||
allClusters, err := db.ListClusters(ctx)
|
||||
if err != nil {
|
||||
return clusters
|
||||
}
|
||||
for i, cluster := range allClusters.Items {
|
||||
if cluster.Project == name {
|
||||
clusters = append(clusters, &allClusters.Items[i])
|
||||
}
|
||||
}
|
||||
return clusters
|
||||
}
|
||||
|
||||
// GetAppProjectWithScopedResources returns a project from an application with scoped resources
|
||||
func GetAppProjectWithScopedResources(name string, projLister applicationsv1.AppProjectLister, ns string, settingsManager *settings.SettingsManager, db db.ArgoDB, ctx context.Context) (*argoappv1.AppProject, argoappv1.Repositories, []*argoappv1.Cluster, error) {
|
||||
projOrig, err := projLister.AppProjects(ns).Get(name)
|
||||
@@ -437,7 +409,15 @@ func GetAppProjectWithScopedResources(name string, projLister applicationsv1.App
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
return project, retrieveScopedRepositories(name, db, ctx), retrieveScopedClusters(name, db, ctx), nil
|
||||
clusters, err := db.GetProjectClusters(ctx, project.Name)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
repos, err := db.GetProjectRepositories(ctx, name)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return project, repos, clusters, nil
|
||||
|
||||
}
|
||||
|
||||
@@ -448,11 +428,17 @@ func GetAppProjectByName(name string, projLister applicationsv1.AppProjectLister
|
||||
return nil, err
|
||||
}
|
||||
project := projOrig.DeepCopy()
|
||||
repos := retrieveScopedRepositories(name, db, ctx)
|
||||
repos, err := db.GetProjectRepositories(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, repo := range repos {
|
||||
project.Spec.SourceRepos = append(project.Spec.SourceRepos, repo.Repo)
|
||||
}
|
||||
clusters := retrieveScopedClusters(name, db, ctx)
|
||||
clusters, err := db.GetProjectClusters(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, cluster := range clusters {
|
||||
if len(cluster.Namespaces) == 0 {
|
||||
project.Spec.Destinations = append(project.Spec.Destinations, argoappv1.ApplicationDestination{Server: cluster.Server, Namespace: "*"})
|
||||
@@ -621,16 +607,10 @@ func GetPermittedRepos(proj *argoappv1.AppProject, repos []*argoappv1.Repository
|
||||
}
|
||||
|
||||
func getDestinationServer(ctx context.Context, db db.ArgoDB, clusterName string) (string, error) {
|
||||
clusterList, err := db.ListClusters(ctx)
|
||||
servers, err := db.GetClusterServersByName(ctx, clusterName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var servers []string
|
||||
for _, c := range clusterList.Items {
|
||||
if c.Name == clusterName {
|
||||
servers = append(servers, c.Server)
|
||||
}
|
||||
}
|
||||
if len(servers) > 1 {
|
||||
return "", fmt.Errorf("there are %d clusters with the same name: %v", len(servers), servers)
|
||||
} else if len(servers) == 0 {
|
||||
|
||||
@@ -680,14 +680,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
}
|
||||
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("ListClusters", context.Background()).Return(&argoappv1.ClusterList{
|
||||
Items: []argoappv1.Cluster{
|
||||
{
|
||||
Name: "minikube",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
db.On("GetClusterServersByName", context.Background(), "minikube").Return([]string{"https://127.0.0.1:6443"}, nil)
|
||||
|
||||
appCond := ValidateDestination(context.Background(), &dest, db)
|
||||
assert.Nil(t, appCond)
|
||||
@@ -707,13 +700,13 @@ func TestValidateDestination(t *testing.T) {
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
|
||||
t.Run("List clusters fails", func(t *testing.T) {
|
||||
t.Run("GetClusterServersByName fails", func(t *testing.T) {
|
||||
dest := argoappv1.ApplicationDestination{
|
||||
Name: "minikube",
|
||||
}
|
||||
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("ListClusters", context.Background()).Return(nil, fmt.Errorf("an error occurred"))
|
||||
db.On("GetClusterServersByName", context.Background(), mock.Anything).Return(nil, fmt.Errorf("an error occurred"))
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, db)
|
||||
assert.Equal(t, "unable to find destination server: an error occurred", err.Error())
|
||||
@@ -726,14 +719,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
}
|
||||
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("ListClusters", context.Background()).Return(&argoappv1.ClusterList{
|
||||
Items: []argoappv1.Cluster{
|
||||
{
|
||||
Name: "dind",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
db.On("GetClusterServersByName", context.Background(), "minikube").Return(nil, nil)
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, db)
|
||||
assert.Equal(t, "unable to find destination server: there are no clusters with this name: minikube", err.Error())
|
||||
@@ -746,18 +732,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
}
|
||||
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("ListClusters", context.Background()).Return(&argoappv1.ClusterList{
|
||||
Items: []argoappv1.Cluster{
|
||||
{
|
||||
Name: "dind",
|
||||
Server: "https://127.0.0.1:2443",
|
||||
},
|
||||
{
|
||||
Name: "dind",
|
||||
Server: "https://127.0.0.1:8443",
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
db.On("GetClusterServersByName", context.Background(), "dind").Return([]string{"https://127.0.0.1:2443", "https://127.0.0.1:8443"}, nil)
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, db)
|
||||
assert.Equal(t, "unable to find destination server: there are 2 clusters with the same name: [https://127.0.0.1:2443 https://127.0.0.1:8443]", err.Error())
|
||||
@@ -891,39 +866,6 @@ func TestGetGlobalProjects(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func Test_retrieveScopedRepositories(t *testing.T) {
|
||||
repo := &argoappv1.Repository{Repo: fmt.Sprintf("file://%s", "test"), Project: "test"}
|
||||
|
||||
repos := make([]*argoappv1.Repository, 0)
|
||||
repos = append(repos, repo)
|
||||
|
||||
db := &dbmocks.ArgoDB{}
|
||||
|
||||
db.On("ListRepositories", context.TODO()).Return(repos, nil)
|
||||
|
||||
scopedRepos := retrieveScopedRepositories("test", db, context.TODO())
|
||||
|
||||
assert.Len(t, scopedRepos, 1)
|
||||
assert.Equal(t, scopedRepos[0].Repo, repo.Repo)
|
||||
|
||||
}
|
||||
|
||||
func Test_retrieveScopedRepositoriesWithNotProjectAssigned(t *testing.T) {
|
||||
repo := &argoappv1.Repository{Repo: fmt.Sprintf("file://%s", "test")}
|
||||
|
||||
repos := make([]*argoappv1.Repository, 0)
|
||||
repos = append(repos, repo)
|
||||
|
||||
db := &dbmocks.ArgoDB{}
|
||||
|
||||
db.On("ListRepositories", context.TODO()).Return(repos, nil)
|
||||
|
||||
scopedRepos := retrieveScopedRepositories("test", db, context.TODO())
|
||||
|
||||
assert.Len(t, scopedRepos, 0)
|
||||
|
||||
}
|
||||
|
||||
func Test_GetDifferentPathsBetweenStructs(t *testing.T) {
|
||||
|
||||
r1 := argoappv1.Repository{}
|
||||
|
||||
@@ -150,6 +150,8 @@ func (rt *resourceTracking) ParseAppInstanceValue(value string) (*AppInstanceVal
return &appInstanceValue, nil
}

// Normalize updates live resource and removes diff caused but missing annotation or extra tracking label.
// The normalization is required to ensure smooth transition to new tracking method.
func (rt *resourceTracking) Normalize(config, live *unstructured.Unstructured, labelKey, trackingMethod string) error {
if IsOldTrackingMethod(trackingMethod) {
return nil
@@ -170,9 +172,8 @@ func (rt *resourceTracking) Normalize(config, live *unstructured.Unstructured, l
return err
}

err = argokube.SetAppInstanceLabel(config, labelKey, label)
if err != nil {
return err
if argokube.GetAppInstanceLabel(config, labelKey) == "" {
argokube.RemoveLabel(live, labelKey)
}

return nil

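In effect, Normalize no longer re-applies the legacy tracking label to the desired (config) object; it only strips that label from the live object when the config no longer carries it. A small sketch of that label handling on unstructured objects; the helper name is illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// dropStaleLabel removes labelKey from live only if config does not set it.
func dropStaleLabel(config, live *unstructured.Unstructured, labelKey string) {
	if config.GetLabels()[labelKey] != "" {
		return // the label is still desired; leave the live object alone
	}
	labels := live.GetLabels()
	delete(labels, labelKey)
	live.SetLabels(labels)
}

func main() {
	live := &unstructured.Unstructured{Object: map[string]interface{}{"kind": "Service"}}
	live.SetLabels(map[string]string{"app.kubernetes.io/instance": "my-app"})
	config := &unstructured.Unstructured{Object: map[string]interface{}{"kind": "Service"}}
	dropStaleLabel(config, live, "app.kubernetes.io/instance")
	fmt.Println(live.GetLabels()) // the stale tracking label is gone
}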
@@ -107,30 +107,63 @@ func TestParseAppInstanceValueCorrectFormat(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestResourceIdNormalizer_Normalize(t *testing.T) {
|
||||
func sampleResource() *unstructured.Unstructured {
|
||||
yamlBytes, err := ioutil.ReadFile("testdata/svc.yaml")
|
||||
assert.Nil(t, err)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var obj *unstructured.Unstructured
|
||||
err = yaml.Unmarshal(yamlBytes, &obj)
|
||||
assert.Nil(t, err)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
func TestResourceIdNormalizer_Normalize(t *testing.T) {
|
||||
rt := NewResourceTracking()
|
||||
|
||||
err = rt.SetAppInstance(obj, common.LabelKeyAppInstance, "my-app", "", TrackingMethodLabel)
|
||||
// live object is a resource that has old style tracking label
|
||||
liveObj := sampleResource()
|
||||
err := rt.SetAppInstance(liveObj, common.LabelKeyAppInstance, "my-app", "", TrackingMethodLabel)
|
||||
assert.Nil(t, err)
|
||||
|
||||
yamlBytes, err = ioutil.ReadFile("testdata/svc.yaml")
|
||||
assert.Nil(t, err)
|
||||
var obj2 *unstructured.Unstructured
|
||||
err = yaml.Unmarshal(yamlBytes, &obj2)
|
||||
// config object is a resource that has new style tracking annotation
|
||||
configObj := sampleResource()
|
||||
err = rt.SetAppInstance(configObj, common.AnnotationKeyAppInstance, "my-app2", "", TrackingMethodAnnotation)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = rt.SetAppInstance(obj2, common.AnnotationKeyAppInstance, "my-app2", "", TrackingMethodAnnotation)
|
||||
_ = rt.Normalize(configObj, liveObj, common.LabelKeyAppInstance, string(TrackingMethodAnnotation))
|
||||
|
||||
// the normalization should affect add the new style annotation and drop old tracking label from live object
|
||||
annotation := kube.GetAppInstanceAnnotation(configObj, common.AnnotationKeyAppInstance)
|
||||
assert.Equal(t, liveObj.GetAnnotations()[common.AnnotationKeyAppInstance], annotation)
|
||||
_, hasOldLabel := liveObj.GetLabels()[common.LabelKeyAppInstance]
|
||||
assert.False(t, hasOldLabel)
|
||||
}
|
||||
|
||||
func TestResourceIdNormalizer_Normalize_ConfigHasOldLabel(t *testing.T) {
|
||||
rt := NewResourceTracking()
|
||||
|
||||
// live object is a resource that has old style tracking label
|
||||
liveObj := sampleResource()
|
||||
err := rt.SetAppInstance(liveObj, common.LabelKeyAppInstance, "my-app", "", TrackingMethodLabel)
|
||||
assert.Nil(t, err)
|
||||
|
||||
_ = rt.Normalize(obj2, obj, common.LabelKeyAppInstance, string(TrackingMethodAnnotation))
|
||||
annotation := kube.GetAppInstanceAnnotation(obj2, common.AnnotationKeyAppInstance)
|
||||
assert.Equal(t, obj.GetAnnotations()[common.AnnotationKeyAppInstance], annotation)
|
||||
// config object is a resource that has new style tracking annotation
|
||||
configObj := sampleResource()
|
||||
err = rt.SetAppInstance(configObj, common.AnnotationKeyAppInstance, "my-app2", "", TrackingMethodAnnotation)
|
||||
assert.Nil(t, err)
|
||||
err = rt.SetAppInstance(configObj, common.LabelKeyAppInstance, "my-app", "", TrackingMethodLabel)
|
||||
assert.Nil(t, err)
|
||||
|
||||
_ = rt.Normalize(configObj, liveObj, common.LabelKeyAppInstance, string(TrackingMethodAnnotation))
|
||||
|
||||
// the normalization should affect add the new style annotation and drop old tracking label from live object
|
||||
annotation := kube.GetAppInstanceAnnotation(configObj, common.AnnotationKeyAppInstance)
|
||||
assert.Equal(t, liveObj.GetAnnotations()[common.AnnotationKeyAppInstance], annotation)
|
||||
_, hasOldLabel := liveObj.GetLabels()[common.LabelKeyAppInstance]
|
||||
assert.True(t, hasOldLabel)
|
||||
}
|
||||
|
||||
func TestIsOldTrackingMethod(t *testing.T) {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
"github.com/argoproj/argo-cd/v2/common"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/collections"
"github.com/argoproj/argo-cd/v2/util/settings"
)

var (
@@ -190,31 +191,73 @@ func (db *db) getClusterSecret(server string) (*apiv1.Secret, error) {
return nil, status.Errorf(codes.NotFound, "cluster %q not found", server)
}

func (db *db) getClusterFromSecret(server string) (*appv1.Cluster, error) {
clusterSecrets, err := db.listSecretsByType(common.LabelValueSecretTypeCluster)
// GetCluster returns a cluster from a query
func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) {
informer, err := db.settingsMgr.GetSecretsInformer()
if err != nil {
return nil, err
}
srv := strings.TrimRight(server, "/")
for _, clusterSecret := range clusterSecrets {
if strings.TrimRight(string(clusterSecret.Data["server"]), "/") == srv {
return secretToCluster(clusterSecret)
}
res, err := informer.GetIndexer().ByIndex(settings.ByClusterURLIndexer, server)
if err != nil {
return nil, err
}
if len(res) > 0 {
return secretToCluster(res[0].(*apiv1.Secret))
}
if server == appv1.KubernetesInternalAPIServerAddr {
return db.getLocalCluster(), nil
}

return nil, status.Errorf(codes.NotFound, "cluster %q not found", server)
}

// GetCluster returns a cluster from a query
func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) {
cluster, err := db.getClusterFromSecret(server)
// GetProjectClusters returns project scoped clusters by given project name
func (db *db) GetProjectClusters(ctx context.Context, project string) ([]*appv1.Cluster, error) {
informer, err := db.settingsMgr.GetSecretsInformer()
if err != nil {
if errorStatus, ok := status.FromError(err); ok && errorStatus.Code() == codes.NotFound && server == appv1.KubernetesInternalAPIServerAddr {
return db.getLocalCluster(), nil
} else {
return nil, err
}
secrets, err := informer.GetIndexer().ByIndex(settings.ByProjectClusterIndexer, project)
if err != nil {
return nil, err
}
var res []*appv1.Cluster
for i := range secrets {
cluster, err := secretToCluster(secrets[i].(*apiv1.Secret))
if err != nil {
return nil, err
}
res = append(res, cluster)
}
return cluster, nil
return res, nil
}

func (db *db) GetClusterServersByName(ctx context.Context, name string) ([]string, error) {
informer, err := db.settingsMgr.GetSecretsInformer()
if err != nil {
return nil, err
}

// if local cluster name is not overridden and specified name is local cluster name, return local cluster server
localClusterSecrets, err := informer.GetIndexer().ByIndex(settings.ByClusterURLIndexer, appv1.KubernetesInternalAPIServerAddr)
if err != nil {
return nil, err
}

if len(localClusterSecrets) == 0 && db.getLocalCluster().Name == name {
return []string{appv1.KubernetesInternalAPIServerAddr}, nil
}

secrets, err := informer.GetIndexer().ByIndex(settings.ByClusterNameIndexer, name)
if err != nil {
return nil, err
}
var res []string
for i := range secrets {
s := secrets[i].(*apiv1.Secret)
res = append(res, strings.TrimRight(string(s.Data["server"]), "/"))
}
return res, nil
}

// UpdateCluster updates a cluster

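The two lookups above are what the rest of the code base calls; the following is a minimal, hypothetical sketch of that call pattern. The package name, the way the db.ArgoDB value is wired, and the example server URL are assumptions for illustration, not part of the commits.

package example

import (
    "context"
    "fmt"
    "log"

    "github.com/argoproj/argo-cd/v2/util/db"
)

// resolveCluster sketches how callers might use the informer-backed lookups shown above.
func resolveCluster(argoDB db.ArgoDB) {
    // Look up a cluster by its API server URL; when no matching secret exists,
    // the in-cluster address falls back to the local cluster configuration.
    cluster, err := argoDB.GetCluster(context.Background(), "https://kubernetes.default.svc")
    if err != nil {
        log.Fatalf("cluster lookup failed: %v", err)
    }

    // Resolve every server URL registered under that cluster's name.
    servers, err := argoDB.GetClusterServersByName(context.Background(), cluster.Name)
    if err != nil {
        log.Fatalf("name lookup failed: %v", err)
    }
    fmt.Println(servers)
}
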
@@ -27,8 +27,12 @@ type ArgoDB interface {
handleAddEvent func(cluster *appv1.Cluster),
handleModEvent func(oldCluster *appv1.Cluster, newCluster *appv1.Cluster),
handleDeleteEvent func(clusterServer string)) error
// Get returns a cluster from a query
// GetCluster returns a cluster by given server url
GetCluster(ctx context.Context, server string) (*appv1.Cluster, error)
// GetClusterServersByName returns cluster server URLs by given cluster name
GetClusterServersByName(ctx context.Context, name string) ([]string, error)
// GetProjectClusters returns project scoped clusters by given project name
GetProjectClusters(ctx context.Context, project string) ([]*appv1.Cluster, error)
// UpdateCluster updates a cluster
UpdateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Cluster, error)
// DeleteCluster deletes a cluster by name
@@ -41,6 +45,8 @@ type ArgoDB interface {
CreateRepository(ctx context.Context, r *appv1.Repository) (*appv1.Repository, error)
// GetRepository returns a repository by URL
GetRepository(ctx context.Context, url string) (*appv1.Repository, error)
// GetProjectRepositories returns project scoped repositories by given project name
GetProjectRepositories(ctx context.Context, project string) ([]*appv1.Repository, error)
// RepositoryExists returns whether a repository is configured for the given URL
RepositoryExists(ctx context.Context, repoURL string) (bool, error)
// UpdateRepository updates a repository

@@ -693,3 +693,59 @@ func TestHelmRepositorySecretsTrim(t *testing.T) {
assert.Equal(t, tt.expectedSecret, tt.retrievedSecret)
}
}

func TestGetClusterServersByName(t *testing.T) {
clientset := getClientset(nil, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster-secret",
Namespace: testNamespace,
Labels: map[string]string{
common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
},
Annotations: map[string]string{
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
},
},
Data: map[string][]byte{
"name": []byte("my-cluster-name"),
"server": []byte("https://my-cluster-server"),
"config": []byte("{}"),
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
servers, err := db.GetClusterServersByName(context.Background(), "my-cluster-name")
assert.NoError(t, err)
assert.ElementsMatch(t, []string{"https://my-cluster-server"}, servers)
}

func TestGetClusterServersByName_InClusterNotConfigured(t *testing.T) {
clientset := getClientset(nil)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
servers, err := db.GetClusterServersByName(context.Background(), "in-cluster")
assert.NoError(t, err)
assert.ElementsMatch(t, []string{v1alpha1.KubernetesInternalAPIServerAddr}, servers)
}

func TestGetClusterServersByName_InClusterConfigured(t *testing.T) {
clientset := getClientset(nil, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster-secret",
Namespace: testNamespace,
Labels: map[string]string{
common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
},
Annotations: map[string]string{
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
},
},
Data: map[string][]byte{
"name": []byte("in-cluster-renamed"),
"server": []byte(v1alpha1.KubernetesInternalAPIServerAddr),
"config": []byte("{}"),
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
servers, err := db.GetClusterServersByName(context.Background(), "in-cluster-renamed")
assert.NoError(t, err)
assert.ElementsMatch(t, []string{v1alpha1.KubernetesInternalAPIServerAddr}, servers)
}

@@ -242,6 +242,75 @@ func (_m *ArgoDB) GetCluster(ctx context.Context, server string) (*v1alpha1.Clus
return r0, r1
}

// GetClusterServersByName provides a mock function with given fields: ctx, name
func (_m *ArgoDB) GetClusterServersByName(ctx context.Context, name string) ([]string, error) {
ret := _m.Called(ctx, name)

var r0 []string
if rf, ok := ret.Get(0).(func(context.Context, string) []string); ok {
r0 = rf(ctx, name)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}

var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, name)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// GetProjectClusters provides a mock function with given fields: ctx, project
func (_m *ArgoDB) GetProjectClusters(ctx context.Context, project string) ([]*v1alpha1.Cluster, error) {
ret := _m.Called(ctx, project)

var r0 []*v1alpha1.Cluster
if rf, ok := ret.Get(0).(func(context.Context, string) []*v1alpha1.Cluster); ok {
r0 = rf(ctx, project)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v1alpha1.Cluster)
}
}

var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, project)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// GetProjectRepositories provides a mock function with given fields: ctx, project
func (_m *ArgoDB) GetProjectRepositories(ctx context.Context, project string) ([]*v1alpha1.Repository, error) {
ret := _m.Called(ctx, project)

var r0 []*v1alpha1.Repository
if rf, ok := ret.Get(0).(func(context.Context, string) []*v1alpha1.Repository); ok {
r0 = rf(ctx, project)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v1alpha1.Repository)
}
}

var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, project)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// GetRepository provides a mock function with given fields: ctx, url
func (_m *ArgoDB) GetRepository(ctx context.Context, url string) (*v1alpha1.Repository, error) {
ret := _m.Called(ctx, url)
@@ -265,29 +334,6 @@ func (_m *ArgoDB) GetRepository(ctx context.Context, url string) (*v1alpha1.Repo
return r0, r1
}

// RepositoryExists provides a mock function with given fields: ctx, url
func (_m *ArgoDB) RepositoryExists(ctx context.Context, url string) (bool, error) {
ret := _m.Called(ctx, url)

var r0 bool
if rf, ok := ret.Get(0).(func(context.Context, string) bool); ok {
r0 = rf(ctx, url)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(bool)
}
}

var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, url)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// GetRepositoryCredentials provides a mock function with given fields: ctx, name
func (_m *ArgoDB) GetRepositoryCredentials(ctx context.Context, name string) (*v1alpha1.RepoCreds, error) {
ret := _m.Called(ctx, name)
@@ -472,6 +518,27 @@ func (_m *ArgoDB) RemoveRepoCertificates(ctx context.Context, selector *db.Certi
return r0, r1
}

// RepositoryExists provides a mock function with given fields: ctx, repoURL
func (_m *ArgoDB) RepositoryExists(ctx context.Context, repoURL string) (bool, error) {
ret := _m.Called(ctx, repoURL)

var r0 bool
if rf, ok := ret.Get(0).(func(context.Context, string) bool); ok {
r0 = rf(ctx, repoURL)
} else {
r0 = ret.Get(0).(bool)
}

var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, repoURL)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// UpdateCluster provides a mock function with given fields: ctx, c
func (_m *ArgoDB) UpdateCluster(ctx context.Context, c *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
ret := _m.Called(ctx, c)

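A hypothetical test sketch of how the regenerated mock could be stubbed with testify; the mocks import alias and the cluster values mirror the tests above but are otherwise assumptions.

package example

import (
    "context"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"

    dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
)

// TestClusterServerLookup stubs the newly generated GetClusterServersByName mock method.
func TestClusterServerLookup(t *testing.T) {
    argoDB := &dbmocks.ArgoDB{}
    argoDB.On("GetClusterServersByName", mock.Anything, "my-cluster-name").
        Return([]string{"https://my-cluster-server"}, nil)

    servers, err := argoDB.GetClusterServersByName(context.Background(), "my-cluster-name")
    assert.NoError(t, err)
    assert.Equal(t, []string{"https://my-cluster-server"}, servers)
    argoDB.AssertExpectations(t)
}
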
@@ -5,12 +5,14 @@ import (
"hash/fnv"

log "github.com/sirupsen/logrus"

"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
apiv1 "k8s.io/api/core/v1"

appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/settings"
)

const (
@@ -84,6 +86,26 @@ func (db *db) GetRepository(ctx context.Context, repoURL string) (*appsv1.Reposi
return repository, err
}

func (db *db) GetProjectRepositories(ctx context.Context, project string) ([]*appsv1.Repository, error) {
informer, err := db.settingsMgr.GetSecretsInformer()
if err != nil {
return nil, err
}
secrets, err := informer.GetIndexer().ByIndex(settings.ByProjectRepoIndexer, project)
if err != nil {
return nil, err
}
var res []*appv1.Repository
for i := range secrets {
repo, err := secretToRepository(secrets[i].(*apiv1.Secret))
if err != nil {
return nil, err
}
res = append(res, repo)
}
return res, nil
}

func (db *db) RepositoryExists(ctx context.Context, repoURL string) (bool, error) {
secretsBackend := db.repoBackend()
exists, err := secretsBackend.RepositoryExists(ctx, repoURL)

@@ -83,7 +83,7 @@ func (s *secretsRepositoryBackend) GetRepository(ctx context.Context, repoURL st
return nil, err
}

repository, err := s.secretToRepository(secret)
repository, err := secretToRepository(secret)
if err != nil {
return nil, err
}
@@ -100,7 +100,7 @@ func (s *secretsRepositoryBackend) ListRepositories(ctx context.Context, repoTyp
}

for _, secret := range secrets {
r, err := s.secretToRepository(secret)
r, err := secretToRepository(secret)
if err != nil {
if r != nil {
modifiedTime := metav1.Now()
@@ -291,7 +291,7 @@ func (s *secretsRepositoryBackend) GetAllHelmRepoCreds(ctx context.Context) ([]*
return helmRepoCreds, nil
}

func (s *secretsRepositoryBackend) secretToRepository(secret *corev1.Secret) (*appsv1.Repository, error) {
func secretToRepository(secret *corev1.Secret) (*appsv1.Repository, error) {
repository := &appsv1.Repository{
Name: string(secret.Data["name"]),
Repo: string(secret.Data["url"]),

@@ -279,3 +279,42 @@ func Test_CredsURLToSecretName(t *testing.T) {
}
}
}

func Test_GetProjectRepositories(t *testing.T) {
repoSecretWithProject := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "some-repo-secret",
Labels: map[string]string{
common.LabelKeySecretType: common.LabelValueSecretTypeRepository,
},
},
Data: map[string][]byte{
"type": []byte("git"),
"url": []byte("git@github.com:argoproj/argo-cd"),
"project": []byte("some-project"),
},
}

repoSecretWithoutProject := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "some-other-repo-secret",
Labels: map[string]string{
common.LabelKeySecretType: common.LabelValueSecretTypeRepository,
},
},
Data: map[string][]byte{
"type": []byte("git"),
"url": []byte("git@github.com:argoproj/argo-cd"),
},
}

clientset := getClientset(map[string]string{}, repoSecretWithProject, repoSecretWithoutProject)
argoDB := NewDB(testNamespace, settings.NewSettingsManager(context.TODO(), clientset, testNamespace), clientset)

repos, err := argoDB.GetProjectRepositories(context.TODO(), "some-project")
assert.NoError(t, err)
assert.Len(t, repos, 1)
assert.Equal(t, "git@github.com:argoproj/argo-cd", repos[0].Repo)
}

@@ -120,3 +120,23 @@ func GetAppInstanceLabel(un *unstructured.Unstructured, key string) string {
}
return ""
}

// RemoveLabel removes label with the specified name
func RemoveLabel(un *unstructured.Unstructured, key string) {
labels := un.GetLabels()
if labels == nil {
return
}

for k := range labels {
if k == key {
delete(labels, k)
if len(labels) == 0 {
un.SetLabels(nil)
} else {
un.SetLabels(labels)
}
break
}
}
}

@@ -7,7 +7,6 @@ import (
"testing"

"github.com/ghodss/yaml"

"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
extv1beta1 "k8s.io/api/extensions/v1beta1"
@@ -207,3 +206,16 @@ func TestGetAppInstanceLabel(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, "my-app", GetAppInstanceLabel(&obj, common.LabelKeyAppInstance))
}

func TestRemoveLabel(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("testdata/svc.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
obj.SetLabels(map[string]string{"test": "value"})

RemoveLabel(&obj, "test")

assert.Nil(t, obj.GetLabels())
}

@@ -13,9 +13,10 @@ import (
"github.com/argoproj/argo-cd/v2/util/glob"
jwtutil "github.com/argoproj/argo-cd/v2/util/jwt"

"github.com/casbin/casbin"
"github.com/casbin/casbin/model"
"github.com/casbin/casbin/util"
"github.com/Knetic/govaluate"
"github.com/casbin/casbin/v2"
"github.com/casbin/casbin/v2/model"
"github.com/casbin/casbin/v2/util"
jwt "github.com/dgrijalva/jwt-go/v4"
gocache "github.com/patrickmn/go-cache"
log "github.com/sirupsen/logrus"
@@ -44,10 +45,10 @@ const (
// CasbinEnforcer represents methods that must be implemented by a Casbin enforcer
type CasbinEnforcer interface {
EnableLog(bool)
Enforce(rvals ...interface{}) bool
Enforce(rvals ...interface{}) (bool, error)
LoadPolicy() error
EnableEnforce(bool)
AddFunction(name string, function func(args ...interface{}) (interface{}, error))
AddFunction(name string, function govaluate.ExpressionFunction)
GetGroupingPolicy() [][]string
}

@@ -110,25 +111,26 @@ func (e *Enforcer) tryGetCabinEnforcer(project string, policy string) (CasbinEnf
if cached != nil {
return cached.enforcer, nil
}
matchFunc := globMatchFunc
if e.matchMode == RegexMatchMode {
matchFunc = util.RegexMatchFunc
}

var err error
var enforcer CasbinEnforcer
if policy != "" {
if enforcer, err = newEnforcerSafe(e.model, newAdapter(e.adapter.builtinPolicy, e.adapter.userDefinedPolicy, policy)); err != nil {
if enforcer, err = newEnforcerSafe(matchFunc, e.model, newAdapter(e.adapter.builtinPolicy, e.adapter.userDefinedPolicy, policy)); err != nil {
// fallback to default policy if project policy is invalid
log.Errorf("Failed to load project '%s' policy", project)
enforcer, err = newEnforcerSafe(e.model, e.adapter)
enforcer, err = newEnforcerSafe(matchFunc, e.model, e.adapter)
}
} else {
enforcer, err = newEnforcerSafe(e.model, e.adapter)
enforcer, err = newEnforcerSafe(matchFunc, e.model, e.adapter)
}
if err != nil {
return nil, err
}

matchFunc := globMatchFunc
if e.matchMode == RegexMatchMode {
matchFunc = util.RegexMatchFunc
}
enforcer.AddFunction("globOrRegexMatch", matchFunc)
enforcer.EnableLog(e.enableLog)
enforcer.EnableEnforce(e.enabled)
@@ -139,19 +141,18 @@ func (e *Enforcer) tryGetCabinEnforcer(project string, policy string) (CasbinEnf
// ClaimsEnforcerFunc is a func template to enforce JWT claims. The subject is replaced
type ClaimsEnforcerFunc func(claims jwt.Claims, rvals ...interface{}) bool

func newEnforcerSafe(params ...interface{}) (e CasbinEnforcer, err error) {
func newEnforcerSafe(matchFunction govaluate.ExpressionFunction, params ...interface{}) (e CasbinEnforcer, err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("%v", r)
e = nil
}
}()
enfs := casbin.NewCachedEnforcer(params...)
enfs, err := casbin.NewCachedEnforcer(params...)
if err != nil {
return nil, err
}
// Default glob match mode
enfs.AddFunction("globOrRegexMatch", globMatchFunc)
enfs.AddFunction("globOrRegexMatch", matchFunction)
return enfs, nil
}

@@ -291,7 +292,7 @@ func (e *Enforcer) EnforceWithCustomEnforcer(enf CasbinEnforcer, rvals ...interf
func enforce(enf CasbinEnforcer, defaultRole string, claimsEnforcerFunc ClaimsEnforcerFunc, rvals ...interface{}) bool {
// check the default role
if defaultRole != "" && len(rvals) >= 2 {
if enf.Enforce(append([]interface{}{defaultRole}, rvals[1:]...)...) {
if ok, err := enf.Enforce(append([]interface{}{defaultRole}, rvals[1:]...)...); ok && err == nil {
return true
}
}
@@ -312,7 +313,8 @@ func enforce(enf CasbinEnforcer, defaultRole string, claimsEnforcerFunc ClaimsEn
default:
rvals = append([]interface{}{""}, rvals[1:]...)
}
return enf.Enforce(rvals...)
ok, err := enf.Enforce(rvals...)
return ok && err == nil
}

// SetBuiltinPolicy sets a built-in policy, which augments any user defined policies
@@ -408,7 +410,7 @@ func (e *Enforcer) syncUpdate(cm *apiv1.ConfigMap, onUpdated func(cm *apiv1.Conf

// ValidatePolicy verifies a policy string is acceptable to casbin
func ValidatePolicy(policy string) error {
_, err := newEnforcerSafe(newBuiltInModel(), newAdapter("", "", policy))
_, err := newEnforcerSafe(globMatchFunc, newBuiltInModel(), newAdapter("", "", policy))
if err != nil {
return fmt.Errorf("policy syntax error: %s", policy)
}
@@ -419,7 +421,11 @@ func ValidatePolicy(policy string) error {
// This is needed because it is not safe to re-use the same casbin Model when instantiating new
// casbin enforcers.
func newBuiltInModel() model.Model {
return casbin.NewModel(assets.ModelConf)
m, err := model.NewModelFromString(assets.ModelConf)
if err != nil {
panic(err)
}
return m
}

// Casbin adapter which satisfies persist.Adapter interface

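For orientation, a minimal, hypothetical sketch of the casbin v2 calling convention the hunks above migrate to: both NewEnforcer and Enforce now return an error next to their result, so the decision can no longer be read as a bare bool. The file paths and request values below are placeholders, not Argo CD's actual model or policy.

package example

import (
    "fmt"
    "log"

    "github.com/casbin/casbin/v2"
)

// enforceOnce builds a v2 enforcer from files and evaluates a single request.
func enforceOnce(modelPath, policyPath string) {
    e, err := casbin.NewEnforcer(modelPath, policyPath)
    if err != nil {
        log.Fatalf("failed to build enforcer: %v", err)
    }
    // The request values must match the configured request_definition; these are placeholders.
    ok, err := e.Enforce("role:readonly", "applications", "get", "default/guestbook")
    if err != nil {
        log.Fatalf("enforce failed: %v", err)
    }
    fmt.Println("allowed:", ok)
}
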
@@ -145,6 +145,63 @@ type KustomizeSettings struct {
Versions []KustomizeVersion
}

var (
ByClusterURLIndexer = "byClusterURL"
byClusterURLIndexerFunc = func(obj interface{}) ([]string, error) {
s, ok := obj.(*apiv1.Secret)
if !ok {
return nil, nil
}
if s.Labels == nil || s.Labels[common.LabelKeySecretType] != common.LabelValueSecretTypeCluster {
return nil, nil
}
if s.Data == nil {
return nil, nil
}
if url, ok := s.Data["server"]; ok {
return []string{strings.TrimRight(string(url), "/")}, nil
}
return nil, nil
}
ByClusterNameIndexer = "byClusterName"
byClusterNameIndexerFunc = func(obj interface{}) ([]string, error) {
s, ok := obj.(*apiv1.Secret)
if !ok {
return nil, nil
}
if s.Labels == nil || s.Labels[common.LabelKeySecretType] != common.LabelValueSecretTypeCluster {
return nil, nil
}
if s.Data == nil {
return nil, nil
}
if name, ok := s.Data["name"]; ok {
return []string{string(name)}, nil
}
return nil, nil
}
ByProjectClusterIndexer = "byProjectCluster"
ByProjectRepoIndexer = "byProjectRepo"
byProjectIndexerFunc = func(secretType string) func(obj interface{}) ([]string, error) {
return func(obj interface{}) ([]string, error) {
s, ok := obj.(*apiv1.Secret)
if !ok {
return nil, nil
}
if s.Labels == nil || s.Labels[common.LabelKeySecretType] != secretType {
return nil, nil
}
if s.Data == nil {
return nil, nil
}
if project, ok := s.Data["project"]; ok {
return []string{string(project)}, nil
}
return nil, nil
}
}
)

func (ks *KustomizeSettings) GetOptions(source v1alpha1.ApplicationSource) (*v1alpha1.KustomizeOptions, error) {
binaryPath := ""
buildOptions := ""
@@ -327,11 +384,12 @@ const (

// SettingsManager holds config info for a new manager with which to access Kubernetes ConfigMaps.
type SettingsManager struct {
ctx context.Context
clientset kubernetes.Interface
secrets v1listers.SecretLister
configmaps v1listers.ConfigMapLister
namespace string
ctx context.Context
clientset kubernetes.Interface
secrets v1listers.SecretLister
secretsInformer cache.SharedIndexInformer
configmaps v1listers.ConfigMapLister
namespace string
// subscribers is a list of subscribers to settings updates
subscribers []chan<- *ArgoCDSettings
// mutex protects concurrency sensitive parts of settings manager: access to subscribers list and initialization flag
@@ -382,6 +440,14 @@ func (mgr *SettingsManager) GetSecretsLister() (v1listers.SecretLister, error) {
return mgr.secrets, nil
}

func (mgr *SettingsManager) GetSecretsInformer() (cache.SharedIndexInformer, error) {
err := mgr.ensureSynced(false)
if err != nil {
return nil, err
}
return mgr.secretsInformer, nil
}

func (mgr *SettingsManager) updateSecret(callback func(*apiv1.Secret) error) error {
err := mgr.ensureSynced(false)
if err != nil {
@@ -992,7 +1058,13 @@ func (mgr *SettingsManager) initialize(ctx context.Context) error {
mgr.onRepoOrClusterChanged()
},
}
indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
indexers := cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
ByClusterURLIndexer: byClusterURLIndexerFunc,
ByClusterNameIndexer: byClusterNameIndexerFunc,
ByProjectClusterIndexer: byProjectIndexerFunc(common.LabelValueSecretTypeCluster),
ByProjectRepoIndexer: byProjectIndexerFunc(common.LabelValueSecretTypeRepository),
}
cmInformer := v1.NewFilteredConfigMapInformer(mgr.clientset, mgr.namespace, 3*time.Minute, indexers, tweakConfigMap)
secretsInformer := v1.NewSecretInformer(mgr.clientset, mgr.namespace, 3*time.Minute, indexers)
cmInformer.AddEventHandler(eventHandler)
@@ -1042,6 +1114,7 @@ func (mgr *SettingsManager) initialize(ctx context.Context) error {
secretsInformer.AddEventHandler(handler)
cmInformer.AddEventHandler(handler)
mgr.secrets = v1listers.NewSecretLister(secretsInformer.GetIndexer())
mgr.secretsInformer = secretsInformer
mgr.configmaps = v1listers.NewConfigMapLister(cmInformer.GetIndexer())
return nil
}

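A small, hypothetical sketch of how the new exported index names can be queried through GetSecretsInformer; the function and package names here are illustrative only, and the secret fields read are the ones indexed above.

package example

import (
    "fmt"

    apiv1 "k8s.io/api/core/v1"

    "github.com/argoproj/argo-cd/v2/util/settings"
)

// printClusterSecretsByName queries the byClusterName index registered above
// directly on the settings manager's shared secrets informer.
func printClusterSecretsByName(mgr *settings.SettingsManager, name string) error {
    informer, err := mgr.GetSecretsInformer()
    if err != nil {
        return err
    }
    objs, err := informer.GetIndexer().ByIndex(settings.ByClusterNameIndexer, name)
    if err != nil {
        return err
    }
    for _, obj := range objs {
        secret := obj.(*apiv1.Secret)
        fmt.Println(secret.Name, string(secret.Data["server"]))
    }
    return nil
}
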
@@ -31,6 +31,7 @@ import (

type settingsSource interface {
GetAppInstanceLabelKey() (string, error)
GetTrackingMethod() (string, error)
}

var _ settingsSource = &settings.SettingsManager{}
@@ -214,6 +215,11 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload interface{}) {
return
}

trackingMethod, err := a.settingsSrc.GetTrackingMethod()
if err != nil {
log.Warnf("Failed to get trackingMethod: %v", err)
return
}
appInstanceLabelKey, err := a.settingsSrc.GetAppInstanceLabelKey()
if err != nil {
log.Warnf("Failed to get appInstanceLabelKey: %v", err)
@@ -243,7 +249,7 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload interface{}) {
continue
}
} else if change.shaBefore != "" && change.shaAfter != "" {
if err := a.storePreviouslyCachedManifests(&app, change, appInstanceLabelKey); err != nil {
if err := a.storePreviouslyCachedManifests(&app, change, trackingMethod, appInstanceLabelKey); err != nil {
log.Warnf("Failed to store cached manifests of previous revision for app '%s': %v", app.Name, err)
}
}
@@ -252,7 +258,7 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload interface{}) {
}
}

func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Application, change changeInfo, appInstanceLabelKey string) error {
func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Application, change changeInfo, trackingMethod string, appInstanceLabelKey string) error {
err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, a.db)
if err != nil {
return err
@@ -264,10 +270,10 @@ func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Appl
return err
}
var cachedManifests cache.CachedManifestResponse
if err := a.repoCache.GetManifests(change.shaBefore, &app.Spec.Source, &clusterInfo, app.Spec.Destination.Namespace, appInstanceLabelKey, app.Name, &cachedManifests); err == nil {
if err := a.repoCache.GetManifests(change.shaBefore, &app.Spec.Source, &clusterInfo, app.Spec.Destination.Namespace, trackingMethod, appInstanceLabelKey, app.Name, &cachedManifests); err == nil {
return err
}
if err = a.repoCache.SetManifests(change.shaAfter, &app.Spec.Source, &clusterInfo, app.Spec.Destination.Namespace, appInstanceLabelKey, app.Name, &cachedManifests); err != nil {
if err = a.repoCache.SetManifests(change.shaAfter, &app.Spec.Source, &clusterInfo, app.Spec.Destination.Namespace, trackingMethod, appInstanceLabelKey, app.Name, &cachedManifests); err != nil {
return err
}
return nil

@@ -32,6 +32,10 @@ func (f fakeSettingsSrc) GetAppInstanceLabelKey() (string, error) {
return "mycompany.com/appname", nil
}

func (f fakeSettingsSrc) GetTrackingMethod() (string, error) {
return "", nil
}

func NewMockHandler() *ArgoCDWebhookHandler {
appClientset := appclientset.NewSimpleClientset()
cacheClient := cacheutil.NewCache(cacheutil.NewInMemoryCache(1 * time.Hour))