Compare commits


64 Commits

Author SHA1 Message Date
Alexander Matyushentsev
bdeecc2ddb Fix test compile error (#1052) 2019-01-18 15:16:34 -08:00
Jesse Suen
a5fbddab75 Handle case where manifests contain a null items list (#1051) 2019-01-18 15:07:35 -08:00
Jesse Suen
3577ea94a2 Fix controller deadlock when checking for stale cache (#1046)
* Controller cache was susceptible to clock skew in managed cluster

* Fix controller deadlock when checking for stale cache
2019-01-18 12:33:02 -08:00
Jesse Suen
dec46cfae0 Controller cache was susceptible to clock skew in managed cluster (#1043) 2019-01-18 12:32:54 -08:00
Alexander Matyushentsev
e1579824e9 Fix sync operation sorting (#1042) 2019-01-18 12:32:47 -08:00
Jesse Suen
aa3c1dd0ae Fix ability to unset ApplicationSource specific parameters (#1041) 2019-01-18 12:32:35 -08:00
Alexander Matyushentsev
66f3a0dad2 Issue #1039 - Correct redirect to login page if dex authentication is not successful (#1040) 2019-01-18 12:32:23 -08:00
Alexander Matyushentsev
20678dc22e Update manifests to v0.11.1 2019-01-17 13:37:10 -08:00
Alexander Matyushentsev
d47ad35208 Hooks result should have Running phase by default (given we don't have Pending state) (#1037) 2019-01-17 13:31:54 -08:00
Alexander Matyushentsev
265b2c8c29 Issue #1033 - Fix force resource delete API (#1034) 2019-01-17 13:31:44 -08:00
Jesse Suen
fe078e7842 Fix PermissionDenied issue during app creation with project roles. Fix custom casbin adapter (#1030) 2019-01-17 13:31:39 -08:00
Alexander Matyushentsev
59d7abf65c Replace grpc repo-server parallelism limit interceptor with semaphore (#1029) 2019-01-17 13:31:31 -08:00
Jesse Suen
e29f438134 Downgrade kubectl to v1.12 to regain kubectl convert functionality (#1023) 2019-01-17 13:31:25 -08:00
Alexander Matyushentsev
fde6c5c741 Issue #1025 - Fix /v1/applications/<appName>/manifests for app with helm depencencies (#1026) 2019-01-17 13:31:20 -08:00
Alexander Matyushentsev
0f23ca055a Issue #937 - Allow using redis as a cache in repo-server (#1020)
* Issue #937 - Allow using redis as a cache in repo-server

* Support repo server grpc methods throttling

* Upgrade redis
2019-01-17 13:31:08 -08:00
Jesse Suen
117cfb27bb Do not allow metadata.creationTimestamp to affect sync status (#1021) 2019-01-17 13:30:55 -08:00
Jesse Suen
689dce0e8f Switch to a custom casbin adapter for rbac enforcment (#1022) 2019-01-17 13:30:45 -08:00
Jesse Suen
056e1d218d Graceful handling of clusters where API resource discovery is partially successful (#1018) 2019-01-17 13:30:19 -08:00
Alexander Matyushentsev
e859766fdf Issue #1013 - handle k8s resources circular dependency (#1016) 2019-01-17 13:30:12 -08:00
Jesse Suen
93d89bc3f1 Fix app diff --local command (#1008) 2019-01-17 13:29:51 -08:00
Saradhi Sreegiriraju
d82974a266 Update parameters.md (#1007) 2019-01-17 13:29:45 -08:00
Jesse Suen
71b646dfae Update manifests to v0.11.0 2019-01-10 15:14:21 -08:00
Jesse Suen
f76a92944e Moving apps between projects requires create/update in new project (#1002) 2019-01-10 15:12:53 -08:00
Jesse Suen
f41af8bcc3 Settings were getting re-initialized when incomplete. Session manager now uses settings manager (#1000) 2019-01-09 16:24:28 -08:00
Alexander Matyushentsev
1a7a086da0 Log manifest with debug log level (#999) 2019-01-09 16:22:45 -08:00
Alexander Matyushentsev
0ca2259fd3 Update manifests to v0.11.0-rc6 2019-01-09 15:52:18 -08:00
Jesse Suen
3ee437e7f4 Update manifests to v0.11.0-rc5 2019-01-08 15:59:00 -08:00
Jesse Suen
86aa44891a Add better project policy rule validation (#990) 2019-01-08 15:54:25 -08:00
Alexander Matyushentsev
3f7c60c375 Use informers to load ArgoCD settings (#989)
* Use informers to load Argo CD settings
2019-01-08 15:54:17 -08:00
Jesse Suen
317dd15978 Eliminate reconcile hotloop by prevent Endpoint updates from requeuing apps (#986) 2019-01-08 15:54:10 -08:00
Jesse Suen
c5db14db3a Increase QPS and Burst used in K8s client configs to 25/50 (#984) 2019-01-08 15:54:02 -08:00
Jesse Suen
2a3ad769b5 Fix issue where custom resource objects might get synced to incorrect namespace during initial sync (#982) 2019-01-08 15:53:55 -08:00
Alexander Matyushentsev
9fb7b99e6d Fix loading cluster connection status (#980) 2019-01-08 15:53:46 -08:00
Alexander Matyushentsev
4840a9c5fc Update manifests to v0.11.0-rc4 2019-01-04 13:34:29 -08:00
Jesse Suen
5fe99816da Update golang to v1.11.4 (#977) 2019-01-04 13:30:25 -08:00
Alexander Matyushentsev
0013b20394 Issue #978 - Fix application rollback to deployment without overrides (#979)
* Issue #978 - Fix application rollback to deployment without overrides

* Fix imports sorting
2019-01-04 13:30:21 -08:00
Paul van Staden
db08f8b4fe Improving documentation regarding params (#974) (#975) 2019-01-04 13:30:18 -08:00
Alexander Matyushentsev
7c2b92f70c Update manifests to v0.11.0-rc3 2019-01-03 15:37:27 -08:00
Jesse Suen
b451c4fe53 Update versions for kubectl (v1.13.1), helm (v2.12.1), ksonnet (v0.13.1) (#973) 2019-01-03 15:24:25 -08:00
Alexander Matyushentsev
de26eac63f Reduce timeout for checking cluster health (#972) 2019-01-03 15:22:34 -08:00
Alexander Matyushentsev
71194d7fac Update sample commands in project management doc (#971) 2019-01-03 15:22:27 -08:00
Jesse Suen
ff2de56543 Update manifests to v0.11.0-rc2 2018-12-27 16:18:08 -08:00
Alexander Matyushentsev
c8ad7ccf39 Use --refresh --hard-refresh flags in 'app get' 'app diff' commands (#963) 2018-12-27 16:09:42 -08:00
Alexander Matyushentsev
58b77718f8 Issue #916 - Use 'diff' to render actual vs target state difference (#962) 2018-12-27 11:29:57 -08:00
Jesse Suen
5fff89bd12 Show sync policy in app list view (#961) 2018-12-27 11:29:50 -08:00
Jesse Suen
c2975bd258 Handle diff corner case where Role/ClusterRole rules are null (#960) 2018-12-27 11:29:43 -08:00
Alexander Matyushentsev
99daa3e315 Load repo/cluster status in parallel to improve /repos /clusters API performance (#958) 2018-12-27 11:29:36 -08:00
Alexander Matyushentsev
d21c8d09fa Issue #956 - Slow comparison if cluster is down (#957) 2018-12-27 11:29:30 -08:00
Jesse Suen
75dc868ecf Make injected application instance label configurable from default (#944)
* Make injected application instance label configurable from default
Stop removing ksonnet.io/component label, unless using legacy label

* Fix applying of resources when namespace is empty
2018-12-27 11:29:24 -08:00
Zvi Cahana
43064ac126 Prefix controller resource names with 'argocd-' (#917)
* Prefix controller resource names with 'argocd-'

* Regenerate installation manifests

* Rename some additional application-controller occurrences

* Rename [cluster]role[binding] resources

* Regenerate installation manifests
2018-12-27 11:29:17 -08:00
Alexander Matyushentsev
21902828ee Issue #950 - Application controller don't refresh app after destination update (#951) 2018-12-27 11:29:09 -08:00
lbrictson
d250155672 Update aws-iam-authenticator to new version, fix url (#948) 2018-12-27 11:29:03 -08:00
Alexander Matyushentsev
a8221c33ab Correctly drop cluster cache after CRD creation/deletion (#947) 2018-12-27 11:28:53 -08:00
Jesse Suen
f463589348 Diff library handles case where live object has null secret data (#945) 2018-12-27 11:28:46 -08:00
Alexander Matyushentsev
3a6dadd803 Issue #939 - Fix nil dereference error in Diff function (#940) 2018-12-27 11:28:39 -08:00
Alexander Matyushentsev
d265404e23 Issue 914 - Allow invalidating application related cache (#931) 2018-12-27 11:28:33 -08:00
Alexander Matyushentsev
09d8a8ee42 Issue 906 - Support setting different base href in UI (#930) 2018-12-27 11:28:27 -08:00
Alexander Matyushentsev
dbac3d4905 Issue #912 - Make ResourceNode 'tags' into a more generic 'info' struct (#926)
* Issue #912 - Make ResourceNode 'tags' into a more generic 'info' struct
2018-12-27 11:28:21 -08:00
Alexander Matyushentsev
ec0e64975e Issue #927 - Add missing handlings for deprecated extensions group kinds (#928) 2018-12-27 11:28:14 -08:00
Alexander Matyushentsev
6802e1ed35 Issue #922 - Fix nil derefrence error in 'argocd app diff' command (#925) 2018-12-27 11:28:07 -08:00
Alexander Matyushentsev
d09f8f974e Issue #910 - Reconstruct tree structure on the flight to avoid inconsistent state (#921) 2018-12-27 11:28:00 -08:00
Alexander Matyushentsev
7a6951dfe2 Issue #915 - Local 'argocd app diff' fails (#920) 2018-12-27 11:27:52 -08:00
Jesse Suen
476e351d12 Fix issue preventing kustomize apps being multi-namespaced (#913) 2018-12-10 09:42:53 -08:00
Alexander Matyushentsev
e0bc94dcf6 Update manifests to v0.11.0-rc1 2018-12-07 17:07:31 -08:00
122 changed files with 4056 additions and 1903 deletions

View File

@@ -1,2 +1,6 @@
ignore:
- "**/*.pb.go"
- "**/*.pb.go"
- "**/*_test.go"
- "pkg/apis/.*"
- "pkg/client/.*"
- "test/.*"

View File

@@ -3,7 +3,7 @@
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM golang:1.10.3 as builder
FROM golang:1.11.4 as builder
RUN apt-get update && apt-get install -y \
git \
@@ -29,33 +29,35 @@ RUN wget https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-lin
chmod +x /usr/local/bin/dep
# Install gometalinter
RUN curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v2.0.5/gometalinter-2.0.5-linux-amd64.tar.gz | \
ENV GOMETALINTER_VERSION=2.0.12
RUN curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v${GOMETALINTER_VERSION}/gometalinter-${GOMETALINTER_VERSION}-linux-amd64.tar.gz | \
tar -xzC "$GOPATH/bin" --exclude COPYING --exclude README.md --strip-components 1 -f- && \
ln -s $GOPATH/bin/gometalinter $GOPATH/bin/gometalinter.v2
# Install packr
ENV PACKR_VERSION=1.13.2
ENV PACKR_VERSION=1.21.9
RUN wget https://github.com/gobuffalo/packr/releases/download/v${PACKR_VERSION}/packr_${PACKR_VERSION}_linux_amd64.tar.gz && \
tar -vxf packr*.tar.gz -C /tmp/ && \
mv /tmp/packr /usr/local/bin/packr
# Install kubectl
RUN curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
# NOTE: keep the version synced with https://storage.googleapis.com/kubernetes-release/release/stable.txt
# Keep version at 1.12.X until https://github.com/argoproj/argo-cd/issues/1012 is resolved
ENV KUBECTL_VERSION=1.12.4
RUN curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \
chmod +x /usr/local/bin/kubectl
# Install ksonnet
# NOTE: we frequently switch between tip of master ksonnet vs. official builds. Comment/uncomment
# the corresponding section to switch between the two options:
# Option 1: build ksonnet ourselves
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /usr/local/bin/ks
# Option 2: use official tagged ksonnet release
ENV KSONNET_VERSION=0.13.0
ENV KSONNET_VERSION=0.13.1
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks
# NOTE: we occasionally switch between tip of master ksonnet vs. official builds. Run the following
# to use tip instead of official release:
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /usr/local/bin/ks
# Install helm
ENV HELM_VERSION=2.11.0
ENV HELM_VERSION=2.12.1
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /usr/local/bin/helm
@@ -65,15 +67,16 @@ ENV KUSTOMIZE_VERSION=1.0.11
RUN curl -L -o /usr/local/bin/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/kustomize
ENV AWS_IAM_AUTHENTICATOR_VERSION=0.3.0
RUN curl -L -o /usr/local/bin/aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v${AWS_IAM_AUTHENTICATOR_VERSION}/heptio-authenticator-aws_${AWS_IAM_AUTHENTICATOR_VERSION}_linux_amd64 && \
# Install AWS IAM Authenticator
ENV AWS_IAM_AUTHENTICATOR_VERSION=0.4.0-alpha.1
RUN curl -L -o /usr/local/bin/aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/${AWS_IAM_AUTHENTICATOR_VERSION}/aws-iam-authenticator_${AWS_IAM_AUTHENTICATOR_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/aws-iam-authenticator
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM golang:1.10.3 as argocd-build
FROM golang:1.11.4 as argocd-build
COPY --from=builder /usr/local/bin/dep /usr/local/bin/dep
COPY --from=builder /usr/local/bin/packr /usr/local/bin/packr

Gopkg.lock generated
View File

@@ -262,7 +262,7 @@
version = "v6.3.5"
[[projects]]
digest = "1:34c6632be33dacedc5acf9f4489cfa64e0d716a55b00e2f6ff839a4437c3f7da"
digest = "1:b73fabc1ff8f2417bc5cc51d3f7274d6af5300b5ad9b8606967213134c1700dc"
name = "github.com/go-redis/redis"
packages = [
".",
@@ -271,12 +271,11 @@
"internal/hashtag",
"internal/pool",
"internal/proto",
"internal/singleflight",
"internal/util",
]
pruneopts = ""
revision = "877867d2845fbaf86798befe410b6ceb6f5c29a3"
version = "v6.10.2"
revision = "22be8a3eaf992c828cecb69dc07348313bf08d2e"
version = "v6.15.1"
[[projects]]
digest = "1:842c1acbacc80da775cfc0c412c4fe322c2d1b86c260db632987730d0d67a6bd"
@@ -381,6 +380,14 @@
pruneopts = ""
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
branch = "master"
digest = "1:d0899ec7c2f61fd5e4ccba7dbefe72e366a3ecce23ecdb982c768fa1d38812fb"
name = "github.com/google/shlex"
packages = ["."]
pruneopts = ""
revision = "c34317bd91bf98fab745d77b03933cf8769299fe"
[[projects]]
digest = "1:2a131706ff80636629ab6373f2944569b8252ecc018cda8040931b05d32e3c16"
name = "github.com/googleapis/gnostic"
@@ -654,14 +661,6 @@
pruneopts = ""
revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
[[projects]]
branch = "master"
digest = "1:1ee3e3e12ffdb5ba70b918148685cab6340bbc0d03ba723bcb46062d1bea69c6"
name = "github.com/qiangmzsx/string-adapter"
packages = ["."]
pruneopts = ""
revision = "38f25303bb0cd40e674a6fac01e0171ab905f5a1"
[[projects]]
digest = "1:3962f553b77bf6c03fc07cd687a22dd3b00fe11aa14d31194f5505f5bb65cdc8"
name = "github.com/sergi/go-diff"
@@ -840,7 +839,10 @@
branch = "master"
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
name = "golang.org/x/sync"
packages = ["errgroup"]
packages = [
"errgroup",
"semaphore",
]
pruneopts = ""
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
@@ -1395,6 +1397,7 @@
"github.com/argoproj/pkg/time",
"github.com/casbin/casbin",
"github.com/casbin/casbin/model",
"github.com/casbin/casbin/persist",
"github.com/coreos/go-oidc",
"github.com/dgrijalva/jwt-go",
"github.com/dustin/go-humanize",
@@ -1412,6 +1415,7 @@
"github.com/golang/protobuf/protoc-gen-go",
"github.com/golang/protobuf/ptypes/empty",
"github.com/google/go-jsonnet",
"github.com/google/shlex",
"github.com/grpc-ecosystem/go-grpc-middleware",
"github.com/grpc-ecosystem/go-grpc-middleware/auth",
"github.com/grpc-ecosystem/go-grpc-middleware/logging",
@@ -1425,7 +1429,6 @@
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/qiangmzsx/string-adapter",
"github.com/sirupsen/logrus",
"github.com/skratchdot/open-golang/open",
"github.com/soheilhy/cmux",
@@ -1442,6 +1445,7 @@
"golang.org/x/net/context",
"golang.org/x/oauth2",
"golang.org/x/sync/errgroup",
"golang.org/x/sync/semaphore",
"google.golang.org/genproto/googleapis/api/annotations",
"google.golang.org/grpc",
"google.golang.org/grpc/codes",
@@ -1488,6 +1492,7 @@
"k8s.io/client-go/informers/core/v1",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/fake",
"k8s.io/client-go/listers/core/v1",
"k8s.io/client-go/plugin/pkg/client/auth/gcp",
"k8s.io/client-go/plugin/pkg/client/auth/oidc",
"k8s.io/client-go/rest",

View File

@@ -1,3 +1,4 @@
[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
[![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd)
# Argo CD - Declarative Continuous Delivery for Kubernetes

View File

@@ -1 +1 @@
0.11.0
0.11.1

View File

@@ -17,13 +17,14 @@ import (
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"github.com/argoproj/argo-cd"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/settings"
"github.com/argoproj/argo-cd/util/stats"
"github.com/argoproj/argo-cd/util/tls"
)
@@ -54,6 +55,8 @@ func newCommand() *cobra.Command {
cli.SetGLogLevel(glogLevel)
config, err := clientConfig.ClientConfig()
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
errors.CheckError(err)
kubeClient := kubernetes.NewForConfigOrDie(config)
@@ -64,15 +67,18 @@ func newCommand() *cobra.Command {
resyncDuration := time.Duration(appResyncPeriod) * time.Second
repoClientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
appController := controller.NewApplicationController(
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)
appController, err := controller.NewApplicationController(
namespace,
settingsMgr,
kubeClient,
appClient,
repoClientset,
resyncDuration)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
errors.CheckError(err)
log.Infof("Application Controller (version: %s) starting (namespace: %s)", argocd.GetVersion(), namespace)
stats.RegisterStackDumper()

View File

@@ -6,6 +6,7 @@ import (
"os"
"time"
"github.com/go-redis/redis"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -30,6 +31,10 @@ const (
func newCommand() *cobra.Command {
var (
logLevel string
redisAddress string
sentinelAddresses []string
sentinelMaster string
parallelismLimit int64
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
)
var command = cobra.Command{
@@ -41,7 +46,7 @@ func newCommand() *cobra.Command {
tlsConfigCustomizer, err := tlsConfigCustomizerSrc()
errors.CheckError(err)
server, err := reposerver.NewServer(git.NewFactory(), newCache(), tlsConfigCustomizer)
server, err := reposerver.NewServer(git.NewFactory(), newCache(redisAddress, sentinelAddresses, sentinelMaster), tlsConfigCustomizer, parallelismLimit)
errors.CheckError(err)
grpc := server.CreateGRPC()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
@@ -62,18 +67,30 @@ func newCommand() *cobra.Command {
}
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().StringVar(&redisAddress, "redis", "", "Redis server hostname and port (e.g. argocd-redis:6379). ")
command.Flags().StringArrayVar(&sentinelAddresses, "sentinel", []string{}, "Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). ")
command.Flags().StringVar(&sentinelMaster, "sentinelmaster", "master", "Redis sentinel master group name.")
command.Flags().Int64Var(&parallelismLimit, "parallelismlimit", 0, "Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit.")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
return &command
}
func newCache() cache.Cache {
func newCache(redisAddress string, sentinelAddresses []string, sentinelMaster string) cache.Cache {
if redisAddress != "" {
client := redis.NewClient(&redis.Options{
Addr: redisAddress,
Password: "",
DB: 0,
})
return cache.NewRedisCache(client, repository.DefaultRepoCacheExpiration)
} else if len(sentinelAddresses) > 0 {
client := redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: sentinelMaster,
SentinelAddrs: sentinelAddresses,
})
return cache.NewRedisCache(client, repository.DefaultRepoCacheExpiration)
}
return cache.NewInMemoryCache(repository.DefaultRepoCacheExpiration)
// client := redis.NewClient(&redis.Options{
// Addr: "localhost:6379",
// Password: "",
// DB: 0,
// })
// return cache.NewRedisCache(client, repository.DefaultRepoCacheExpiration)
}
func main() {
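
The newCache change above wires the new --redis and --sentinel flags into the repo-server cache backend. Below is a minimal, self-contained sketch of that selection logic, assuming the go-redis v6 client pinned in Gopkg.lock; RepoCache and the concrete cache types are hypothetical stand-ins for the cache.Cache interface and the NewRedisCache/NewInMemoryCache constructors named in the diff.

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

// RepoCache is a hypothetical stand-in for the cache.Cache interface from the diff.
type RepoCache interface{ Kind() string }

type redisCache struct{ client *redis.Client }

func (redisCache) Kind() string { return "redis" }

type inMemoryCache struct{}

func (inMemoryCache) Kind() string { return "in-memory" }

// newCache mirrors the selection order added in the diff: an explicit --redis
// address wins, then --sentinel addresses with a master group name, otherwise
// the repo-server falls back to an in-memory cache.
func newCache(redisAddress string, sentinelAddrs []string, sentinelMaster string) RepoCache {
	if redisAddress != "" {
		return redisCache{client: redis.NewClient(&redis.Options{Addr: redisAddress})}
	}
	if len(sentinelAddrs) > 0 {
		return redisCache{client: redis.NewFailoverClient(&redis.FailoverOptions{
			MasterName:    sentinelMaster,
			SentinelAddrs: sentinelAddrs,
		})}
	}
	return inMemoryCache{}
}

func main() {
	c := newCache("argocd-redis:6379", nil, "master")
	fmt.Println("selected cache backend:", c.Kind())
}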

View File

@@ -27,6 +27,7 @@ func NewCommand() *cobra.Command {
glogLevel int
clientConfig clientcmd.ClientConfig
staticAssetsDir string
baseHRef string
repoServerAddress string
appControllerServerAddress string
dexServerAddress string
@@ -43,6 +44,8 @@ func NewCommand() *cobra.Command {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
@@ -59,6 +62,7 @@ func NewCommand() *cobra.Command {
Insecure: insecure,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
BaseHRef: baseHRef,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
@@ -73,9 +77,9 @@ func NewCommand() *cobra.Command {
stats.RegisterHeapDumper("memprofile")
for {
argocd := server.NewServer(argoCDOpts)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
argocd := server.NewServer(ctx, argoCDOpts)
argocd.Run(ctx, 8080)
cancel()
}
@@ -85,6 +89,7 @@ func NewCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().BoolVar(&insecure, "insecure", false, "Run server without TLS")
command.Flags().StringVar(&staticAssetsDir, "staticassets", "", "Static assets directory path")
command.Flags().StringVar(&baseHRef, "basehref", "/", "Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address")

View File

@@ -59,7 +59,6 @@ func NewCommand() *cobra.Command {
command.AddCommand(NewGenDexConfigCommand())
command.AddCommand(NewImportCommand())
command.AddCommand(NewExportCommand())
command.AddCommand(NewSettingsCommand())
command.AddCommand(NewClusterConfig())
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
@@ -81,17 +80,15 @@ func NewRunDexCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
prevSettings, err := settingsMgr.GetSettings()
errors.CheckError(err)
ctx := context.Background()
settingsMgr.StartNotifier(ctx, settings)
updateCh := make(chan struct{}, 1)
updateCh := make(chan *settings.ArgoCDSettings, 1)
settingsMgr.Subscribe(updateCh)
for {
var cmd *exec.Cmd
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
dexCfgBytes, err := dex.GenerateDexConfigYAML(prevSettings)
errors.CheckError(err)
if len(dexCfgBytes) == 0 {
log.Infof("dex is not configured")
@@ -108,10 +105,11 @@ func NewRunDexCommand() *cobra.Command {
// loop until the dex config changes
for {
<-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
newSettings := <-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(newSettings)
errors.CheckError(err)
if string(newDexCfgBytes) != string(dexCfgBytes) {
prevSettings = newSettings
log.Infof("dex config modified. restarting dex")
if cmd != nil && cmd.Process != nil {
err = cmd.Process.Signal(syscall.SIGTERM)
@@ -146,7 +144,7 @@ func NewGenDexConfigCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
@@ -224,7 +222,7 @@ func NewImportCommand() *cobra.Command {
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
err = settingsMgr.SaveSettings(newSettings)
errors.CheckError(err)
db := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -278,7 +276,7 @@ func NewExportCommand() *cobra.Command {
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
// certificate data is included in secrets that are exported alongside
@@ -349,42 +347,6 @@ func NewExportCommand() *cobra.Command {
return &command
}
// NewSettingsCommand returns a new instance of `argocd-util settings` command
func NewSettingsCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
updateSuperuser bool
superuserPassword string
updateSignature bool
)
var command = &cobra.Command{
Use: "settings",
Short: "Creates or updates Argo CD settings",
Long: "Creates or updates Argo CD settings",
Run: func(c *cobra.Command, args []string) {
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, wasSpecified, err := clientConfig.Namespace()
errors.CheckError(err)
if !(wasSpecified) {
namespace = "argocd"
}
kubeclientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
settingsMgr := settings.NewSettingsManager(kubeclientset, namespace)
_, err = settings.UpdateSettings(superuserPassword, settingsMgr, updateSignature, updateSuperuser, namespace)
errors.CheckError(err)
},
}
command.Flags().BoolVar(&updateSuperuser, "update-superuser", false, "force updating the superuser password")
command.Flags().StringVar(&superuserPassword, "superuser-password", "", "password for super user")
command.Flags().BoolVar(&updateSignature, "update-signature", false, "force updating the server-side token signing signature")
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}
// NewClusterConfig returns a new instance of `argocd-util cluster-kubeconfig` command
func NewClusterConfig() *cobra.Command {
var (
@@ -411,7 +373,7 @@ func NewClusterConfig() *cobra.Command {
kubeclientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
cluster, err := db.NewDB(namespace, settings.NewSettingsManager(kubeclientset, namespace), kubeclientset).GetCluster(context.Background(), serverUrl)
cluster, err := db.NewDB(namespace, settings.NewSettingsManager(context.Background(), kubeclientset, namespace), kubeclientset).GetCluster(context.Background(), serverUrl)
errors.CheckError(err)
err = kube.WriteKubeConfig(cluster.RESTConfig(), namespace, output)
errors.CheckError(err)

View File

@@ -5,8 +5,11 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path"
"reflect"
"strconv"
"strings"
@@ -14,14 +17,16 @@ import (
"time"
"github.com/ghodss/yaml"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/yudai/gojsondiff/formatter"
"golang.org/x/crypto/ssh/terminal"
"github.com/yudai/gojsondiff"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller/services"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apiclient"
@@ -124,13 +129,28 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
return command
}
func getRefreshType(refresh bool, hardRefresh bool) *string {
if hardRefresh {
refreshType := string(argoappv1.RefreshTypeHard)
return &refreshType
}
if refresh {
refreshType := string(argoappv1.RefreshTypeNormal)
return &refreshType
}
return nil
}
// NewApplicationGetCommand returns a new instance of an `argocd app get` command
func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
refresh bool
hardRefresh bool
output string
showParams bool
showOperation bool
refresh bool
)
var command = &cobra.Command{
Use: "get APPNAME",
@@ -144,7 +164,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
conn, appIf := acdClient.NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: refresh})
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: getRefreshType(refresh, hardRefresh)})
errors.CheckError(err)
switch output {
case "yaml":
@@ -188,6 +208,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
command.Flags().BoolVar(&showOperation, "show-operation", false, "Show application operation")
command.Flags().BoolVar(&showParams, "show-params", false, "Show application parameters and overrides")
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
return command
}
@@ -400,6 +421,10 @@ func setKsonnetOpt(src *argoappv1.ApplicationSource, env *string) {
}
if env != nil {
src.Ksonnet.Environment = *env
src.Environment = *env
}
if src.Ksonnet.IsZero() {
src.Ksonnet = nil
}
}
@@ -410,6 +435,9 @@ func setKustomizeOpt(src *argoappv1.ApplicationSource, namePrefix *string) {
if namePrefix != nil {
src.Kustomize.NamePrefix = *namePrefix
}
if src.Kustomize.IsZero() {
src.Kustomize = nil
}
}
func setHelmOpt(src *argoappv1.ApplicationSource, valueFiles []string) {
@@ -418,6 +446,10 @@ func setHelmOpt(src *argoappv1.ApplicationSource, valueFiles []string) {
}
if valueFiles != nil {
src.Helm.ValueFiles = valueFiles
src.ValuesFiles = valueFiles
}
if src.Helm.IsZero() {
src.Helm = nil
}
}
@@ -592,9 +624,7 @@ func getLocalObjects(app *argoappv1.Application, local string, env string, value
log.Fatal("--env option invalid when performing local diff on Kustomize application")
}
k := kustomize.NewKustomizeApp(local)
opts := kustomize.KustomizeBuildOpts{
Namespace: app.Namespace,
}
opts := kustomize.KustomizeBuildOpts{}
if app.Spec.Source.Kustomize != nil {
opts.NamePrefix = app.Spec.Source.Kustomize.NamePrefix
}
@@ -613,17 +643,47 @@ func getLocalObjects(app *argoappv1.Application, local string, env string, value
return localObjs
}
func groupLocalObjs(localObs []*unstructured.Unstructured, liveObjs []*unstructured.Unstructured, appNamespace string) map[kube.ResourceKey]*unstructured.Unstructured {
namespacedByGk := make(map[schema.GroupKind]bool)
for i := range liveObjs {
if liveObjs[i] != nil {
key := kube.GetResourceKey(liveObjs[i])
namespacedByGk[schema.GroupKind{Group: key.Group, Kind: key.Kind}] = key.Namespace != ""
}
}
objByKey := make(map[kube.ResourceKey]*unstructured.Unstructured)
for i := range localObs {
obj := localObs[i]
gk := obj.GroupVersionKind().GroupKind()
// Infer if obj is namespaced or not from corresponding live objects list. If corresponding live object has namespace then target object is also namespaced.
// If live object is missing then it does not matter if target is namespaced or not.
namespace := obj.GetNamespace()
if !namespacedByGk[gk] {
namespace = ""
} else {
if namespace == "" {
namespace = appNamespace
}
}
objByKey[kube.NewResourceKey(gk.Group, gk.Kind, namespace, obj.GetName())] = obj
}
return objByKey
}
// NewApplicationDiffCommand returns a new instance of an `argocd app diff` command
func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
refresh bool
local string
env string
values []string
refresh bool
hardRefresh bool
local string
env string
values []string
)
shortDesc := "Perform a diff against the target and live state."
var command = &cobra.Command{
Use: "diff APPNAME",
Short: "Perform a diff against the target and live state",
Short: shortDesc,
Long: shortDesc + "\nUses 'diff' to render the difference. KUBECTL_EXTERNAL_DIFF environment variable can be used to select your own diff tool.",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
@@ -633,59 +693,147 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: refresh})
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: getRefreshType(refresh, hardRefresh)})
errors.CheckError(err)
resources, err := appIf.ManagedResources(context.Background(), &services.ResourcesQuery{ApplicationName: appName})
errors.CheckError(err)
liveObjs, err := liveObjects(resources.Items)
errors.CheckError(err)
targetObjs, err := targetObjects(resources.Items)
errors.CheckError(err)
localObjs := []*unstructured.Unstructured{}
items := make([]struct {
key kube.ResourceKey
live *unstructured.Unstructured
target *unstructured.Unstructured
}, 0)
if local != "" {
localObjs = getLocalObjects(app, local, env, values)
if len(resources.Items) != len(localObjs) {
log.Fatal("Local comparison doesn't have the same number of resources as the live resource")
localObjs := groupLocalObjs(getLocalObjects(app, local, env, values), liveObjs, app.Spec.Destination.Namespace)
for _, res := range resources.Items {
var live = &unstructured.Unstructured{}
err := json.Unmarshal([]byte(res.LiveState), &live)
errors.CheckError(err)
key := kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name)
if local, ok := localObjs[key]; ok || live != nil {
if local != nil {
// TODO(jessesuen): expose the configured app label key in settings and
// use configured label instead of default
err = kube.SetAppInstanceLabel(local, common.LabelKeyAppInstance, appName)
errors.CheckError(err)
}
items = append(items, struct {
key kube.ResourceKey
live *unstructured.Unstructured
target *unstructured.Unstructured
}{
live: live,
target: local,
key: key,
})
delete(localObjs, key)
}
}
for key, local := range localObjs {
items = append(items, struct {
key kube.ResourceKey
live *unstructured.Unstructured
target *unstructured.Unstructured
}{
live: nil,
target: local,
key: key,
})
}
} else {
for i := range resources.Items {
res := resources.Items[i]
var live = &unstructured.Unstructured{}
err := json.Unmarshal([]byte(res.LiveState), &live)
errors.CheckError(err)
var target = &unstructured.Unstructured{}
err = json.Unmarshal([]byte(res.TargetState), &target)
errors.CheckError(err)
items = append(items, struct {
key kube.ResourceKey
live *unstructured.Unstructured
target *unstructured.Unstructured
}{
live: live,
target: target,
key: kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name),
})
}
} else if env != "" || len(values) > 0 {
log.Fatal("--env option invalid when performing git diff")
}
for i := range resources.Items {
item := resources.Items[i]
// Diff is already available in ManagedResource Diff field but we have to recalculate diff again due to https://github.com/yudai/gojsondiff/issues/31
diffRes := diff.Diff(targetObjs[i], liveObjs[i])
fmt.Printf("===== %s %s ======\n", item.Kind, item.Name)
if local != "" {
liveObj, err := item.LiveObject()
errors.CheckError(err)
err = kube.SetAppInstanceLabel(localObjs[i], appName)
errors.CheckError(err)
diffRes = diff.Diff(localObjs[i], liveObj)
}
if diffRes.Modified {
formatOpts := formatter.AsciiFormatterConfig{
Coloring: terminal.IsTerminal(int(os.Stdout.Fd())),
for i := range items {
item := items[i]
// Diff is already available in ResourceDiff Diff field but we have to recalculate diff again due to https://github.com/yudai/gojsondiff/issues/31
diffRes := diff.Diff(item.target, item.live)
fmt.Printf("===== %s/%s %s/%s ======\n", item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name)
if diffRes.Modified || item.target == nil || item.live == nil {
var live *unstructured.Unstructured
var target *unstructured.Unstructured
if item.target != nil && item.live != nil {
target = item.live
live = item.live.DeepCopy()
gojsondiff.New().ApplyPatch(live.Object, diffRes.Diff)
} else {
live = item.live
target = item.target
}
liveObj := liveObjs[i]
if liveObj == nil {
liveObj = &unstructured.Unstructured{Object: make(map[string]interface{})}
}
out, err := diffRes.ASCIIFormat(liveObj, formatOpts)
errors.CheckError(err)
fmt.Println(out)
printDiff(item.key.Name, live, target)
}
}
},
}
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
command.Flags().StringVar(&local, "local", "", "Compare live app to a local ksonnet app")
command.Flags().StringVar(&env, "env", "", "Compare live app to a specific environment")
command.Flags().StringArrayVar(&values, "values", []string{}, "Helm values file(s) in the helm directory to use")
return command
}
func printDiff(name string, live *unstructured.Unstructured, target *unstructured.Unstructured) {
tempDir, err := ioutil.TempDir("", "argocd-diff")
errors.CheckError(err)
targetFile := path.Join(tempDir, fmt.Sprintf("%s", name))
targetData := []byte("")
if target != nil {
targetData, err = yaml.Marshal(target)
errors.CheckError(err)
}
err = ioutil.WriteFile(targetFile, targetData, 0644)
errors.CheckError(err)
liveFile := path.Join(tempDir, fmt.Sprintf("%s-live.yaml", name))
liveData := []byte("")
if live != nil {
liveData, err = yaml.Marshal(live)
errors.CheckError(err)
}
err = ioutil.WriteFile(liveFile, liveData, 0644)
errors.CheckError(err)
cmdBinary := "diff"
var args []string
if envDiff := os.Getenv("KUBECTL_EXTERNAL_DIFF"); envDiff != "" {
parts, err := shlex.Split(envDiff)
errors.CheckError(err)
cmdBinary = parts[0]
args = parts[1:]
}
cmd := exec.Command(cmdBinary, append(args, liveFile, targetFile)...)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
_ = cmd.Run()
}
// NewApplicationDeleteCommand returns a new instance of an `argocd app delete` command
func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
@@ -732,12 +880,12 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
var fmtStr string
headers := []interface{}{"NAME", "CLUSTER", "NAMESPACE", "PROJECT", "STATUS", "HEALTH", "CONDITIONS"}
headers := []interface{}{"NAME", "CLUSTER", "NAMESPACE", "PROJECT", "STATUS", "HEALTH", "SYNCPOLICY", "CONDITIONS"}
if output == "wide" {
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
headers = append(headers, "REPO", "PATH", "TARGET")
} else {
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
}
fmt.Fprintf(w, fmtStr, headers...)
for _, app := range apps.Items {
@@ -748,6 +896,7 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
app.Spec.GetProject(),
app.Status.Sync.Status,
app.Status.Health.Status,
formatSyncPolicy(app),
formatConditionsSummary(app),
}
if output == "wide" {
@@ -762,6 +911,17 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
return command
}
func formatSyncPolicy(app argoappv1.Application) string {
if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil {
return "<none>"
}
policy := "Auto"
if app.Spec.SyncPolicy.Automated.Prune {
policy = policy + "-Prune"
}
return policy
}
func formatConditionsSummary(app argoappv1.Application) string {
typeToCnt := make(map[string]int)
for i := range app.Status.Conditions {
@@ -1074,7 +1234,8 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
var err error
if refresh {
conn, appClient := acdClient.NewApplicationClientOrDie()
app, err = appClient.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: true})
refreshType := string(argoappv1.RefreshTypeNormal)
app, err = appClient.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: &refreshType})
errors.CheckError(err)
_ = conn.Close()
}
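
The printDiff helper introduced above writes the live and target manifests to temp files and shells out to diff, or to whatever command KUBECTL_EXTERNAL_DIFF names. A minimal sketch of just that environment-variable handling, assuming github.com/google/shlex (added to Gopkg.lock above); the file paths used in main are placeholders rather than the temp files the real command creates.

package main

import (
	"os"
	"os/exec"

	"github.com/google/shlex"
)

// runExternalDiff mirrors the pattern from printDiff: default to plain `diff`,
// but let KUBECTL_EXTERNAL_DIFF override both the binary and its arguments.
func runExternalDiff(liveFile, targetFile string) error {
	cmdBinary := "diff"
	var args []string
	if envDiff := os.Getenv("KUBECTL_EXTERNAL_DIFF"); envDiff != "" {
		parts, err := shlex.Split(envDiff)
		if err != nil {
			return err
		}
		cmdBinary, args = parts[0], parts[1:]
	}
	cmd := exec.Command(cmdBinary, append(args, liveFile, targetFile)...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// `diff` exits non-zero when the files differ, so the exit status is
	// deliberately ignored, matching the `_ = cmd.Run()` in the diff above.
	_ = cmd.Run()
	return nil
}

func main() {
	// e.g. KUBECTL_EXTERNAL_DIFF="colordiff -u" ./example
	_ = runExternalDiff("live.yaml", "target.yaml")
}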

View File

@@ -234,7 +234,7 @@ func oauth2Login(ctx context.Context, port int, oauth2conf *oauth2.Config, provi
fmt.Fprintf(w, successPage)
completionChan <- ""
}
srv := &http.Server{Addr: ":" + strconv.Itoa(port)}
srv := &http.Server{Addr: "localhost:" + strconv.Itoa(port)}
http.HandleFunc("/auth/callback", callbackHandler)
// Redirect user to login & consent page to ask for permission for the scopes specified above.

View File

@@ -3,7 +3,7 @@ package common
// Default service addresses and URLS of Argo CD internal services
const (
// DefaultAppControllerServerAddr is the gRPC address of the Argo CD app controller server
DefaultAppControllerServerAddr = "application-controller:8083"
DefaultAppControllerServerAddr = "argocd-application-controller:8083"
// DefaultRepoServerAddr is the gRPC address of the Argo CD repo server
DefaultRepoServerAddr = "argocd-repo-server:8081"
// DefaultDexServerAddr is the HTTP address of the Dex OIDC server, which we run a reverse proxy against
@@ -31,6 +31,10 @@ const (
AuthCookieName = "argocd.token"
// RevisionHistoryLimit is the max number of successful sync to keep in history
RevisionHistoryLimit = 10
// K8sClientConfigQPS controls the QPS to be used in K8s REST client configs
K8sClientConfigQPS = 25
// K8sClientConfigBurst controls the burst to be used in K8s REST client configs
K8sClientConfigBurst = 50
)
// Dex related constants
@@ -67,16 +71,9 @@ const (
AnnotationKeyHook = "argocd.argoproj.io/hook"
// AnnotationKeyHookDeletePolicy is the policy of deleting a hook
AnnotationKeyHookDeletePolicy = "argocd.argoproj.io/hook-delete-policy"
// AnnotationKeyRefresh is the annotation key in the application which is updated with an
// arbitrary value (i.e. timestamp) on a git event, to force the controller to wake up and
// re-evaluate the application
// AnnotationKeyRefresh is the annotation key which indicates that app needs to be refreshed. Removed by application controller after app is refreshed.
// Might take values 'normal'/'hard'. Value 'hard' means manifest cache and target cluster state cache should be invalidated before refresh.
AnnotationKeyRefresh = "argocd.argoproj.io/refresh"
// AnnotationKeyConnectionStatus contains connection state status
AnnotationKeyConnectionStatus = "argocd.argoproj.io/connection-status"
// AnnotationKeyConnectionMessage contains additional information about connection status
AnnotationKeyConnectionMessage = "argocd.argoproj.io/connection-message"
// AnnotationConnectionModifiedAt contains timestamp when connection state had been modified
AnnotationConnectionModifiedAt = "argocd.argoproj.io/connection-modified-at"
// AnnotationKeyManagedBy is annotation name which indicates that k8s resource is managed by an application.
AnnotationKeyManagedBy = "managed-by"
// AnnotationValueManagedByArgoCD is a 'managed-by' annotation value for resources managed by Argo CD
@@ -95,6 +92,7 @@ const (
EnvVarSSODebug = "ARGOCD_SSO_DEBUG"
// EnvVarRBACDebug is an environment variable to enable additional RBAC debugging in the API server
EnvVarRBACDebug = "ARGOCD_RBAC_DEBUG"
// EnvVarLegacyLabels is an environment variable to use the legacy 'applications.argoproj.io/app-name' label instead of 'app.kubernetes.io/instance'
EnvVarLegacyLabels = "ARGOCD_LEGACY_LABELS"
// EnvVarFakeInClusterConfig is an environment variable to fake an in-cluster RESTConfig using
// the current kubectl context (for development purposes)
EnvVarFakeInClusterConfig = "ARGOCD_FAKE_IN_CLUSTER"
)
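
The new K8sClientConfigQPS and K8sClientConfigBurst constants are applied to the Kubernetes REST client in the application-controller and API-server commands above (config.QPS / config.Burst). A minimal client-go illustration of that pattern follows; the kubeconfig loading here is simplified and is not how the real commands build their client config.

package main

import (
	"log"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// Stand-ins for common.K8sClientConfigQPS / common.K8sClientConfigBurst from the diff.
const (
	k8sClientConfigQPS   = 25
	k8sClientConfigBurst = 50
)

func main() {
	// Simplified: load whatever kubeconfig is in the default location. The real
	// commands build clientcmd.ClientConfig from their CLI flags instead.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}

	// Raise the client-side rate limits, as the controller and API server now do.
	config.QPS = k8sClientConfigQPS
	config.Burst = k8sClientConfigBurst

	client := kubernetes.NewForConfigOrDie(config)
	_ = client
}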

View File

@@ -13,13 +13,14 @@ import (
"github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -31,11 +32,14 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/apis/core"
"github.com/argoproj/argo-cd/common"
statecache "github.com/argoproj/argo-cd/controller/cache"
"github.com/argoproj/argo-cd/controller/services"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
appinformers "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
"github.com/argoproj/argo-cd/pkg/client/informers/externalversions/application/v1alpha1"
applisters "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/argo"
@@ -53,23 +57,26 @@ const (
// ApplicationController is the controller for application resources.
type ApplicationController struct {
namespace string
kubeClientset kubernetes.Interface
kubectl kube.Kubectl
applicationClientset appclientset.Interface
auditLogger *argo.AuditLogger
appRefreshQueue workqueue.RateLimitingInterface
appOperationQueue workqueue.RateLimitingInterface
appInformer cache.SharedIndexInformer
appStateManager AppStateManager
stateCache statecache.LiveStateCache
statusRefreshTimeout time.Duration
repoClientset reposerver.Clientset
db db.ArgoDB
forceRefreshApps map[string]bool
forceRefreshAppsMutex *sync.Mutex
managedResources map[string][]managedResource
managedResourcesMutex *sync.Mutex
namespace string
kubeClientset kubernetes.Interface
kubectl kube.Kubectl
applicationClientset appclientset.Interface
auditLogger *argo.AuditLogger
appRefreshQueue workqueue.RateLimitingInterface
appOperationQueue workqueue.RateLimitingInterface
appInformer cache.SharedIndexInformer
projInformer cache.SharedIndexInformer
appStateManager AppStateManager
stateCache statecache.LiveStateCache
statusRefreshTimeout time.Duration
repoClientset reposerver.Clientset
db db.ArgoDB
settings *settings_util.ArgoCDSettings
settingsMgr *settings_util.SettingsManager
refreshRequestedApps map[string]bool
refreshRequestedAppsMutex *sync.Mutex
managedResources map[string][]managedResource
managedResourcesMutex *sync.Mutex
}
type ApplicationControllerConfig struct {
@@ -80,40 +87,48 @@ type ApplicationControllerConfig struct {
// NewApplicationController creates new instance of ApplicationController.
func NewApplicationController(
namespace string,
settingsMgr *settings_util.SettingsManager,
kubeClientset kubernetes.Interface,
applicationClientset appclientset.Interface,
repoClientset reposerver.Clientset,
appResyncPeriod time.Duration,
) *ApplicationController {
settingsMgr := settings_util.NewSettingsManager(kubeClientset, namespace)
) (*ApplicationController, error) {
db := db.NewDB(namespace, settingsMgr, kubeClientset)
settings, err := settingsMgr.GetSettings()
if err != nil {
return nil, err
}
kubectlCmd := kube.KubectlCmd{}
ctrl := ApplicationController{
namespace: namespace,
kubeClientset: kubeClientset,
kubectl: kubectlCmd,
applicationClientset: applicationClientset,
repoClientset: repoClientset,
appRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
appOperationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
db: db,
statusRefreshTimeout: appResyncPeriod,
forceRefreshApps: make(map[string]bool),
forceRefreshAppsMutex: &sync.Mutex{},
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "application-controller"),
managedResources: make(map[string][]managedResource),
managedResourcesMutex: &sync.Mutex{},
namespace: namespace,
kubeClientset: kubeClientset,
kubectl: kubectlCmd,
applicationClientset: applicationClientset,
repoClientset: repoClientset,
appRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
appOperationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
db: db,
statusRefreshTimeout: appResyncPeriod,
refreshRequestedApps: make(map[string]bool),
refreshRequestedAppsMutex: &sync.Mutex{},
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "argocd-application-controller"),
managedResources: make(map[string][]managedResource),
managedResourcesMutex: &sync.Mutex{},
settingsMgr: settingsMgr,
settings: settings,
}
appInformer := ctrl.newApplicationInformer()
stateCache := statecache.NewLiveStateCache(db, appInformer, kubectlCmd, func(appName string) {
ctrl.forceAppRefresh(appName)
projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, cache.Indexers{})
stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settings, kubectlCmd, func(appName string) {
ctrl.requestAppRefresh(appName)
ctrl.appRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, appName))
})
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd, stateCache)
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd, ctrl.settings, stateCache, projInformer)
ctrl.appInformer = appInformer
ctrl.projInformer = projInformer
ctrl.appStateManager = appStateManager
ctrl.stateCache = stateCache
return &ctrl
return &ctrl, nil
}
func (ctrl *ApplicationController) getApp(name string) (*appv1.Application, error) {
@@ -251,7 +266,7 @@ func hideSecretData(target *unstructured.Unstructured, live *unstructured.Unstru
if obj == nil {
continue
}
diff.EncodeSecretStringData(obj)
diff.NormalizeSecret(obj)
if data, found, err := unstructured.NestedMap(obj.Object, "data"); found && err == nil {
for k := range data {
keys[k] = true
@@ -313,8 +328,10 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
defer ctrl.appRefreshQueue.ShutDown()
go ctrl.appInformer.Run(ctx.Done())
go ctrl.projInformer.Run(ctx.Done())
go ctrl.watchSettings(ctx)
if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced) {
if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced, ctrl.projInformer.HasSynced) {
log.Error("Timed out waiting for caches to sync")
return
}
@@ -342,7 +359,7 @@ func (ctrl *ApplicationController) CreateGRPC(tlsConfCustomizer tlsutil.ConfigCu
// generate TLS cert
hosts := []string{
"localhost",
"application-controller",
"argocd-application-controller",
}
cert, err := tlsutil.GenerateX509KeyPair(tlsutil.CertOptions{
Hosts: hosts,
@@ -374,18 +391,18 @@ func (ctrl *ApplicationController) CreateGRPC(tlsConfCustomizer tlsutil.ConfigCu
return server, nil
}
func (ctrl *ApplicationController) forceAppRefresh(appName string) {
ctrl.forceRefreshAppsMutex.Lock()
defer ctrl.forceRefreshAppsMutex.Unlock()
ctrl.forceRefreshApps[appName] = true
func (ctrl *ApplicationController) requestAppRefresh(appName string) {
ctrl.refreshRequestedAppsMutex.Lock()
defer ctrl.refreshRequestedAppsMutex.Unlock()
ctrl.refreshRequestedApps[appName] = true
}
func (ctrl *ApplicationController) isRefreshForced(appName string) bool {
ctrl.forceRefreshAppsMutex.Lock()
defer ctrl.forceRefreshAppsMutex.Unlock()
_, ok := ctrl.forceRefreshApps[appName]
func (ctrl *ApplicationController) isRefreshRequested(appName string) bool {
ctrl.refreshRequestedAppsMutex.Lock()
defer ctrl.refreshRequestedAppsMutex.Unlock()
_, ok := ctrl.refreshRequestedApps[appName]
if ok {
delete(ctrl.forceRefreshApps, appName)
delete(ctrl.refreshRequestedApps, appName)
}
return ok
}
@@ -572,7 +589,7 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
if state.Phase.Completed() {
// if we just completed an operation, force a refresh so that UI will report up-to-date
// sync/health information
ctrl.forceAppRefresh(app.ObjectMeta.Name)
ctrl.requestAppRefresh(app.ObjectMeta.Name)
}
}
@@ -662,9 +679,16 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
log.Warnf("Key '%s' in index is not an application", appKey)
return
}
if !ctrl.needRefreshAppStatus(origApp, ctrl.statusRefreshTimeout) {
needRefresh, refreshType := ctrl.needRefreshAppStatus(origApp, ctrl.statusRefreshTimeout)
if !needRefresh {
return
}
startTime := time.Now()
defer func() {
logCtx := log.WithFields(log.Fields{"application": origApp.Name})
logCtx.Infof("Reconciliation completed in %v", time.Now().Sub(startTime))
}()
// NOTE: normalization returns a copy
app := ctrl.normalizeApplication(origApp)
@@ -677,7 +701,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
return
}
compareResult, err := ctrl.appStateManager.CompareAppState(app, "", nil)
compareResult, err := ctrl.appStateManager.CompareAppState(app, "", nil, refreshType == appv1.RefreshTypeHard)
if err != nil {
conditions = append(conditions, appv1.ApplicationCondition{Type: appv1.ApplicationConditionComparisonError, Message: err.Error()})
} else {
@@ -701,29 +725,35 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
// needRefreshAppStatus answers if application status needs to be refreshed.
// Returns true if application never been compared, has changed or comparison result has expired.
func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) bool {
func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType) {
logCtx := log.WithFields(log.Fields{"application": app.Name})
var reason string
refreshType := appv1.RefreshTypeNormal
expired := app.Status.ObservedAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
if ctrl.isRefreshForced(app.Name) {
reason = "force refresh"
if requestedType, ok := app.IsRefreshRequested(); ok {
refreshType = requestedType
reason = fmt.Sprintf("%s refresh requested", refreshType)
} else if ctrl.isRefreshRequested(app.Name) {
reason = fmt.Sprintf("controller refresh requested")
} else if app.Status.Sync.Status == appv1.SyncStatusCodeUnknown && expired {
reason = "comparison status unknown"
} else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo) {
} else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) {
reason = "spec.source differs"
} else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) {
reason = "spec.source differs"
} else if expired {
reason = fmt.Sprintf("comparison expired. observedAt: %v, expiry: %v", app.Status.ObservedAt, statusRefreshTimeout)
}
if reason != "" {
logCtx.Infof("Refreshing app status (%s)", reason)
return true
return true, refreshType
}
return false
return false, refreshType
}
func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application) ([]appv1.ApplicationCondition, bool) {
conditions := make([]appv1.ApplicationCondition, 0)
proj, err := argo.GetAppProject(&app.Spec, ctrl.applicationClientset, ctrl.namespace)
proj, err := argo.GetAppProject(&app.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace)
if err != nil {
if errors.IsNotFound(err) {
conditions = append(conditions, appv1.ApplicationCondition{
@@ -808,7 +838,17 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
message := fmt.Sprintf("Updated health status: %s -> %s", orig.Status.Health.Status, newStatus.Health.Status)
ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message)
}
patch, modified, err := diff.CreateTwoWayMergePatch(&appv1.Application{Status: orig.Status}, &appv1.Application{Status: *newStatus}, appv1.Application{})
var newAnnotations map[string]string
if orig.GetAnnotations() != nil {
newAnnotations = make(map[string]string)
for k, v := range orig.GetAnnotations() {
newAnnotations[k] = v
}
delete(newAnnotations, common.AnnotationKeyRefresh)
}
patch, modified, err := diff.CreateTwoWayMergePatch(
&appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: orig.GetAnnotations()}, Status: orig.Status},
&appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: newAnnotations}, Status: *newStatus}, appv1.Application{})
if err != nil {
logCtx.Errorf("Error constructing app status patch: %v", err)
return
@@ -919,7 +959,7 @@ func (ctrl *ApplicationController) newApplicationInformer() cache.SharedIndexInf
if oldOK && newOK {
if toggledAutomatedSync(oldApp, newApp) {
log.WithField("application", newApp.Name).Info("Enabled automated sync")
ctrl.forceAppRefresh(newApp.Name)
ctrl.requestAppRefresh(newApp.Name)
}
}
ctrl.appRefreshQueue.Add(key)
@@ -955,3 +995,27 @@ func toggledAutomatedSync(old *appv1.Application, new *appv1.Application) bool {
// nothing changed
return false
}
func (ctrl *ApplicationController) watchSettings(ctx context.Context) {
updateCh := make(chan *settings_util.ArgoCDSettings, 1)
ctrl.settingsMgr.Subscribe(updateCh)
prevAppLabelKey := ctrl.settings.GetAppInstanceLabelKey()
done := false
for !done {
select {
case newSettings := <-updateCh:
newAppLabelKey := newSettings.GetAppInstanceLabelKey()
if prevAppLabelKey != newAppLabelKey {
ctrl.settings.AppInstanceLabelKey = newAppLabelKey
log.Infof("label key changed: %s -> %s", prevAppLabelKey, newAppLabelKey)
ctrl.stateCache.Invalidate()
prevAppLabelKey = newAppLabelKey
}
case <-ctx.Done():
done = true
}
}
log.Info("shutting down settings watch")
ctrl.settingsMgr.Unsubscribe(updateCh)
close(updateCh)
}

View File

@@ -1,6 +1,7 @@
package controller
import (
"context"
"encoding/json"
"strings"
"testing"
@@ -25,6 +26,7 @@ import (
mockrepoclient "github.com/argoproj/argo-cd/reposerver/repository/mocks"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
)
type fakeData struct {
@@ -63,14 +65,24 @@ func newFakeController(data *fakeData) *ApplicationController {
},
Data: nil,
}
ctrl := NewApplicationController(
kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
ctrl, err := NewApplicationController(
test.FakeArgoCDNamespace,
fake.NewSimpleClientset(&clust, &cm, &secret),
settingsMgr,
kubeClient,
appclientset.NewSimpleClientset(data.apps...),
&mockRepoClientset,
time.Minute,
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go ctrl.projInformer.Run(ctx.Done())
cache.WaitForCacheSync(ctx.Done(), ctrl.projInformer.HasSynced)
if err != nil {
panic(err)
}
// Mock out call to GetManagedLiveObjs if fake data supplied
if data.managedLiveObjs != nil {
mockStateCache := mockstatecache.LiveStateCache{}

View File

@@ -4,25 +4,21 @@ import (
"context"
"fmt"
"sync"
"time"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/kube"
)
const (
watchResourcesRetryTimeout = 10 * time.Second
"github.com/argoproj/argo-cd/util/settings"
)
type LiveStateCache interface {
@@ -35,6 +31,8 @@ type LiveStateCache interface {
Run(ctx context.Context)
// Deletes specified resource from cluster.
Delete(server string, obj *unstructured.Unstructured) error
// Invalidate invalidates the entire cluster state cache
Invalidate()
}
func GetTargetObjKey(a *appv1.Application, un *unstructured.Unstructured, isNamespaced bool) kube.ResourceKey {
@@ -48,7 +46,7 @@ func GetTargetObjKey(a *appv1.Application, un *unstructured.Unstructured, isName
return key
}
func NewLiveStateCache(db db.ArgoDB, appInformer cache.SharedIndexInformer, kubectl kube.Kubectl, onAppUpdated func(appName string)) LiveStateCache {
func NewLiveStateCache(db db.ArgoDB, appInformer cache.SharedIndexInformer, settings *settings.ArgoCDSettings, kubectl kube.Kubectl, onAppUpdated func(appName string)) LiveStateCache {
return &liveStateCache{
appInformer: appInformer,
db: db,
@@ -56,6 +54,7 @@ func NewLiveStateCache(db db.ArgoDB, appInformer cache.SharedIndexInformer, kube
lock: &sync.Mutex{},
onAppUpdated: onAppUpdated,
kubectl: kubectl,
settings: settings,
}
}
@@ -66,10 +65,11 @@ type liveStateCache struct {
appInformer cache.SharedIndexInformer
onAppUpdated func(appName string)
kubectl kube.Kubectl
settings *settings.ArgoCDSettings
}
func (c *liveStateCache) processEvent(event watch.EventType, obj *unstructured.Unstructured, url string) error {
info, err := c.getCluster(url)
info, err := c.getSyncedCluster(url)
if err != nil {
return err
}
@@ -85,54 +85,58 @@ func (c *liveStateCache) removeCluster(server string) {
func (c *liveStateCache) getCluster(server string) (*clusterInfo, error) {
c.lock.Lock()
defer c.lock.Unlock()
info, ok := c.clusters[server]
if !ok {
cluster, err := c.db.GetCluster(context.Background(), server)
if err != nil {
return nil, err
}
info = &clusterInfo{
apis: make(map[schema.GroupVersionKind]v1.APIResource),
apis: make(map[schema.GroupVersionKind]metav1.APIResource),
lock: &sync.Mutex{},
nodes: make(map[kube.ResourceKey]*node),
nsIndex: make(map[string]map[kube.ResourceKey]*node),
onAppUpdated: c.onAppUpdated,
kubectl: c.kubectl,
cluster: cluster,
syncTime: nil,
syncLock: &sync.Mutex{},
log: log.WithField("server", cluster.Server),
settings: c.settings,
}
c.clusters[cluster.Server] = info
disco, err := discovery.NewDiscoveryClientForConfig(cluster.RESTConfig())
if err != nil {
return nil, err
}
resources, err := disco.ServerResources()
if err != nil {
return nil, err
}
for _, r := range resources {
gv, err := schema.ParseGroupVersion(r.GroupVersion)
if err != nil {
gv = schema.GroupVersion{}
}
for i := range r.APIResources {
info.apis[gv.WithKind(r.APIResources[i].Kind)] = r.APIResources[i]
}
}
}
c.lock.Unlock()
err := info.ensureSynced()
return info, nil
}
func (c *liveStateCache) getSyncedCluster(server string) (*clusterInfo, error) {
info, err := c.getCluster(server)
if err != nil {
return nil, err
}
err = info.ensureSynced()
if err != nil {
return nil, err
}
return info, nil
}
func (c *liveStateCache) Invalidate() {
log.Info("invalidating live state cache")
c.lock.Lock()
defer c.lock.Unlock()
for _, clust := range c.clusters {
clust.lock.Lock()
clust.invalidate()
clust.lock.Unlock()
}
log.Info("live state cache invalidated")
}
func (c *liveStateCache) Delete(server string, obj *unstructured.Unstructured) error {
clusterInfo, err := c.getCluster(server)
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return err
}
@@ -140,7 +144,7 @@ func (c *liveStateCache) Delete(server string, obj *unstructured.Unstructured) e
}
func (c *liveStateCache) IsNamespaced(server string, gvk schema.GroupVersionKind) (bool, error) {
clusterInfo, err := c.getCluster(server)
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return false, err
}
@@ -148,7 +152,7 @@ func (c *liveStateCache) IsNamespaced(server string, gvk schema.GroupVersionKind
}
func (c *liveStateCache) GetChildren(server string, obj *unstructured.Unstructured) ([]appv1.ResourceNode, error) {
clusterInfo, err := c.getCluster(server)
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return nil, err
}
@@ -156,7 +160,7 @@ func (c *liveStateCache) GetChildren(server string, obj *unstructured.Unstructur
}
func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
clusterInfo, err := c.getCluster(a.Spec.Destination.Server)
clusterInfo, err := c.getSyncedCluster(a.Spec.Destination.Server)
if err != nil {
return nil, err
}
@@ -220,11 +224,24 @@ func (c *liveStateCache) Run(ctx context.Context) {
}
}
c.appInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{AddFunc: onAppModified, DeleteFunc: onAppModified})
c.appInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: onAppModified,
UpdateFunc: func(oldObj, newObj interface{}) {
oldApp, oldOk := oldObj.(*appv1.Application)
newApp, newOk := newObj.(*appv1.Application)
if oldOk && newOk {
if oldApp.Spec.Destination.Server != newApp.Spec.Destination.Server {
onAppModified(oldObj)
onAppModified(newApp)
}
}
},
DeleteFunc: onAppModified,
})
return c.db.WatchClusters(ctx, clusterEventCallback)
}, "watch clusters", ctx, watchResourcesRetryTimeout)
}, "watch clusters", ctx, clusterRetryTimeout)
<-ctx.Done()
}
@@ -238,9 +255,14 @@ func (c *liveStateCache) watchClusterResources(ctx context.Context, item appv1.C
}
}()
config := item.RESTConfig()
watchStartTime := time.Now()
ch, err := c.kubectl.WatchResources(ctx, config, "")
ctx, cancel := context.WithCancel(ctx)
defer cancel()
knownCRDs, err := getCRDs(config)
if err != nil {
return err
}
ch, err := c.kubectl.WatchResources(ctx, config, "")
if err != nil {
return err
}
@@ -248,10 +270,16 @@ func (c *liveStateCache) watchClusterResources(ctx context.Context, item appv1.C
eventObj := event.Object.(*unstructured.Unstructured)
if kube.IsCRD(eventObj) {
// restart if new CRD has been created after watch started
if event.Type == watch.Added && watchStartTime.Before(eventObj.GetCreationTimestamp().Time) {
return fmt.Errorf("Restarting the watch because a new CRD was added.")
if event.Type == watch.Added {
if !knownCRDs[eventObj.GetName()] {
c.removeCluster(item.Server)
return fmt.Errorf("Restarting the watch because a new CRD %s was added", eventObj.GetName())
} else {
log.Infof("CRD %s updated", eventObj.GetName())
}
} else if event.Type == watch.Deleted {
return fmt.Errorf("Restarting the watch because a CRD was deleted.")
c.removeCluster(item.Server)
return fmt.Errorf("Restarting the watch because CRD %s was deleted", eventObj.GetName())
}
}
err = c.processEvent(event.Type, eventObj, item.Server)
@@ -260,5 +288,20 @@ func (c *liveStateCache) watchClusterResources(ctx context.Context, item appv1.C
}
}
return fmt.Errorf("resource updates channel has closed")
}, fmt.Sprintf("watch app resources on %s", item.Server), ctx, watchResourcesRetryTimeout)
}, fmt.Sprintf("watch app resources on %s", item.Server), ctx, clusterRetryTimeout)
}
// getCRDs returns a set keyed by the names of the CRDs currently defined in the cluster
func getCRDs(config *rest.Config) (map[string]bool, error) {
crdsByName := make(map[string]bool)
apiextensionsClientset := apiextensionsclient.NewForConfigOrDie(config)
crds, err := apiextensionsClientset.ApiextensionsV1beta1().CustomResourceDefinitions().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, crd := range crds.Items {
crdsByName[crd.Name] = true
}
// TODO: support api service, like ServiceCatalog
return crdsByName, nil
}

View File

@@ -1,39 +1,46 @@
package cache
import (
"fmt"
"runtime/debug"
"sync"
"time"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
)
const (
clusterSyncTimeout = 1 * time.Hour
clusterSyncTimeout = 1 * time.Hour
clusterRetryTimeout = 10 * time.Second
)
type clusterInfo struct {
apis map[schema.GroupVersionKind]metav1.APIResource
nodes map[kube.ResourceKey]*node
nsIndex map[string]map[kube.ResourceKey]*node
lock *sync.Mutex
onAppUpdated func(appName string)
kubectl kube.Kubectl
cluster *appv1.Cluster
syncLock *sync.Mutex
syncTime *time.Time
syncError error
log *log.Entry
settings *settings.ArgoCDSettings
}
func createObjInfo(un *unstructured.Unstructured) *node {
func createObjInfo(un *unstructured.Unstructured, appInstanceLabel string) *node {
ownerRefs := un.GetOwnerReferences()
// Special case for endpoint. Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed
if un.GroupVersionKind().Group == "" && un.GetKind() == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0 {
@@ -52,11 +59,9 @@ func createObjInfo(un *unstructured.Unstructured) *node {
Namespace: un.GetNamespace(),
},
ownerRefs: ownerRefs,
parents: make(map[kube.ResourceKey]*node),
children: make(map[kube.ResourceKey]*node),
tags: getTags(un),
info: getNodeInfo(un),
}
appName := kube.GetAppInstanceLabel(un)
appName := kube.GetAppInstanceLabel(un, appInstanceLabel)
if len(ownerRefs) == 0 && appName != "" {
info.appName = appName
info.resource = un
@@ -64,20 +69,68 @@ func createObjInfo(un *unstructured.Unstructured) *node {
return info
}
func (c *clusterInfo) synced() bool {
return c.syncTime != nil && time.Now().Before(c.syncTime.Add(clusterSyncTimeout))
func (c *clusterInfo) setNode(n *node) {
key := n.resourceKey()
c.nodes[key] = n
ns, ok := c.nsIndex[key.Namespace]
if !ok {
ns = make(map[kube.ResourceKey]*node)
c.nsIndex[key.Namespace] = ns
}
ns[key] = n
}
func (c *clusterInfo) ensureSynced() error {
if c.synced() {
return nil
func (c *clusterInfo) removeNode(key kube.ResourceKey) {
delete(c.nodes, key)
if ns, ok := c.nsIndex[key.Namespace]; ok {
delete(ns, key)
if len(ns) == 0 {
delete(c.nsIndex, key.Namespace)
}
}
c.syncLock.Lock()
defer c.syncLock.Unlock()
if c.synced() {
return nil
}
func (c *clusterInfo) invalidate() {
c.syncTime = nil
}
func (c *clusterInfo) synced() bool {
if c.syncTime == nil {
return false
}
if c.syncError != nil {
return time.Now().Before(c.syncTime.Add(clusterRetryTimeout))
}
return time.Now().Before(c.syncTime.Add(clusterSyncTimeout))
}
func (c *clusterInfo) sync() (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
}
}()
c.log.Info("Start syncing cluster")
clusterResources, err := c.kubectl.GetAPIResources(c.cluster.RESTConfig())
if err != nil {
if len(clusterResources) == 0 {
return err
}
log.Warnf("Partial success when getting API resources during sync: %v", err)
}
c.apis = make(map[schema.GroupVersionKind]metav1.APIResource)
for _, r := range clusterResources {
gv, err := schema.ParseGroupVersion(r.GroupVersion)
if err != nil {
gv = schema.GroupVersion{}
}
for i := range r.APIResources {
c.apis[gv.WithKind(r.APIResources[i].Kind)] = r.APIResources[i]
}
}
c.nodes = make(map[kube.ResourceKey]*node)
resources, err := c.kubectl.GetResources(c.cluster.RESTConfig(), "")
if err != nil {
@@ -85,32 +138,42 @@ func (c *clusterInfo) ensureSynced() error {
return err
}
appLabelKey := c.settings.GetAppInstanceLabelKey()
for i := range resources {
c.nodes[kube.GetResourceKey(resources[i])] = createObjInfo(resources[i])
c.setNode(createObjInfo(resources[i], appLabelKey))
}
nodes := make(map[kube.ResourceKey]*node)
for k, v := range c.nodes {
nodes[k] = v
}
for _, obj := range c.nodes {
if len(obj.ownerRefs) == 0 {
obj.fillChildren(nodes)
}
}
resyncTime := time.Now()
c.syncTime = &resyncTime
c.log.Info("Cluster successfully synced")
return nil
}
func (c *clusterInfo) ensureSynced() error {
if c.synced() {
return c.syncError
}
c.syncLock.Lock()
defer c.syncLock.Unlock()
if c.synced() {
return c.syncError
}
err := c.sync()
syncTime := time.Now()
c.syncTime = &syncTime
c.syncError = err
return c.syncError
}
func (c *clusterInfo) getChildren(obj *unstructured.Unstructured) []appv1.ResourceNode {
c.lock.Lock()
defer c.lock.Unlock()
children := make([]appv1.ResourceNode, 0)
if objInfo, ok := c.nodes[kube.GetResourceKey(obj)]; ok {
for _, child := range objInfo.children {
children = append(children, child.childResourceNodes())
nsNodes := c.nsIndex[obj.GetNamespace()]
for _, child := range nsNodes {
if objInfo.isParentOf(child) {
children = append(children, child.childResourceNodes(nsNodes, map[kube.ResourceKey]bool{objInfo.resourceKey(): true}))
}
}
}
return children
@@ -128,11 +191,14 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
defer c.lock.Unlock()
managedObjs := make(map[kube.ResourceKey]*unstructured.Unstructured)
// iterate all objects in live state cache to find ones associated with app
for key, o := range c.nodes {
if o.appName == a.Name && o.resource != nil && len(o.parents) == 0 {
if o.appName == a.Name && o.resource != nil && len(o.ownerRefs) == 0 {
managedObjs[key] = o.resource
}
}
// iterate target objects and identify ones that already exist in the cluster,
// but are simply missing our label
lock := &sync.Mutex{}
err := util.RunAllAsync(len(targetObjs), func(i int) error {
targetObj := targetObjs[i]
@@ -148,8 +214,11 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
} else {
var err error
managedObj, err = c.kubectl.GetResource(c.cluster.RESTConfig(), targetObj.GroupVersionKind(), existingObj.ref.Name, existingObj.ref.Namespace)
err = c.handleError(targetObj.GroupVersionKind(), existingObj.ref.Namespace, existingObj.ref.Name, err)
if err != nil && !errors.IsNotFound(err) {
if err != nil {
if errors.IsNotFound(err) {
c.checkAndInvalidateStaleCache(targetObj.GroupVersionKind(), existingObj.ref.Namespace, existingObj.ref.Name)
return nil
}
return err
}
}
@@ -174,99 +243,79 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
return managedObjs, nil
}
func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
gv, err := schema.ParseGroupVersion(ownerRef.APIVersion)
if err != nil {
gv = schema.GroupVersion{}
}
return gv
}
func (c *clusterInfo) delete(obj *unstructured.Unstructured) error {
err := c.kubectl.DeleteResource(c.cluster.RESTConfig(), obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), false)
err = c.handleError(obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err)
if err != nil && errors.IsNotFound(err) {
err = nil
// a delete request came in for an object which does not exist. it's possible that our cache
// is stale. Check and invalidate if it is
c.lock.Lock()
c.checkAndInvalidateStaleCache(obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
c.lock.Unlock()
return nil
}
return err
}
func (c *clusterInfo) handleError(gvk schema.GroupVersionKind, namespace string, name string, err error) error {
if err != nil && errors.IsNotFound(err) {
c.lock.Lock()
defer c.lock.Unlock()
if _, ok := c.nodes[kube.NewResourceKey(gvk.Group, gvk.Kind, namespace, name)]; ok {
if c.syncTime != nil {
c.log.Warn("Dropped stale cache")
c.syncTime = nil
}
// checkAndInvalidateStaleCache checks if our cache is stale and invalidates it if so.
// It should be called whenever we suspect our cache is stale.
func (c *clusterInfo) checkAndInvalidateStaleCache(gvk schema.GroupVersionKind, namespace string, name string) {
if _, ok := c.nodes[kube.NewResourceKey(gvk.Group, gvk.Kind, namespace, name)]; ok {
if c.syncTime != nil {
c.log.Warnf("invalidated stale cache due to mismatch of %s, %s/%s", gvk, namespace, name)
c.invalidate()
}
}
return err
}
func (c *clusterInfo) processEvent(event watch.EventType, un *unstructured.Unstructured) error {
c.lock.Lock()
defer c.lock.Unlock()
obj, exists := c.nodes[kube.GetResourceKey(un)]
if exists && event == watch.Deleted {
for i := range obj.parents {
delete(obj.parents[i].children, obj.resourceKey())
}
for i := range obj.children {
delete(obj.children[i].parents, obj.resourceKey())
}
delete(c.nodes, kube.GetResourceKey(un))
if obj.appName != "" {
c.onAppUpdated(obj.appName)
}
} else if !exists && event != watch.Deleted {
newObj := createObjInfo(un)
c.nodes[newObj.resourceKey()] = newObj
if len(newObj.ownerRefs) > 0 {
sameNamespace := make(map[kube.ResourceKey]*node)
for k := range c.nodes {
if c.nodes[k].ref.Namespace == un.GetNamespace() {
sameNamespace[k] = c.nodes[k]
}
}
for _, ownerRef := range newObj.ownerRefs {
if owner, ok := sameNamespace[kube.NewResourceKey(ownerRefGV(ownerRef).Group, ownerRef.Kind, un.GetNamespace(), ownerRef.Name)]; ok {
owner.fillChildren(sameNamespace)
}
key := kube.GetResourceKey(un)
existingNode, exists := c.nodes[key]
if event == watch.Deleted {
if exists {
c.removeNode(key)
if existingNode.appName != "" {
c.onAppUpdated(existingNode.appName)
}
}
if newObj.appName != "" {
c.onAppUpdated(newObj.appName)
}
} else if exists {
obj.resourceVersion = un.GetResourceVersion()
toNotify := make([]string, 0)
if obj.appName != "" {
toNotify = append(toNotify, obj.appName)
} else if event != watch.Deleted {
nodes := make([]*node, 0)
if exists {
nodes = append(nodes, existingNode)
}
newObj := createObjInfo(un, c.settings.GetAppInstanceLabelKey())
c.setNode(newObj)
nodes = append(nodes, newObj)
if len(obj.ownerRefs) == 0 {
newAppName := kube.GetAppInstanceLabel(un)
if newAppName != obj.appName {
obj.setAppName(newAppName)
if newAppName != "" {
toNotify = append(toNotify, newAppName)
toNotify := make(map[string]bool)
for i := range nodes {
n := nodes[i]
if ns, ok := c.nsIndex[n.ref.Namespace]; ok {
app := n.getApp(ns)
if app == "" || skipAppRequeing(key) {
continue
}
toNotify[app] = true
}
}
if len(obj.parents) == 0 && obj.appName != "" {
obj.resource = un
} else {
obj.resource = nil
}
obj.tags = getTags(un)
for _, name := range toNotify {
for name := range toNotify {
c.onAppUpdated(name)
}
}
return nil
}
var (
ignoredRefreshResources = map[string]bool{
"/" + kube.EndpointsKind: true,
}
)
// skipAppRequeing checks if the object is an API type which we want to skip requeuing against.
// We ignore API types which have a high churn rate, and/or whose updates are irrelevant to the app
func skipAppRequeing(key kube.ResourceKey) bool {
return ignoredRefreshResources[key.Group+"/"+key.Kind]
}

View File

@@ -6,20 +6,19 @@ import (
"sync"
"testing"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/kube/kubetest"
"github.com/argoproj/argo-cd/util/settings"
log "github.com/sirupsen/logrus"
)
@@ -81,11 +80,13 @@ func newCluster(resources ...*unstructured.Unstructured) *clusterInfo {
kubectl: kubetest.MockKubectlCmd{
Resources: resources,
},
nsIndex: make(map[string]map[kube.ResourceKey]*node),
cluster: &appv1.Cluster{},
syncTime: nil,
syncLock: &sync.Mutex{},
apis: make(map[schema.GroupVersionKind]v1.APIResource),
apis: make(map[schema.GroupVersionKind]metav1.APIResource),
log: log.WithField("cluster", "test"),
settings: &settings.ArgoCDSettings{},
}
}
@@ -101,7 +102,7 @@ func TestGetChildren(t *testing.T) {
Name: "helm-guestbook-pod",
Group: "",
Version: "v1",
Tags: []string{"0/0"},
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
Children: make([]appv1.ResourceNode, 0),
ResourceVersion: "123",
}}, rsChildren)
@@ -115,7 +116,7 @@ func TestGetChildren(t *testing.T) {
Version: "v1beta1",
ResourceVersion: "123",
Children: rsChildren,
Tags: []string{},
Info: []appv1.InfoItem{},
}}, deployChildren)
}
@@ -133,7 +134,7 @@ metadata:
app: helm-guestbook`)
managedObjs, err := cluster.getManagedLiveObjs(&appv1.Application{
ObjectMeta: v1.ObjectMeta{Name: "helm-guestbook"},
ObjectMeta: metav1.ObjectMeta{Name: "helm-guestbook"},
Spec: appv1.ApplicationSpec{
Destination: appv1.ApplicationDestination{
Namespace: "default",
@@ -188,7 +189,7 @@ func TestProcessNewChildEvent(t *testing.T) {
Name: "helm-guestbook-pod",
Group: "",
Version: "v1",
Tags: []string{"0/0"},
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
Children: make([]appv1.ResourceNode, 0),
ResourceVersion: "123",
}, {
@@ -197,7 +198,7 @@ func TestProcessNewChildEvent(t *testing.T) {
Name: "helm-guestbook-pod2",
Group: "",
Version: "v1",
Tags: []string{"0/0"},
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
Children: make([]appv1.ResourceNode, 0),
ResourceVersion: "123",
}}, rsChildren)
@@ -205,8 +206,8 @@ func TestProcessNewChildEvent(t *testing.T) {
func TestUpdateResourceTags(t *testing.T) {
pod := &corev1.Pod{
TypeMeta: v1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
ObjectMeta: v1.ObjectMeta{Name: "testPod", Namespace: "default"},
TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{Name: "testPod", Namespace: "default"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "test",
@@ -222,7 +223,7 @@ func TestUpdateResourceTags(t *testing.T) {
podNode := cluster.nodes[kube.GetResourceKey(mustToUnstructured(pod))]
assert.NotNil(t, podNode)
assert.Equal(t, []string{"0/1"}, podNode.tags)
assert.Equal(t, []appv1.InfoItem{{Name: "Containers", Value: "0/1"}}, podNode.info)
pod.Status = corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
@@ -239,7 +240,7 @@ func TestUpdateResourceTags(t *testing.T) {
podNode = cluster.nodes[kube.GetResourceKey(mustToUnstructured(pod))]
assert.NotNil(t, podNode)
assert.Equal(t, []string{"ExitCode:-1", "0/1"}, podNode.tags)
assert.Equal(t, []appv1.InfoItem{{Name: "Status Reason", Value: "ExitCode:-1"}, {Name: "Containers", Value: "0/1"}}, podNode.info)
}
func TestUpdateAppResource(t *testing.T) {
@@ -258,27 +259,18 @@ func TestUpdateAppResource(t *testing.T) {
assert.Equal(t, []string{"helm-guestbook"}, updatesReceived)
}
func TestUpdateRootAppResource(t *testing.T) {
updatesReceived := make([]string, 0)
cluster := newCluster(testPod, testRS, testDeploy)
cluster.onAppUpdated = func(appName string) {
updatesReceived = append(updatesReceived, appName)
}
func TestCircularReference(t *testing.T) {
dep := testDeploy.DeepCopy()
dep.SetOwnerReferences([]metav1.OwnerReference{{
Name: testPod.GetName(),
Kind: testPod.GetKind(),
APIVersion: testPod.GetAPIVersion(),
}})
cluster := newCluster(testPod, testRS, dep)
err := cluster.ensureSynced()
assert.Nil(t, err)
for k := range cluster.nodes {
assert.Equal(t, "helm-guestbook", cluster.nodes[k].appName)
}
updatedDeploy := testDeploy.DeepCopy()
updatedDeploy.SetLabels(map[string]string{common.LabelKeyAppInstance: "helm-guestbook2"})
err = cluster.processEvent(watch.Modified, updatedDeploy)
assert.Nil(t, err)
assert.Equal(t, []string{"helm-guestbook", "helm-guestbook2"}, updatesReceived)
for k := range cluster.nodes {
assert.Equal(t, "helm-guestbook2", cluster.nodes[k].appName)
}
children := cluster.getChildren(dep)
assert.Len(t, children, 1)
}

View File

@@ -3,6 +3,8 @@ package cache
import (
"fmt"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -10,20 +12,20 @@ import (
k8snode "k8s.io/kubernetes/pkg/util/node"
)
func getTags(un *unstructured.Unstructured) []string {
func getNodeInfo(un *unstructured.Unstructured) []v1alpha1.InfoItem {
gvk := un.GroupVersionKind()
if gvk.Kind == kube.PodKind && gvk.Group == "" {
return getPodTags(un)
return getPodInfo(un)
}
return []string{}
return []v1alpha1.InfoItem{}
}
func getPodTags(un *unstructured.Unstructured) []string {
func getPodInfo(un *unstructured.Unstructured) []v1alpha1.InfoItem {
pod := v1.Pod{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
if err != nil {
return []string{}
return []v1alpha1.InfoItem{}
}
restarts := 0
totalContainers := len(pod.Spec.Containers)
@@ -97,9 +99,9 @@ func getPodTags(un *unstructured.Unstructured) []string {
reason = "Terminating"
}
tags := make([]string, 0)
info := make([]v1alpha1.InfoItem, 0)
if reason != "" {
tags = append(tags, reason)
info = append(info, v1alpha1.InfoItem{Name: "Status Reason", Value: reason})
}
return append(tags, fmt.Sprintf("%d/%d", readyContainers, totalContainers))
return append(info, v1alpha1.InfoItem{Name: "Containers", Value: fmt.Sprintf("%d/%d", readyContainers, totalContainers)})
}

View File

@@ -74,6 +74,11 @@ func (_m *LiveStateCache) GetManagedLiveObjs(a *v1alpha1.Application, targetObjs
return r0, r1
}
// Invalidate provides a mock function with given fields:
func (_m *LiveStateCache) Invalidate() {
_m.Called()
}
// IsNamespaced provides a mock function with given fields: server, gvk
func (_m *LiveStateCache) IsNamespaced(server string, gvk schema.GroupVersionKind) (bool, error) {
ret := _m.Called(server, gvk)

View File

@@ -1,8 +1,11 @@
package cache
import (
log "github.com/sirupsen/logrus"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -13,9 +16,7 @@ type node struct {
resourceVersion string
ref v1.ObjectReference
ownerRefs []metav1.OwnerReference
children map[kube.ResourceKey]*node
parents map[kube.ResourceKey]*node
tags []string
info []appv1.InfoItem
appName string
resource *unstructured.Unstructured
}
@@ -25,9 +26,6 @@ func (n *node) resourceKey() kube.ResourceKey {
}
func (n *node) isParentOf(child *node) bool {
if n.ref.Namespace != child.ref.Namespace {
return false
}
ownerGvk := n.ref.GroupVersionKind()
for _, ownerRef := range child.ownerRefs {
if kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name) == n.resourceKey() {
@@ -38,29 +36,52 @@ func (n *node) isParentOf(child *node) bool {
return false
}
func (n *node) setAppName(appName string) {
n.appName = appName
for i := range n.children {
n.children[i].setAppName(appName)
func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
gv, err := schema.ParseGroupVersion(ownerRef.APIVersion)
if err != nil {
gv = schema.GroupVersion{}
}
return gv
}
func (n *node) fillChildren(nodes map[kube.ResourceKey]*node) {
for k, child := range nodes {
if n.isParentOf(child) {
delete(nodes, k)
child.appName = n.appName
child.parents[n.resourceKey()] = n
n.children[child.resourceKey()] = child
child.fillChildren(nodes)
func (n *node) getApp(ns map[kube.ResourceKey]*node) string {
if n.appName != "" {
return n.appName
}
for _, ownerRef := range n.ownerRefs {
gv := ownerRefGV(ownerRef)
if parent, ok := ns[kube.NewResourceKey(gv.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name)]; ok {
app := parent.getApp(ns)
if app != "" {
return app
}
}
}
return ""
}
func (n *node) childResourceNodes() appv1.ResourceNode {
func newResourceKeySet(set map[kube.ResourceKey]bool, keys ...kube.ResourceKey) map[kube.ResourceKey]bool {
newSet := make(map[kube.ResourceKey]bool)
for k, v := range set {
newSet[k] = v
}
for i := range keys {
newSet[keys[i]] = true
}
return newSet
}
func (n *node) childResourceNodes(ns map[kube.ResourceKey]*node, parents map[kube.ResourceKey]bool) appv1.ResourceNode {
children := make([]appv1.ResourceNode, 0)
for i := range n.children {
children = append(children, n.children[i].childResourceNodes())
for childKey := range ns {
if n.isParentOf(ns[childKey]) {
if parents[childKey] {
key := n.resourceKey()
log.Warnf("Circular dependency detected. %s is child and parent of %s", childKey.String(), key.String())
} else {
children = append(children, ns[childKey].childResourceNodes(ns, newResourceKeySet(parents, n.resourceKey())))
}
}
}
gv, err := schema.ParseGroupVersion(n.ref.APIVersion)
if err != nil {
@@ -72,7 +93,7 @@ func (n *node) childResourceNodes() appv1.ResourceNode {
Version: gv.Version,
Kind: n.ref.Kind,
Namespace: n.ref.Namespace,
Tags: n.tags,
Info: n.info,
Children: children,
ResourceVersion: n.resourceVersion,
}

View File

@@ -35,7 +35,7 @@ func (m *ResourcesQuery) Reset() { *m = ResourcesQuery{} }
func (m *ResourcesQuery) String() string { return proto.CompactTextString(m) }
func (*ResourcesQuery) ProtoMessage() {}
func (*ResourcesQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_9e06f70e07dd0483, []int{0}
return fileDescriptor_application_22f1e591d294b941, []int{0}
}
func (m *ResourcesQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -82,7 +82,7 @@ func (m *ResourceTreeResponse) Reset() { *m = ResourceTreeResponse{} }
func (m *ResourceTreeResponse) String() string { return proto.CompactTextString(m) }
func (*ResourceTreeResponse) ProtoMessage() {}
func (*ResourceTreeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_application_9e06f70e07dd0483, []int{1}
return fileDescriptor_application_22f1e591d294b941, []int{1}
}
func (m *ResourceTreeResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -129,7 +129,7 @@ func (m *ManagedResourcesResponse) Reset() { *m = ManagedResourcesRespon
func (m *ManagedResourcesResponse) String() string { return proto.CompactTextString(m) }
func (*ManagedResourcesResponse) ProtoMessage() {}
func (*ManagedResourcesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_application_9e06f70e07dd0483, []int{2}
return fileDescriptor_application_22f1e591d294b941, []int{2}
}
func (m *ManagedResourcesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -784,10 +784,10 @@ var (
)
func init() {
proto.RegisterFile("controller/services/application.proto", fileDescriptor_application_9e06f70e07dd0483)
proto.RegisterFile("controller/services/application.proto", fileDescriptor_application_22f1e591d294b941)
}
var fileDescriptor_application_9e06f70e07dd0483 = []byte{
var fileDescriptor_application_22f1e591d294b941 = []byte{
// 337 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0xcd, 0x4a, 0x33, 0x31,
0x14, 0x6d, 0xbe, 0x0f, 0x05, 0xa3, 0xa8, 0x04, 0x17, 0xa5, 0x8b, 0x52, 0x06, 0x84, 0x6e, 0x4c,

View File

@@ -10,6 +10,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd/common"
statecache "github.com/argoproj/argo-cd/controller/cache"
@@ -24,6 +25,7 @@ import (
"github.com/argoproj/argo-cd/util/health"
hookutil "github.com/argoproj/argo-cd/util/hook"
kubeutil "github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
)
type managedResource struct {
@@ -48,7 +50,7 @@ func GetLiveObjs(res []managedResource) []*unstructured.Unstructured {
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
CompareAppState(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) (*comparisonResult, error)
CompareAppState(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter, noCache bool) (*comparisonResult, error)
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
}
@@ -65,14 +67,16 @@ type comparisonResult struct {
// appStateManager allows to compare applications to git
type appStateManager struct {
db db.ArgoDB
settings *settings.ArgoCDSettings
appclientset appclientset.Interface
projInformer cache.SharedIndexInformer
kubectl kubeutil.Kubectl
repoClientset reposerver.Clientset
liveStateCache statecache.LiveStateCache
namespace string
}
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, []*unstructured.Unstructured, *repository.ManifestResponse, error) {
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, appLabelKey, revision string, overrides []v1alpha1.ComponentParameter, noCache bool) ([]*unstructured.Unstructured, []*unstructured.Unstructured, *repository.ManifestResponse, error) {
helmRepos, err := m.db.ListHelmRepos(context.Background())
if err != nil {
return nil, nil, nil, err
@@ -110,8 +114,10 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, revision string
Repo: repo,
HelmRepos: helmRepos,
Revision: revision,
NoCache: noCache,
ComponentParameterOverrides: mfReqOverrides,
AppLabel: app.Name,
AppLabelKey: appLabelKey,
AppLabelValue: app.Name,
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &app.Spec.Source,
})
@@ -138,27 +144,30 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, revision string
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied overrides. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) (*comparisonResult, error) {
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter, noCache bool) (*comparisonResult, error) {
logCtx := log.WithField("application", app.Name)
logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)
observedAt := metav1.Now()
failedToLoadObjs := false
conditions := make([]v1alpha1.ApplicationCondition, 0)
targetObjs, hooks, manifestInfo, err := m.getRepoObjs(app, revision, overrides)
appLabelKey := m.settings.GetAppInstanceLabelKey()
targetObjs, hooks, manifestInfo, err := m.getRepoObjs(app, appLabelKey, revision, overrides, noCache)
if err != nil {
targetObjs = make([]*unstructured.Unstructured, 0)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
failedToLoadObjs = true
}
logCtx.Debugf("Generated config manifests")
liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs)
if err != nil {
liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
failedToLoadObjs = true
}
logCtx.Debugf("Retrieved lived manifests")
for _, liveObj := range liveObjByKey {
if liveObj != nil {
appInstanceName := kubeutil.GetAppInstanceLabel(liveObj)
appInstanceName := kubeutil.GetAppInstanceLabel(liveObj, appLabelKey)
if appInstanceName != "" && appInstanceName != app.Name {
conditions = append(conditions, v1alpha1.ApplicationCondition{
Type: v1alpha1.ApplicationConditionSharedResourceWarning,
@@ -183,7 +192,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
managedLiveObj[i] = nil
}
}
logCtx.Debugf("built managed objects list")
// Everything remaining in liveObjByKey are "extra" resources that aren't tracked in git.
// The following adds all the extras to the managedLiveObj list and backfills the targetObj
// list with nils, so that the lists are of equal lengths for comparison purposes.
@@ -192,8 +201,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
managedLiveObj = append(managedLiveObj, obj)
}
log.Infof("Comparing app %s state in cluster %s (namespace: %s)", app.ObjectMeta.Name, app.Spec.Destination.Server, app.Spec.Destination.Namespace)
// Do the actual comparison
diffResults, err := diff.DiffArray(targetObjs, managedLiveObj)
if err != nil {
@@ -253,8 +260,11 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
syncCode = v1alpha1.SyncStatusCodeUnknown
}
syncStatus := v1alpha1.SyncStatus{
ComparedTo: app.Spec.Source,
Status: syncCode,
ComparedTo: appv1.ComparedTo{
Source: app.Spec.Source,
Destination: app.Spec.Destination,
},
Status: syncCode,
}
if manifestInfo != nil {
syncStatus.Revision = manifestInfo.Revision
@@ -286,14 +296,17 @@ func (m *appStateManager) getRepo(repoURL string) *v1alpha1.Repository {
return repo
}
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, overrides *[]v1alpha1.ComponentParameter) error {
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) error {
var nextID int64 = 0
if len(app.Status.History) > 0 {
nextID = app.Status.History[len(app.Status.History)-1].ID + 1
}
if overrides == nil {
overrides = app.Spec.Source.ComponentParameterOverrides
}
history := append(app.Status.History, v1alpha1.RevisionHistory{
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
ComponentParameterOverrides: overrides,
Revision: revision,
DeployedAt: metav1.NewTime(time.Now().UTC()),
ID: nextID,
@@ -322,7 +335,9 @@ func NewAppStateManager(
repoClientset reposerver.Clientset,
namespace string,
kubectl kubeutil.Kubectl,
settings *settings.ArgoCDSettings,
liveStateCache statecache.LiveStateCache,
projInformer cache.SharedIndexInformer,
) AppStateManager {
return &appStateManager{
liveStateCache: liveStateCache,
@@ -331,5 +346,7 @@ func NewAppStateManager(
kubectl: kubectl,
repoClientset: repoClientset,
namespace: namespace,
settings: settings,
projInformer: projInformer,
}
}

View File

@@ -28,7 +28,7 @@ func TestCompareAppStateEmpty(t *testing.T) {
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -51,7 +51,7 @@ func TestCompareAppStateMissing(t *testing.T) {
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -78,7 +78,7 @@ func TestCompareAppStateExtra(t *testing.T) {
},
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -105,7 +105,7 @@ func TestCompareAppStateHook(t *testing.T) {
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -133,7 +133,7 @@ func TestCompareAppStateExtraHook(t *testing.T) {
},
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", nil, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)

View File

@@ -15,6 +15,7 @@ import (
"k8s.io/client-go/rest"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/util/argo"
hookutil "github.com/argoproj/argo-cd/util/hook"
"github.com/argoproj/argo-cd/util/kube"
@@ -75,7 +76,7 @@ func (m *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
revision = syncOp.Revision
}
compareResult, err := m.CompareAppState(app, revision, overrides)
compareResult, err := m.CompareAppState(app, revision, overrides, false)
if err != nil {
state.Phase = appv1.OperationError
state.Message = err.Error()
@@ -120,7 +121,7 @@ func (m *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
return
}
proj, err := argo.GetAppProject(&app.Spec, m.appclientset, m.namespace)
proj, err := argo.GetAppProject(&app.Spec, v1alpha1.NewAppProjectLister(m.projInformer.GetIndexer()), m.namespace)
if err != nil {
state.Phase = appv1.OperationError
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
@@ -151,7 +152,7 @@ func (m *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
}
if !syncOp.DryRun && len(syncOp.Resources) == 0 && syncCtx.opState.Phase.Successful() {
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, nil)
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, overrides)
if err != nil {
state.Phase = appv1.OperationError
state.Message = fmt.Sprintf("failed to record sync to history: %v", err)
@@ -248,13 +249,19 @@ func (sc *syncContext) generateSyncTasks() ([]syncTask, bool) {
if resourceState.Target != nil {
targetObj = resourceState.Target.DeepCopy()
if targetObj.GetNamespace() == "" {
// If target object's namespace is empty, we set namespace in the object. We do
// this even though it might be a cluster-scoped resource. This prevents any
// possibility of the resource unintentionally being created in the
// namespace during the `kubectl apply`
targetObj.SetNamespace(sc.namespace)
}
gvk := targetObj.GroupVersionKind()
serverRes, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
if err != nil {
// Special case for custom resources: if custom resource definition is not supported by the cluster by defined in application then
// skip verification using `kubectl apply --dry-run` and since CRD should be created during app synchronization.
// Special case for custom resources: if CRD is not yet known by the K8s API server,
// skip verification during `kubectl apply --dry-run` since we expect the CRD
// to be created during app synchronization.
if apierr.IsNotFound(err) && hasCRDOfGroupKind(sc.compareResult.managedResources, gvk.Group, gvk.Kind) {
skipDryRun = true
} else {
@@ -267,6 +274,7 @@ func (sc *syncContext) generateSyncTasks() ([]syncTask, bool) {
Message: err.Error(),
Status: appv1.ResultCodeSyncFailed,
})
successful = false
}
} else {
if !sc.proj.IsResourcePermitted(metav1.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, serverRes.Namespaced) {
@@ -584,14 +592,12 @@ func (k *kindSorter) Less(i, j int) bool {
}
first, aok := k.ordering[a.GetKind()]
second, bok := k.ordering[b.GetKind()]
// if same kind (including unknown) sub sort alphanumeric
if first == second {
// if both are unknown and of different kind sort by kind alphabetically
if !aok && !bok && a.GetKind() != b.GetKind() {
return a.GetKind() < b.GetKind()
}
return a.GetName() < b.GetName()
// if both are unknown and of different kind sort by kind alphabetically
if !aok && !bok && a.GetKind() != b.GetKind() {
return a.GetKind() < b.GetKind()
}
// unknown kind is last
if !aok {
return false
@@ -599,6 +605,11 @@ func (k *kindSorter) Less(i, j int) bool {
if !bok {
return true
}
// if same kind (including unknown) sub sort alphanumeric
if first == second {
return a.GetName() < b.GetName()
}
// sort different kinds
return first < second
}

View File

@@ -311,6 +311,7 @@ func newHookStatus(hook *unstructured.Unstructured, hookType appv1.HookType) app
Group: gvk.Group,
Version: gvk.Version,
HookType: hookType,
HookPhase: appv1.OperationRunning,
Namespace: hook.GetNamespace(),
}
if isBatchJob(gvk) {

View File

@@ -12,12 +12,14 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
fakedisco "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/rest"
testcore "k8s.io/client-go/testing"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/kube/kubetest"
@@ -385,6 +387,33 @@ func TestSortManifestHandleNil(t *testing.T) {
assert.Nil(t, manifest[1].targetObj)
}
func TestSyncNamespaceAgainstCRD(t *testing.T) {
crd := syncTask{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": "argoproj.io/alpha1",
"kind": "Workflow",
},
}}
namespace := syncTask{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Namespace",
},
},
}
unsorted := []syncTask{crd, namespace}
ks := newKindSorter(unsorted, resourceOrder)
sort.Sort(ks)
expectedOrder := []syncTask{namespace, crd}
assert.Equal(t, len(unsorted), len(expectedOrder))
for i, sorted := range unsorted {
assert.Equal(t, expectedOrder[i], sorted)
}
}
func TestDontSyncOrPruneHooks(t *testing.T) {
syncCtx := newTestSyncCtx()
targetPod := test.NewPod()
@@ -410,3 +439,47 @@ func TestDontSyncOrPruneHooks(t *testing.T) {
syncCtx.sync()
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
}
func TestPersistRevisionHistory(t *testing.T) {
app := newFakeApp()
defaultProject := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: test.FakeArgoCDNamespace,
Name: "default",
},
}
data := fakeData{
apps: []runtime.Object{app, defaultProject},
manifestResponse: &repository.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
ctrl.appStateManager.SyncAppState(app, &v1alpha1.OperationState{Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
}})
updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, v1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, 1, len(updatedApp.Status.History))
assert.Equal(t, 0, len(updatedApp.Status.History[0].ComponentParameterOverrides))
assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
overrides := []v1alpha1.ComponentParameter{{Name: "test", Value: "123"}}
ctrl.appStateManager.SyncAppState(app, &v1alpha1.OperationState{Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
ParameterOverrides: overrides,
},
}})
updatedApp, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, v1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, 1, len(updatedApp.Status.History))
assert.ElementsMatch(t, overrides, updatedApp.Status.History[0].ComponentParameterOverrides)
assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
}

View File

@@ -124,3 +124,53 @@ happen at the ingress controller.
Neither ALBs nor Classic ELBs in HTTP mode have full support for HTTP/2 and gRPC, which is the
protocol used by the `argocd` CLI. Thus, when using an AWS load balancer, either a Classic ELB in
passthrough mode or an NLB is needed.
## UI base path
If the Argo CD UI is served under a non-root path (e.g. `/argo-cd` instead of `/`), then the UI path should be configured in the API server.
To configure the UI path, add the `--basehref` flag to the `argocd-server` deployment command:
```yaml
spec:
template:
spec:
name: argocd-server
containers:
- command:
- /argocd-server
- --staticassets
- /shared/app
- --repo-server
- argocd-repo-server:8081
- --base-href
- /argo-cd
```
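If you would rather not edit the manifest by hand, the same flags can be appended with a JSON patch (a sketch; it assumes the default `argocd` namespace and the stock `argocd-server` deployment name):
```bash
# Append the base href flag and its value to the argocd-server container command
kubectl -n argocd patch deployment argocd-server --type json -p='[
  {"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--base-href"},
  {"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "/argo-cd"}
]'
```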
NOTE: the `--basehref` flag only changes the UI base URL. The API server keeps serving from the `/` path, so you need to add a URL rewrite rule to the proxy config.
Example nginx.conf with URL rewrite:
```
worker_processes 1;
events { worker_connections 1024; }
http {
sendfile on;
server {
listen 443;
location /argo-cd {
rewrite /argo-cd/(.*) /$1 break;
proxy_pass https://localhost:8080;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
}
}
}
```
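A quick way to verify the rewrite is to request the UI through the proxy; the Argo CD login page should be returned (assuming the nginx example above is running locally):
```
# -k skips TLS certificate verification for this local test
curl -vk https://localhost/argo-cd/
```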

View File

@@ -1,14 +1,14 @@
# Parameter Overrides
Argo CD provides a mechanism to override the parameters of a ksonnet/helm app. This gives some extra
flexibility in having most of the application manifests defined in git, while leaving room for
*some* parts of the k8s manifests determined dynamically, or outside of git. It also serves as an
alternative way of redeploying an application by changing application parameters via Argo CD, instead
of making the changes to the manifests in git.
Argo CD provides a mechanism to override the parameters of a ksonnet/helm app. This provides flexibility
in having most of the application manifests defined in git, while leaving room for *some* parts of the
k8s manifests determined dynamically, or outside of git. It also serves as an alternative way of
redeploying an application by changing application parameters via Argo CD, instead of making the
changes to the manifests in git.
**NOTE:** many consider this mode of operation as an anti-pattern to GitOps, since the source of
truth becomes a union of the git repository, and the application overrides. The Argo CD parameter
overrides feature is provided mainly convenience to developers and is intended to be used more for
overrides feature is provided mainly as a convenience to developers and is intended to be used in
dev/test environments, vs. production environments.
To use parameter overrides, run the `argocd app set -p (COMPONENT=)PARAM=VALUE` command:
@@ -17,21 +17,28 @@ argocd app set guestbook -p guestbook=image=example/guestbook:abcd123
argocd app sync guestbook
```
The `PARAM` is expected to be a normal YAML path:
```bash
argocd app set guestbook -p guestbook=ingress.enabled=true
argocd app set guestbook -p guestbook=ingress.hosts[0]=guestbook.myclusterurl
```
The following are situations where parameter overrides would be useful:
1. A team maintains a "dev" environment, which needs to be continually updated with the latest
version of their guestbook application after every build in the tip of master. To address this use
case, the application would expose an parameter named `image`, whose value used in the `dev`
case, the application would expose a parameter named `image`, whose value used in the `dev`
environment contains a placeholder value (e.g. `example/guestbook:replaceme`). The placeholder value
would be determined externally (outside of git) such as a build systems. Then, as part of the build
would be determined externally (outside of git) such as a build system. Then, as part of the build
pipeline, the parameter value of the `image` would be continually updated to the freshly built image
(e.g. `argocd app set guestbook -p guestbook=image=example/guestbook:abcd123`). A sync operation
would result in the application being redeployed with the new image.
2. A repository of helm manifests is already publicly available (e.g. https://github.com/helm/charts).
2. A repository of Helm manifests is already publicly available (e.g. https://github.com/helm/charts).
Since commit access to the repository is unavailable, it is useful to be able to install charts from
the public repository, customizing the deployment with different parameters, without resorting to
forking the repository to make the changes. For example, to install redis from the helm chart
the public repository and customize the deployment with different parameters, without resorting to
forking the repository to make the changes. For example, to install Redis from the Helm chart
repository and customize the database password, you would run:
```

View File

@@ -42,24 +42,24 @@ argocd proj create myproject -d https://kubernetes.default.svc,mynamespace -s ht
Permitted source git repositories are managed using commands:
```bash
argocd project add-source <PROJECT> <REPO>
argocd project remove-source <PROJECT> <REPO>
argocd proj add-source <PROJECT> <REPO>
argocd proj remove-source <PROJECT> <REPO>
```
Permitted destination clusters and namespaces are managed with the commands:
```
argocd project add-destination <PROJECT> <CLUSTER>,<NAMESPACE>
argocd project remove-destination <PROJECT> <CLUSTER>,<NAMESPACE>
argocd proj add-destination <PROJECT> <CLUSTER>,<NAMESPACE>
argocd proj remove-destination <PROJECT> <CLUSTER>,<NAMESPACE>
```
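For example, to whitelist a source repository and an additional destination namespace for `myproject` (an illustrative sketch; the repository URL and `staging` namespace are placeholders):
```bash
# placeholder repo URL and namespace, shown purely for illustration
argocd proj add-source myproject https://github.com/argoproj/argocd-example-apps.git
argocd proj add-destination myproject https://kubernetes.default.svc,staging
```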
Permitted destination K8s resource kinds are managed with the following commands. Note that namespace-scoped
resources are restricted via a blacklist, whereas cluster-scoped resources are restricted via a
whitelist.
```
argocd project allow-cluster-resource <PROJECT> <GROUP> <KIND>
argocd project allow-namespace-resource <PROJECT> <GROUP> <KIND>
argocd project deny-cluster-resource <PROJECT> <GROUP> <KIND>
argocd project deny-namespace-resource <PROJECT> <GROUP> <KIND>
argocd proj allow-cluster-resource <PROJECT> <GROUP> <KIND>
argocd proj allow-namespace-resource <PROJECT> <GROUP> <KIND>
argocd proj deny-cluster-resource <PROJECT> <GROUP> <KIND>
argocd proj deny-namespace-resource <PROJECT> <GROUP> <KIND>
```
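As a concrete illustration (the project name and resource kinds below are hypothetical, not taken from this changeset), blacklisting Deployments at the namespace scope and whitelisting CustomResourceDefinitions at the cluster scope would look roughly like this:
```bash
# Deny namespace-scoped apps/Deployment resources for the hypothetical project "myproject"
argocd proj deny-namespace-resource myproject apps Deployment
# Allow cluster-scoped CustomResourceDefinitions for the same project
argocd proj allow-cluster-resource myproject apiextensions.k8s.io CustomResourceDefinition
```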
### Assign application to a project

View File

@@ -21,6 +21,7 @@
"Exclude": [
"pkg/client",
"vendor/",
".pb.go"
".pb.go",
".*warning.*fmt.Fprint"
]
}

View File

@@ -1,15 +1,15 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: application-controller
name: argocd-application-controller
spec:
selector:
matchLabels:
app: application-controller
app: argocd-application-controller
template:
metadata:
labels:
app: application-controller
app: argocd-application-controller
spec:
containers:
- command:
@@ -20,7 +20,7 @@ spec:
- "10"
image: argoproj/argocd:latest
imagePullPolicy: Always
name: application-controller
name: argocd-application-controller
ports:
- containerPort: 8083
readinessProbe:
@@ -28,4 +28,4 @@ spec:
port: 8083
initialDelaySeconds: 5
periodSeconds: 10
serviceAccountName: application-controller
serviceAccountName: argocd-application-controller

View File

@@ -1,15 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: application-controller-role
name: argocd-application-controller
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- argoproj.io
resources:
@@ -23,14 +25,6 @@ rules:
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- argocd-cm
verbs:
- get
- apiGroups:
- ""
resources:

View File

@@ -1,11 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-controller-role-binding
name: argocd-application-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: application-controller-role
name: argocd-application-controller
subjects:
- kind: ServiceAccount
name: application-controller
name: argocd-application-controller

View File

@@ -1,4 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: application-controller
name: argocd-application-controller

View File

@@ -1,10 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: application-controller
name: argocd-application-controller
spec:
ports:
- port: 8083
targetPort: 8083
selector:
app: application-controller
app: argocd-application-controller

View File

@@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argocd-server-role
name: argocd-server
rules:
- apiGroups:
- ""

View File

@@ -1,11 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argocd-server-role-binding
name: argocd-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argocd-server-role
name: argocd-server
subjects:
- kind: ServiceAccount
name: argocd-server

View File

@@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: dex-server-role
name: dex-server
rules:
- apiGroups:
- ""

View File

@@ -1,11 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dex-server-role-binding
name: dex-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dex-server-role
name: dex-server
subjects:
- kind: ServiceAccount
name: dex-server

View File

@@ -2,11 +2,11 @@ resources:
- argocd-cm.yaml
- argocd-secret.yaml
- argocd-rbac-cm.yaml
- application-controller-sa.yaml
- application-controller-role.yaml
- application-controller-rolebinding.yaml
- application-controller-deployment.yaml
- application-controller-service.yaml
- argocd-application-controller-sa.yaml
- argocd-application-controller-role.yaml
- argocd-application-controller-rolebinding.yaml
- argocd-application-controller-deployment.yaml
- argocd-application-controller-service.yaml
- argocd-server-sa.yaml
- argocd-server-role.yaml
- argocd-server-rolebinding.yaml
@@ -23,6 +23,6 @@ resources:
imageTags:
- name: argoproj/argocd
newTag: latest
newTag: v0.11.1
- name: argoproj/argocd-ui
newTag: latest
newTag: v0.11.1

View File

@@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-controller-clusterrole
name: argocd-application-controller
rules:
- apiGroups:
- '*'

View File

@@ -1,12 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: application-controller-clusterrolebinding
name: argocd-application-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-controller-clusterrole
name: argocd-application-controller
subjects:
- kind: ServiceAccount
name: application-controller
name: argocd-application-controller
namespace: argocd

View File

@@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argocd-server-clusterrole
name: argocd-server
rules:
- apiGroups:
- '*'

View File

@@ -1,11 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argocd-server-clusterrolebinding
name: argocd-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argocd-server-clusterrole
name: argocd-server
subjects:
- kind: ServiceAccount
name: argocd-server

View File

@@ -2,7 +2,7 @@ bases:
- ../namespace-install
resources:
- application-controller-clusterrole.yaml
- application-controller-clusterrolebinding.yaml
- argocd-application-controller-clusterrole.yaml
- argocd-application-controller-clusterrolebinding.yaml
- argocd-server-clusterrole.yaml
- argocd-server-clusterrolebinding.yaml

View File

@@ -31,7 +31,7 @@ spec:
apiVersion: v1
kind: ServiceAccount
metadata:
name: application-controller
name: argocd-application-controller
---
apiVersion: v1
kind: ServiceAccount
@@ -46,15 +46,17 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: application-controller-role
name: argocd-application-controller
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- argoproj.io
resources:
@@ -68,14 +70,6 @@ rules:
- update
- patch
- delete
- apiGroups:
- ""
resourceNames:
- argocd-cm
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
@@ -87,7 +81,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argocd-server-role
name: argocd-server
rules:
- apiGroups:
- ""
@@ -126,7 +120,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: dex-server-role
name: dex-server
rules:
- apiGroups:
- ""
@@ -141,7 +135,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-controller-clusterrole
name: argocd-application-controller
rules:
- apiGroups:
- '*'
@@ -157,7 +151,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argocd-server-clusterrole
name: argocd-server
rules:
- apiGroups:
- '*'
@@ -165,6 +159,7 @@ rules:
- '*'
verbs:
- delete
- get
- apiGroups:
- ""
resources:
@@ -182,23 +177,23 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-controller-role-binding
name: argocd-application-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: application-controller-role
name: argocd-application-controller
subjects:
- kind: ServiceAccount
name: application-controller
name: argocd-application-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argocd-server-role-binding
name: argocd-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argocd-server-role
name: argocd-server
subjects:
- kind: ServiceAccount
name: argocd-server
@@ -206,11 +201,11 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dex-server-role-binding
name: dex-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dex-server-role
name: dex-server
subjects:
- kind: ServiceAccount
name: dex-server
@@ -218,24 +213,24 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: application-controller-clusterrolebinding
name: argocd-application-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-controller-clusterrole
name: argocd-application-controller
subjects:
- kind: ServiceAccount
name: application-controller
name: argocd-application-controller
namespace: argocd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argocd-server-clusterrolebinding
name: argocd-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argocd-server-clusterrole
name: argocd-server
subjects:
- kind: ServiceAccount
name: argocd-server
@@ -260,13 +255,13 @@ type: Opaque
apiVersion: v1
kind: Service
metadata:
name: application-controller
name: argocd-application-controller
spec:
ports:
- port: 8083
targetPort: 8083
selector:
app: application-controller
app: argocd-application-controller
---
apiVersion: v1
kind: Service
@@ -331,15 +326,15 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: application-controller
name: argocd-application-controller
spec:
selector:
matchLabels:
app: application-controller
app: argocd-application-controller
template:
metadata:
labels:
app: application-controller
app: argocd-application-controller
spec:
containers:
- command:
@@ -348,9 +343,9 @@ spec:
- "20"
- --operation-processors
- "10"
image: argoproj/argocd:latest
image: argoproj/argocd:v0.11.1
imagePullPolicy: Always
name: application-controller
name: argocd-application-controller
ports:
- containerPort: 8083
readinessProbe:
@@ -358,7 +353,7 @@ spec:
periodSeconds: 10
tcpSocket:
port: 8083
serviceAccountName: application-controller
serviceAccountName: argocd-application-controller
---
apiVersion: apps/v1
kind: Deployment
@@ -377,7 +372,7 @@ spec:
containers:
- command:
- argocd-repo-server
image: argoproj/argocd:latest
image: argoproj/argocd:v0.11.1
imagePullPolicy: Always
name: argocd-repo-server
ports:
@@ -406,7 +401,7 @@ spec:
- argocd-server
- --staticassets
- /shared/app
image: argoproj/argocd:latest
image: argoproj/argocd:v0.11.1
imagePullPolicy: Always
name: argocd-server
ports:
@@ -426,7 +421,7 @@ spec:
- -r
- /app
- /shared
image: argoproj/argocd-ui:latest
image: argoproj/argocd-ui:v0.11.1
imagePullPolicy: Always
name: ui
volumeMounts:
@@ -468,7 +463,7 @@ spec:
- cp
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:latest
image: argoproj/argocd:v0.11.1
imagePullPolicy: Always
name: copyutil
volumeMounts:

View File

@@ -31,7 +31,7 @@ spec:
apiVersion: v1
kind: ServiceAccount
metadata:
name: application-controller
name: argocd-application-controller
---
apiVersion: v1
kind: ServiceAccount
@@ -46,15 +46,17 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: application-controller-role
name: argocd-application-controller
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- argoproj.io
resources:
@@ -68,14 +70,6 @@ rules:
- update
- patch
- delete
- apiGroups:
- ""
resourceNames:
- argocd-cm
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
@@ -87,7 +81,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argocd-server-role
name: argocd-server
rules:
- apiGroups:
- ""
@@ -126,7 +120,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: dex-server-role
name: dex-server
rules:
- apiGroups:
- ""
@@ -141,23 +135,23 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-controller-role-binding
name: argocd-application-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: application-controller-role
name: argocd-application-controller
subjects:
- kind: ServiceAccount
name: application-controller
name: argocd-application-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argocd-server-role-binding
name: argocd-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argocd-server-role
name: argocd-server
subjects:
- kind: ServiceAccount
name: argocd-server
@@ -165,11 +159,11 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dex-server-role-binding
name: dex-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dex-server-role
name: dex-server
subjects:
- kind: ServiceAccount
name: dex-server
@@ -193,13 +187,13 @@ type: Opaque
apiVersion: v1
kind: Service
metadata:
name: application-controller
name: argocd-application-controller
spec:
ports:
- port: 8083
targetPort: 8083
selector:
app: application-controller
app: argocd-application-controller
---
apiVersion: v1
kind: Service
@@ -264,15 +258,15 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: application-controller
name: argocd-application-controller
spec:
selector:
matchLabels:
app: application-controller
app: argocd-application-controller
template:
metadata:
labels:
app: application-controller
app: argocd-application-controller
spec:
containers:
- command:
@@ -281,9 +275,9 @@ spec:
- "20"
- --operation-processors
- "10"
image: argoproj/argocd:latest
image: argoproj/argocd:v0.11.1
imagePullPolicy: Always
name: application-controller
name: argocd-application-controller
ports:
- containerPort: 8083
readinessProbe:
@@ -291,7 +285,7 @@ spec:
periodSeconds: 10
tcpSocket:
port: 8083
serviceAccountName: application-controller
serviceAccountName: argocd-application-controller
---
apiVersion: apps/v1
kind: Deployment
@@ -310,7 +304,7 @@ spec:
containers:
- command:
- argocd-repo-server
image: argoproj/argocd:latest
image: argoproj/argocd:v0.11.1
imagePullPolicy: Always
name: argocd-repo-server
ports:
@@ -339,7 +333,7 @@ spec:
- argocd-server
- --staticassets
- /shared/app
image: argoproj/argocd:latest
image: argoproj/argocd:v0.11.1
imagePullPolicy: Always
name: argocd-server
ports:
@@ -359,7 +353,7 @@ spec:
- -r
- /app
- /shared
image: argoproj/argocd-ui:latest
image: argoproj/argocd-ui:v0.11.1
imagePullPolicy: Always
name: ui
volumeMounts:
@@ -401,7 +395,7 @@ spec:
- cp
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:latest
image: argoproj/argocd:v0.11.1
imagePullPolicy: Always
name: copyutil
volumeMounts:

File diff suppressed because it is too large

View File

@@ -242,6 +242,13 @@ message ClusterList {
repeated Cluster items = 2;
}
// ComparedTo contains application source and target which was used for resources comparison
message ComparedTo {
optional ApplicationSource source = 1;
optional ApplicationDestination destination = 2;
}
// ComponentParameter contains information about component parameter value
message ComponentParameter {
optional string component = 1;
@@ -282,6 +289,15 @@ message HelmRepository {
optional string password = 7;
}
// InfoItem contains human readable information about object
message InfoItem {
// Name is a human readable title for this piece of information.
optional string name = 1;
// Value is human readable content.
optional string value = 2;
}
// JWTToken holds the issuedAt and expiresAt values of a token
message JWTToken {
optional int64 iat = 1;
@@ -391,7 +407,7 @@ message ResourceNode {
optional string name = 5;
repeated string tags = 6;
repeated InfoItem info = 6;
repeated ResourceNode children = 7;
@@ -507,7 +523,7 @@ message SyncPolicyAutomated {
message SyncStatus {
optional string status = 1;
optional ApplicationSource comparedTo = 2;
optional ComparedTo comparedTo = 2;
optional string revision = 3;
}

View File

@@ -3,6 +3,7 @@ package v1alpha1
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
@@ -12,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"github.com/argoproj/argo-cd/common"
@@ -76,24 +78,43 @@ const (
ApplicationSourceTypeDirectory ApplicationSourceType = "Directory"
)
type RefreshType string
const (
RefreshTypeNormal RefreshType = "normal"
RefreshTypeHard RefreshType = "hard"
)
// ApplicationSourceHelm holds helm specific options
type ApplicationSourceHelm struct {
// ValuesFiles is a list of Helm value files to use when generating a template
ValueFiles []string `json:"valueFiles,omitempty" protobuf:"bytes,1,opt,name=valueFiles"`
}
func (h *ApplicationSourceHelm) IsZero() bool {
return len(h.ValueFiles) == 0
}
// ApplicationSourceKustomize holds kustomize specific options
type ApplicationSourceKustomize struct {
// NamePrefix is a prefix appended to resources for kustomize apps
NamePrefix string `json:"namePrefix" protobuf:"bytes,1,opt,name=namePrefix"`
}
func (k *ApplicationSourceKustomize) IsZero() bool {
return k.NamePrefix == ""
}
// ApplicationSourceKsonnet holds ksonnet specific options
type ApplicationSourceKsonnet struct {
// Environment is a ksonnet application environment name
Environment string `json:"environment,omitempty" protobuf:"bytes,1,opt,name=environment"`
}
func (k *ApplicationSourceKsonnet) IsZero() bool {
return k.Environment == ""
}
// ApplicationDestination contains deployment destination information
type ApplicationDestination struct {
// Server overrides the environment server value in the ksonnet app.yaml
@@ -375,11 +396,17 @@ type ApplicationCondition struct {
Message string `json:"message" protobuf:"bytes,2,opt,name=message"`
}
// ComparedTo contains application source and target which was used for resources comparison
type ComparedTo struct {
Source ApplicationSource `json:"source" protobuf:"bytes,1,opt,name=source"`
Destination ApplicationDestination `json:"destination" protobuf:"bytes,2,opt,name=destination"`
}
// SyncStatus is a comparison result of application spec and deployed application.
type SyncStatus struct {
Status SyncStatusCode `json:"status" protobuf:"bytes,1,opt,name=status,casttype=SyncStatusCode"`
ComparedTo ApplicationSource `json:"comparedTo" protobuf:"bytes,2,opt,name=comparedTo"`
Revision string `json:"revision" protobuf:"bytes,3,opt,name=revision"`
Status SyncStatusCode `json:"status" protobuf:"bytes,1,opt,name=status,casttype=SyncStatusCode"`
ComparedTo ComparedTo `json:"comparedTo" protobuf:"bytes,2,opt,name=comparedTo"`
Revision string `json:"revision" protobuf:"bytes,3,opt,name=revision"`
}
type HealthStatus struct {
@@ -397,6 +424,14 @@ const (
HealthStatusMissing HealthStatusCode = "Missing"
)
// InfoItem contains human readable information about object
type InfoItem struct {
// Name is a human readable title for this piece of information.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// Value is human readable content.
Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
// ResourceNode contains information about live resource and its children
type ResourceNode struct {
Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
@@ -404,7 +439,7 @@ type ResourceNode struct {
Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"`
Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
Tags []string `json:"tags,omitempty" protobuf:"bytes,6,opt,name=tags"`
Info []InfoItem `json:"info,omitempty" protobuf:"bytes,6,opt,name=info"`
Children []ResourceNode `json:"children,omitempty" protobuf:"bytes,7,opt,name=children"`
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,8,opt,name=resourceVersion"`
}
@@ -649,6 +684,25 @@ func (app *Application) CascadedDeletion() bool {
return app.getFinalizerIndex(common.ResourcesFinalizerName) > -1
}
func (app *Application) IsRefreshRequested() (RefreshType, bool) {
refreshType := RefreshTypeNormal
annotations := app.GetAnnotations()
if annotations == nil {
return refreshType, false
}
typeStr, ok := annotations[common.AnnotationKeyRefresh]
if !ok {
return refreshType, false
}
if typeStr == string(RefreshTypeHard) {
refreshType = RefreshTypeHard
}
return refreshType, true
}
// SetCascadedDeletion sets or remove resources finalizer
func (app *Application) SetCascadedDeletion(prune bool) {
index := app.getFinalizerIndex(common.ResourcesFinalizerName)
@@ -684,6 +738,11 @@ func (source ApplicationSource) Equals(other ApplicationSource) bool {
return reflect.DeepEqual(source, other)
}
// Equals compares two instances of ApplicationDestination and return true if instances are equal.
func (source ApplicationDestination) Equals(other ApplicationDestination) bool {
return reflect.DeepEqual(source, other)
}
// GetProject returns the application's project. This is preferred over spec.Project which may be empty
func (spec ApplicationSpec) GetProject() string {
if spec.Project == "" {
@@ -741,43 +800,50 @@ func (proj AppProject) IsDestinationPermitted(dst ApplicationDestination) bool {
// RESTConfig returns a go-client REST config from cluster
func (c *Cluster) RESTConfig() *rest.Config {
if c.Server == common.KubernetesInternalAPIServerAddr && c.Config.Username == "" && c.Config.Password == "" && c.Config.BearerToken == "" {
config, err := rest.InClusterConfig()
if err != nil {
panic("Unable to create in-cluster config")
var config *rest.Config
var err error
if c.Server == common.KubernetesInternalAPIServerAddr && os.Getenv(common.EnvVarFakeInClusterConfig) == "true" {
config, err = clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
} else if c.Server == common.KubernetesInternalAPIServerAddr && c.Config.Username == "" && c.Config.Password == "" && c.Config.BearerToken == "" {
config, err = rest.InClusterConfig()
} else {
tlsClientConfig := rest.TLSClientConfig{
Insecure: c.Config.TLSClientConfig.Insecure,
ServerName: c.Config.TLSClientConfig.ServerName,
CertData: c.Config.TLSClientConfig.CertData,
KeyData: c.Config.TLSClientConfig.KeyData,
CAData: c.Config.TLSClientConfig.CAData,
}
return config
}
tlsClientConfig := rest.TLSClientConfig{
Insecure: c.Config.TLSClientConfig.Insecure,
ServerName: c.Config.TLSClientConfig.ServerName,
CertData: c.Config.TLSClientConfig.CertData,
KeyData: c.Config.TLSClientConfig.KeyData,
CAData: c.Config.TLSClientConfig.CAData,
}
if c.Config.AWSAuthConfig != nil {
args := []string{"token", "-i", c.Config.AWSAuthConfig.ClusterName}
if c.Config.AWSAuthConfig.RoleARN != "" {
args = append(args, "-r", c.Config.AWSAuthConfig.RoleARN)
}
return &rest.Config{
Host: c.Server,
TLSClientConfig: tlsClientConfig,
ExecProvider: &api.ExecConfig{
APIVersion: "client.authentication.k8s.io/v1alpha1",
Command: "aws-iam-authenticator",
Args: args,
},
if c.Config.AWSAuthConfig != nil {
args := []string{"token", "-i", c.Config.AWSAuthConfig.ClusterName}
if c.Config.AWSAuthConfig.RoleARN != "" {
args = append(args, "-r", c.Config.AWSAuthConfig.RoleARN)
}
config = &rest.Config{
Host: c.Server,
TLSClientConfig: tlsClientConfig,
ExecProvider: &api.ExecConfig{
APIVersion: "client.authentication.k8s.io/v1alpha1",
Command: "aws-iam-authenticator",
Args: args,
},
}
} else {
config = &rest.Config{
Host: c.Server,
Username: c.Config.Username,
Password: c.Config.Password,
BearerToken: c.Config.BearerToken,
TLSClientConfig: tlsClientConfig,
}
}
}
return &rest.Config{
Host: c.Server,
Username: c.Config.Username,
Password: c.Config.Password,
BearerToken: c.Config.BearerToken,
TLSClientConfig: tlsClientConfig,
if err != nil {
panic("Unable to create K8s REST config")
}
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
return config
}
func UnmarshalToUnstructured(resource string) (*unstructured.Unstructured, error) {

View File

@@ -493,6 +493,24 @@ func (in *ClusterList) DeepCopy() *ClusterList {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComparedTo) DeepCopyInto(out *ComparedTo) {
*out = *in
in.Source.DeepCopyInto(&out.Source)
out.Destination = in.Destination
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComparedTo.
func (in *ComparedTo) DeepCopy() *ComparedTo {
if in == nil {
return nil
}
out := new(ComparedTo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentParameter) DeepCopyInto(out *ComponentParameter) {
*out = *in
@@ -581,6 +599,22 @@ func (in *HelmRepository) DeepCopy() *HelmRepository {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InfoItem) DeepCopyInto(out *InfoItem) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfoItem.
func (in *InfoItem) DeepCopy() *InfoItem {
if in == nil {
return nil
}
out := new(InfoItem)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JWTToken) DeepCopyInto(out *JWTToken) {
*out = *in
@@ -749,9 +783,9 @@ func (in *ResourceDiff) DeepCopy() *ResourceDiff {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceNode) DeepCopyInto(out *ResourceNode) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]string, len(*in))
if in.Info != nil {
in, out := &in.Info, &out.Info
*out = make([]InfoItem, len(*in))
copy(*out, *in)
}
if in.Children != nil {

View File

@@ -11,13 +11,15 @@ import (
"strings"
"time"
"github.com/google/go-jsonnet"
jsonnet "github.com/google/go-jsonnet"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cache"
@@ -36,14 +38,21 @@ const (
// Service implements ManifestService interface
type Service struct {
repoLock *util.KeyLock
gitFactory git.ClientFactory
cache cache.Cache
repoLock *util.KeyLock
gitFactory git.ClientFactory
cache cache.Cache
parallelismLimitSemaphore *semaphore.Weighted
}
// NewService returns a new instance of the Manifest service
func NewService(gitFactory git.ClientFactory, cache cache.Cache) *Service {
func NewService(gitFactory git.ClientFactory, cache cache.Cache, parallelismLimit int64) *Service {
var parallelismLimitSemaphore *semaphore.Weighted
if parallelismLimit > 0 {
parallelismLimitSemaphore = semaphore.NewWeighted(parallelismLimit)
}
return &Service{
parallelismLimitSemaphore: parallelismLimitSemaphore,
repoLock: util.NewKeyLock(),
gitFactory: gitFactory,
cache: cache,
@@ -133,20 +142,45 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
return nil, err
}
cacheKey := manifestCacheKey(commitSHA, q)
var res ManifestResponse
err = s.cache.Get(cacheKey, &res)
if err == nil {
log.Infof("manifest cache hit: %s", cacheKey)
return &res, nil
getCached := func() *ManifestResponse {
var res ManifestResponse
if !q.NoCache {
err = s.cache.Get(cacheKey, &res)
if err == nil {
log.Infof("manifest cache hit: %s", cacheKey)
return &res
}
if err != cache.ErrCacheMiss {
log.Warnf("manifest cache error %s: %v", cacheKey, err)
} else {
log.Infof("manifest cache miss: %s", cacheKey)
}
}
return nil
}
if err != cache.ErrCacheMiss {
log.Warnf("manifest cache error %s: %v", cacheKey, err)
} else {
log.Infof("manifest cache miss: %s", cacheKey)
cached := getCached()
if cached != nil {
return cached, nil
}
s.repoLock.Lock(gitClient.Root())
defer s.repoLock.Unlock(gitClient.Root())
cached = getCached()
if cached != nil {
return cached, nil
}
if s.parallelismLimitSemaphore != nil {
err = s.parallelismLimitSemaphore.Acquire(c, 1)
if err != nil {
return nil, err
}
defer s.parallelismLimitSemaphore.Release(1)
}
commitSHA, err = checkoutRevision(gitClient, commitSHA)
if err != nil {
return nil, err
@@ -157,7 +191,7 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
if err != nil {
return nil, err
}
res = *genRes
res := *genRes
res.Revision = commitSHA
err = s.cache.Set(&cache.Item{
Key: manifestCacheKey(commitSHA, q),
@@ -183,9 +217,7 @@ func helmOpts(q *ManifestRequest) helm.HelmTemplateOpts {
}
func kustomizeOpts(q *ManifestRequest) kustomize.KustomizeBuildOpts {
opts := kustomize.KustomizeBuildOpts{
Namespace: q.Namespace,
}
opts := kustomize.KustomizeBuildOpts{}
if q.ApplicationSource.Kustomize != nil {
opts.NamePrefix = q.ApplicationSource.Kustomize.NamePrefix
}
@@ -203,7 +235,7 @@ func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, e
switch appSourceType {
case v1alpha1.ApplicationSourceTypeKsonnet:
env := v1alpha1.KsonnetEnv(q.ApplicationSource)
targetObjs, params, dest, err = ksShow(appPath, env, q.ComponentParameterOverrides)
targetObjs, params, dest, err = ksShow(q.AppLabelKey, appPath, env, q.ComponentParameterOverrides)
case v1alpha1.ApplicationSourceTypeHelm:
h := helm.NewHelmApp(appPath, q.HelmRepos)
err := h.Init()
@@ -211,7 +243,7 @@ func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, e
return nil, err
}
opts := helmOpts(q)
targetObjs, err = h.Template(q.AppLabel, opts, q.ComponentParameterOverrides)
targetObjs, err = h.Template(q.AppLabelValue, opts, q.ComponentParameterOverrides)
if err != nil {
if !helm.IsMissingDependencyErr(err) {
return nil, err
@@ -220,7 +252,7 @@ func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, e
if err != nil {
return nil, err
}
targetObjs, err = h.Template(q.AppLabel, opts, q.ComponentParameterOverrides)
targetObjs, err = h.Template(q.AppLabelValue, opts, q.ComponentParameterOverrides)
if err != nil {
return nil, err
}
@@ -249,20 +281,21 @@ func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, e
if ok {
targets = append(targets, unstructuredObj)
return nil
} else {
return fmt.Errorf("resource list item has unexpected type")
}
return fmt.Errorf("resource list item has unexpected type")
})
if err != nil {
return nil, err
}
} else if isNullList(obj) {
// noop
} else {
targets = []*unstructured.Unstructured{obj}
}
for _, target := range targets {
if q.AppLabel != "" && !kube.IsCRD(target) {
err = kube.SetAppInstanceLabel(target, q.AppLabel)
if q.AppLabelKey != "" && q.AppLabelValue != "" && !kube.IsCRD(target) {
err = kube.SetAppInstanceLabel(target, q.AppLabelKey, q.AppLabelValue)
if err != nil {
return nil, err
}
@@ -305,6 +338,26 @@ func IdentifyAppSourceTypeByAppDir(appDirPath string) v1alpha1.ApplicationSource
return v1alpha1.ApplicationSourceTypeDirectory
}
// isNullList checks if the object is a "List" type where items is null instead of an empty list.
// Handles a corner case where obj.IsList() returns false when a manifest is like:
// ---
// apiVersion: v1
// items: null
// kind: ConfigMapList
func isNullList(obj *unstructured.Unstructured) bool {
if _, ok := obj.Object["spec"]; ok {
return false
}
if _, ok := obj.Object["status"]; ok {
return false
}
field, ok := obj.Object["items"]
if !ok {
return false
}
return field == nil
}
// checkoutRevision is a convenience function to initialize a repo, fetch, and checkout a revision
// Returns the 40 character commit SHA after the checkout has been performed
func checkoutRevision(gitClient git.Client, commitSHA string) (string, error) {
@@ -330,7 +383,7 @@ func manifestCacheKey(commitSHA string, q *ManifestRequest) string {
appSrcStr, _ := json.Marshal(appSrc)
pStr, _ := json.Marshal(q.ComponentParameterOverrides)
fnva := hash.FNVa(string(appSrcStr) + string(pStr))
return fmt.Sprintf("mfst|%s|%s|%s|%d", q.AppLabel, commitSHA, q.Namespace, fnva)
return fmt.Sprintf("mfst|%s|%s|%s|%s|%d", q.AppLabelKey, q.AppLabelValue, commitSHA, q.Namespace, fnva)
}
func listDirCacheKey(commitSHA string, q *ListDirRequest) string {
@@ -342,7 +395,7 @@ func getFileCacheKey(commitSHA string, q *GetFileRequest) string {
}
// ksShow runs `ks show` in an app directory after setting any component parameter overrides
func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, []*v1alpha1.ComponentParameter, *v1alpha1.ApplicationDestination, error) {
func ksShow(appLabelKey, appPath, envName string, overrides []*v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, []*v1alpha1.ComponentParameter, *v1alpha1.ApplicationDestination, error) {
ksApp, err := ksonnet.NewKsonnetApp(appPath)
if err != nil {
return nil, nil, nil, status.Errorf(codes.FailedPrecondition, "unable to load application from %s: %v", appPath, err)
@@ -364,6 +417,12 @@ func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) (
return nil, nil, nil, status.Errorf(codes.InvalidArgument, err.Error())
}
targetObjs, err := ksApp.Show(envName)
if err == nil && appLabelKey == common.LabelKeyLegacyApplicationName {
// Address https://github.com/ksonnet/ksonnet/issues/707
for _, d := range targetObjs {
kube.UnsetLabel(d, "ksonnet.io/component")
}
}
if err != nil {
return nil, nil, nil, err
}

View File

@@ -31,7 +31,9 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type ManifestRequest struct {
Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo" json:"repo,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
AppLabel string `protobuf:"bytes,5,opt,name=appLabel,proto3" json:"appLabel,omitempty"`
NoCache bool `protobuf:"varint,3,opt,name=noCache,proto3" json:"noCache,omitempty"`
AppLabelKey string `protobuf:"bytes,4,opt,name=appLabelKey,proto3" json:"appLabelKey,omitempty"`
AppLabelValue string `protobuf:"bytes,5,opt,name=appLabelValue,proto3" json:"appLabelValue,omitempty"`
ComponentParameterOverrides []*v1alpha1.ComponentParameter `protobuf:"bytes,6,rep,name=componentParameterOverrides" json:"componentParameterOverrides,omitempty"`
Namespace string `protobuf:"bytes,8,opt,name=namespace,proto3" json:"namespace,omitempty"`
ApplicationSource *v1alpha1.ApplicationSource `protobuf:"bytes,10,opt,name=applicationSource" json:"applicationSource,omitempty"`
@@ -45,7 +47,7 @@ func (m *ManifestRequest) Reset() { *m = ManifestRequest{} }
func (m *ManifestRequest) String() string { return proto.CompactTextString(m) }
func (*ManifestRequest) ProtoMessage() {}
func (*ManifestRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_47857c07b7787e09, []int{0}
return fileDescriptor_repository_d96dec39a34a8c7f, []int{0}
}
func (m *ManifestRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -88,9 +90,23 @@ func (m *ManifestRequest) GetRevision() string {
return ""
}
func (m *ManifestRequest) GetAppLabel() string {
func (m *ManifestRequest) GetNoCache() bool {
if m != nil {
return m.AppLabel
return m.NoCache
}
return false
}
func (m *ManifestRequest) GetAppLabelKey() string {
if m != nil {
return m.AppLabelKey
}
return ""
}
func (m *ManifestRequest) GetAppLabelValue() string {
if m != nil {
return m.AppLabelValue
}
return ""
}
@@ -138,7 +154,7 @@ func (m *ManifestResponse) Reset() { *m = ManifestResponse{} }
func (m *ManifestResponse) String() string { return proto.CompactTextString(m) }
func (*ManifestResponse) ProtoMessage() {}
func (*ManifestResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_47857c07b7787e09, []int{1}
return fileDescriptor_repository_d96dec39a34a8c7f, []int{1}
}
func (m *ManifestResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -216,7 +232,7 @@ func (m *ListDirRequest) Reset() { *m = ListDirRequest{} }
func (m *ListDirRequest) String() string { return proto.CompactTextString(m) }
func (*ListDirRequest) ProtoMessage() {}
func (*ListDirRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_47857c07b7787e09, []int{2}
return fileDescriptor_repository_d96dec39a34a8c7f, []int{2}
}
func (m *ListDirRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -278,7 +294,7 @@ func (m *FileList) Reset() { *m = FileList{} }
func (m *FileList) String() string { return proto.CompactTextString(m) }
func (*FileList) ProtoMessage() {}
func (*FileList) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_47857c07b7787e09, []int{3}
return fileDescriptor_repository_d96dec39a34a8c7f, []int{3}
}
func (m *FileList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -328,7 +344,7 @@ func (m *GetFileRequest) Reset() { *m = GetFileRequest{} }
func (m *GetFileRequest) String() string { return proto.CompactTextString(m) }
func (*GetFileRequest) ProtoMessage() {}
func (*GetFileRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_47857c07b7787e09, []int{4}
return fileDescriptor_repository_d96dec39a34a8c7f, []int{4}
}
func (m *GetFileRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -390,7 +406,7 @@ func (m *GetFileResponse) Reset() { *m = GetFileResponse{} }
func (m *GetFileResponse) String() string { return proto.CompactTextString(m) }
func (*GetFileResponse) ProtoMessage() {}
func (*GetFileResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_47857c07b7787e09, []int{5}
return fileDescriptor_repository_d96dec39a34a8c7f, []int{5}
}
func (m *GetFileResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -610,11 +626,27 @@ func (m *ManifestRequest) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRepository(dAtA, i, uint64(len(m.Revision)))
i += copy(dAtA[i:], m.Revision)
}
if len(m.AppLabel) > 0 {
if m.NoCache {
dAtA[i] = 0x18
i++
if m.NoCache {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if len(m.AppLabelKey) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintRepository(dAtA, i, uint64(len(m.AppLabelKey)))
i += copy(dAtA[i:], m.AppLabelKey)
}
if len(m.AppLabelValue) > 0 {
dAtA[i] = 0x2a
i++
i = encodeVarintRepository(dAtA, i, uint64(len(m.AppLabel)))
i += copy(dAtA[i:], m.AppLabel)
i = encodeVarintRepository(dAtA, i, uint64(len(m.AppLabelValue)))
i += copy(dAtA[i:], m.AppLabelValue)
}
if len(m.ComponentParameterOverrides) > 0 {
for _, msg := range m.ComponentParameterOverrides {
@@ -897,7 +929,14 @@ func (m *ManifestRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovRepository(uint64(l))
}
l = len(m.AppLabel)
if m.NoCache {
n += 2
}
l = len(m.AppLabelKey)
if l > 0 {
n += 1 + l + sovRepository(uint64(l))
}
l = len(m.AppLabelValue)
if l > 0 {
n += 1 + l + sovRepository(uint64(l))
}
@@ -1134,9 +1173,29 @@ func (m *ManifestRequest) Unmarshal(dAtA []byte) error {
}
m.Revision = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRepository
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.NoCache = bool(v != 0)
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AppLabel", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field AppLabelKey", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -1161,7 +1220,36 @@ func (m *ManifestRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.AppLabel = string(dAtA[iNdEx:postIndex])
m.AppLabelKey = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AppLabelValue", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRepository
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthRepository
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.AppLabelValue = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
@@ -2059,47 +2147,49 @@ var (
)
func init() {
proto.RegisterFile("reposerver/repository/repository.proto", fileDescriptor_repository_47857c07b7787e09)
proto.RegisterFile("reposerver/repository/repository.proto", fileDescriptor_repository_d96dec39a34a8c7f)
}
var fileDescriptor_repository_47857c07b7787e09 = []byte{
// 602 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0xcf, 0x6e, 0xd3, 0x4e,
0x10, 0xae, 0xdb, 0x34, 0x6d, 0x36, 0x3f, 0xfd, 0xda, 0xae, 0x22, 0x64, 0x39, 0x51, 0x14, 0x59,
0x02, 0xe5, 0x82, 0xad, 0x84, 0x0b, 0x17, 0x84, 0x80, 0x42, 0xa9, 0x94, 0xaa, 0xc8, 0x3d, 0xc1,
0x05, 0x6d, 0x9c, 0xc1, 0x59, 0x62, 0x7b, 0x97, 0xdd, 0x8d, 0x25, 0x78, 0x09, 0x1e, 0x80, 0x27,
0xe1, 0x0d, 0x38, 0xf2, 0x08, 0x28, 0xb7, 0xbe, 0x05, 0xf2, 0xda, 0x8e, 0x9d, 0x3f, 0xea, 0x25,
0x42, 0x70, 0x9b, 0x9d, 0x59, 0x7f, 0xdf, 0xe7, 0x6f, 0xc6, 0x63, 0xf4, 0x40, 0x00, 0x67, 0x12,
0x44, 0x02, 0xc2, 0xd5, 0x21, 0x55, 0x4c, 0x7c, 0xae, 0x84, 0x0e, 0x17, 0x4c, 0x31, 0x8c, 0xca,
0x8c, 0xd5, 0x0a, 0x58, 0xc0, 0x74, 0xda, 0x4d, 0xa3, 0xec, 0x86, 0xd5, 0x09, 0x18, 0x0b, 0x42,
0x70, 0x09, 0xa7, 0x2e, 0x89, 0x63, 0xa6, 0x88, 0xa2, 0x2c, 0x96, 0x79, 0xd5, 0x9e, 0x3d, 0x96,
0x0e, 0x65, 0xba, 0xea, 0x33, 0x01, 0x6e, 0x32, 0x70, 0x03, 0x88, 0x41, 0x10, 0x05, 0x93, 0xfc,
0xce, 0x65, 0x40, 0xd5, 0x74, 0x3e, 0x76, 0x7c, 0x16, 0xb9, 0x44, 0x68, 0x8a, 0x8f, 0x3a, 0x78,
0xe8, 0x4f, 0x5c, 0x3e, 0x0b, 0xd2, 0x87, 0xa5, 0x4b, 0x38, 0x0f, 0xa9, 0xaf, 0xc1, 0xdd, 0x64,
0x40, 0x42, 0x3e, 0x25, 0x1b, 0x50, 0xf6, 0xf7, 0x1a, 0x3a, 0xb9, 0x22, 0x31, 0xfd, 0x00, 0x52,
0x79, 0xf0, 0x69, 0x0e, 0x52, 0xe1, 0xb7, 0xa8, 0x96, 0xbe, 0x84, 0x69, 0xf4, 0x8c, 0x7e, 0x73,
0xf8, 0xd2, 0x29, 0xd9, 0x9c, 0x82, 0x4d, 0x07, 0xef, 0xfd, 0x89, 0xc3, 0x67, 0x81, 0x93, 0xb2,
0x39, 0x15, 0x36, 0xa7, 0x60, 0x73, 0xbc, 0xa5, 0x17, 0x9e, 0x86, 0xc4, 0x16, 0x3a, 0x16, 0x90,
0x50, 0x49, 0x59, 0x6c, 0xee, 0xf7, 0x8c, 0x7e, 0xc3, 0x5b, 0x9e, 0xd3, 0x1a, 0xe1, 0x7c, 0x44,
0xc6, 0x10, 0x9a, 0x87, 0x59, 0xad, 0x38, 0xe3, 0xaf, 0x06, 0x6a, 0xfb, 0x2c, 0xe2, 0x2c, 0x86,
0x58, 0xbd, 0x21, 0x82, 0x44, 0xa0, 0x40, 0x5c, 0x27, 0x20, 0x04, 0x9d, 0x80, 0x34, 0xeb, 0xbd,
0x83, 0x7e, 0x73, 0x78, 0xb5, 0x83, 0xd4, 0x17, 0x1b, 0xe8, 0xde, 0x5d, 0x8c, 0xb8, 0x83, 0x1a,
0x31, 0x89, 0x40, 0x72, 0xe2, 0x83, 0x79, 0xac, 0xe5, 0x96, 0x09, 0xfc, 0x05, 0x9d, 0x55, 0x58,
0x6e, 0xd8, 0x5c, 0xf8, 0x60, 0x22, 0xed, 0xe7, 0x68, 0x07, 0x91, 0xcf, 0xd6, 0x31, 0xbd, 0x4d,
0x1a, 0x1c, 0xa0, 0xc6, 0x14, 0xc2, 0x48, 0x7b, 0x6f, 0x36, 0xb5, 0x31, 0x97, 0x3b, 0x70, 0xbe,
0x2e, 0xb0, 0xb2, 0x3e, 0x96, 0xd8, 0xf6, 0xad, 0x81, 0x4e, 0xcb, 0xd9, 0x91, 0x9c, 0xc5, 0x12,
0x52, 0x5f, 0xa2, 0x3c, 0x27, 0x4d, 0xa3, 0x77, 0x90, 0xfa, 0xb2, 0x4c, 0xac, 0xba, 0xb6, 0xbf,
0xee, 0xda, 0x3d, 0x54, 0xcf, 0xbe, 0x30, 0xf3, 0x40, 0x97, 0xf2, 0xd3, 0xca, 0xd4, 0xd4, 0xd6,
0xa6, 0x06, 0x50, 0x9d, 0xa7, 0xdd, 0x91, 0xe6, 0xe1, 0x9f, 0x98, 0x81, 0x1c, 0xdc, 0xfe, 0x66,
0xa0, 0xff, 0x47, 0x54, 0xaa, 0x73, 0x2a, 0xfe, 0xf2, 0x67, 0x82, 0x51, 0x8d, 0x13, 0x35, 0xcd,
0x2d, 0xd2, 0xb1, 0xdd, 0x43, 0xc7, 0xaf, 0x68, 0x08, 0xa9, 0x40, 0xdc, 0x42, 0x87, 0x54, 0x41,
0x54, 0x98, 0x9f, 0x1d, 0xb4, 0xfe, 0x0b, 0x50, 0xe9, 0xad, 0x7f, 0x50, 0xff, 0x7d, 0x74, 0xb2,
0x14, 0x97, 0xcf, 0x11, 0x46, 0xb5, 0x09, 0x51, 0x44, 0xab, 0xfb, 0xcf, 0xd3, 0xf1, 0xf0, 0xd6,
0x40, 0x67, 0x25, 0xd7, 0x0d, 0x88, 0x84, 0xfa, 0x80, 0xaf, 0xd1, 0xe9, 0x45, 0xbe, 0xd5, 0x8a,
0x69, 0xc4, 0x6d, 0xa7, 0xb2, 0x98, 0xd7, 0xf6, 0x9b, 0xd5, 0xd9, 0x5e, 0xcc, 0x88, 0xed, 0x3d,
0xfc, 0x04, 0x1d, 0xe5, 0xad, 0xc6, 0x56, 0xf5, 0xea, 0x6a, 0xff, 0xad, 0x56, 0xb5, 0x56, 0xd8,
0x6f, 0xef, 0xe1, 0x73, 0x74, 0x94, 0xbf, 0xcc, 0xea, 0xe3, 0xab, 0xf6, 0x5b, 0xed, 0xad, 0xb5,
0x42, 0xc4, 0xf3, 0xa7, 0x3f, 0x16, 0x5d, 0xe3, 0xe7, 0xa2, 0x6b, 0xfc, 0x5a, 0x74, 0x8d, 0x77,
0x83, 0xbb, 0x36, 0xfe, 0xd6, 0x3f, 0xd3, 0xb8, 0xae, 0x17, 0xfc, 0xa3, 0xdf, 0x01, 0x00, 0x00,
0xff, 0xff, 0x5a, 0x5f, 0xbd, 0x73, 0xb9, 0x06, 0x00, 0x00,
var fileDescriptor_repository_d96dec39a34a8c7f = []byte{
// 636 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0xdf, 0x4e, 0xd4, 0x4e,
0x14, 0xa6, 0xb0, 0x2c, 0xbb, 0x67, 0x7f, 0x3f, 0x81, 0x09, 0x31, 0x93, 0x42, 0x48, 0xd3, 0xa8,
0xd9, 0x1b, 0xdb, 0x80, 0x37, 0xde, 0x18, 0xa3, 0xa0, 0x48, 0x84, 0x60, 0x4a, 0x62, 0xa2, 0x37,
0x66, 0xe8, 0x1e, 0xbb, 0x23, 0x6d, 0x67, 0x9c, 0x99, 0xdd, 0x04, 0x5f, 0xc2, 0x07, 0xf0, 0x85,
0xbc, 0xf4, 0x11, 0x0c, 0x77, 0x3c, 0x85, 0xa6, 0xb3, 0xed, 0x6e, 0x77, 0x21, 0xdc, 0x6c, 0x8c,
0xde, 0x9d, 0x7f, 0x3d, 0xdf, 0x99, 0xef, 0x7c, 0x9d, 0x81, 0x07, 0x0a, 0xa5, 0xd0, 0xa8, 0x86,
0xa8, 0x42, 0x6b, 0x72, 0x23, 0xd4, 0x45, 0xcd, 0x0c, 0xa4, 0x12, 0x46, 0x10, 0x98, 0x44, 0xdc,
0x8d, 0x44, 0x24, 0xc2, 0x86, 0xc3, 0xc2, 0x1a, 0x55, 0xb8, 0x5b, 0x89, 0x10, 0x49, 0x8a, 0x21,
0x93, 0x3c, 0x64, 0x79, 0x2e, 0x0c, 0x33, 0x5c, 0xe4, 0xba, 0xcc, 0xfa, 0xe7, 0x8f, 0x75, 0xc0,
0x85, 0xcd, 0xc6, 0x42, 0x61, 0x38, 0xdc, 0x09, 0x13, 0xcc, 0x51, 0x31, 0x83, 0xbd, 0xb2, 0xe6,
0x30, 0xe1, 0xa6, 0x3f, 0x38, 0x0b, 0x62, 0x91, 0x85, 0x4c, 0x59, 0x88, 0x4f, 0xd6, 0x78, 0x18,
0xf7, 0x42, 0x79, 0x9e, 0x14, 0x1f, 0xeb, 0x90, 0x49, 0x99, 0xf2, 0xd8, 0x36, 0x0f, 0x87, 0x3b,
0x2c, 0x95, 0x7d, 0x76, 0xad, 0x95, 0xff, 0xab, 0x01, 0xab, 0xc7, 0x2c, 0xe7, 0x1f, 0x51, 0x9b,
0x08, 0x3f, 0x0f, 0x50, 0x1b, 0xf2, 0x0e, 0x1a, 0xc5, 0x21, 0xa8, 0xe3, 0x39, 0xdd, 0xce, 0xee,
0x8b, 0x60, 0x82, 0x16, 0x54, 0x68, 0xd6, 0xf8, 0x10, 0xf7, 0x02, 0x79, 0x9e, 0x04, 0x05, 0x5a,
0x50, 0x43, 0x0b, 0x2a, 0xb4, 0x20, 0x1a, 0x73, 0x11, 0xd9, 0x96, 0xc4, 0x85, 0x96, 0xc2, 0x21,
0xd7, 0x5c, 0xe4, 0x74, 0xd1, 0x73, 0xba, 0xed, 0x68, 0xec, 0x13, 0x0a, 0x2b, 0xb9, 0xd8, 0x63,
0x71, 0x1f, 0xe9, 0x92, 0xe7, 0x74, 0x5b, 0x51, 0xe5, 0x12, 0x0f, 0x3a, 0x4c, 0xca, 0x23, 0x76,
0x86, 0xe9, 0x6b, 0xbc, 0xa0, 0x0d, 0xfb, 0x61, 0x3d, 0x44, 0xee, 0xc1, 0xff, 0x95, 0xfb, 0x96,
0xa5, 0x03, 0xa4, 0xcb, 0xb6, 0x66, 0x3a, 0x48, 0xbe, 0x3a, 0xb0, 0x19, 0x8b, 0x4c, 0x8a, 0x1c,
0x73, 0xf3, 0x86, 0x29, 0x96, 0xa1, 0x41, 0x75, 0x32, 0x44, 0xa5, 0x78, 0x0f, 0x35, 0x6d, 0x7a,
0x4b, 0xdd, 0xce, 0xee, 0xf1, 0x1c, 0x07, 0xde, 0xbb, 0xd6, 0x3d, 0xba, 0x0d, 0x91, 0x6c, 0x41,
0x3b, 0x67, 0x19, 0x6a, 0xc9, 0x62, 0xa4, 0x2d, 0x3b, 0xf3, 0x24, 0x40, 0xbe, 0xc0, 0x7a, 0x0d,
0xe5, 0x54, 0x0c, 0x54, 0x8c, 0x14, 0xec, 0x56, 0x8e, 0xe6, 0x18, 0xf2, 0xd9, 0x6c, 0xcf, 0xe8,
0x3a, 0x0c, 0x49, 0xa0, 0xdd, 0xc7, 0x34, 0xb3, 0x1b, 0xa4, 0x1d, 0x4b, 0xcc, 0xe1, 0x1c, 0x98,
0xaf, 0xaa, 0x5e, 0x23, 0x35, 0x4c, 0x7a, 0xfb, 0x57, 0x0e, 0xac, 0x4d, 0x14, 0xa8, 0xa5, 0xc8,
0x35, 0x16, 0xbc, 0x64, 0x65, 0x4c, 0x53, 0xc7, 0x5b, 0x2a, 0x78, 0x19, 0x07, 0xa6, 0x59, 0x5b,
0x9c, 0x65, 0xed, 0x2e, 0x34, 0x47, 0xff, 0xa9, 0x95, 0x51, 0x3b, 0x2a, 0xbd, 0x29, 0xed, 0x35,
0x66, 0xb4, 0x87, 0xd0, 0x94, 0xc5, 0x76, 0x34, 0x5d, 0xfe, 0x13, 0x1a, 0x28, 0x9b, 0xfb, 0xdf,
0x1c, 0xb8, 0x73, 0xc4, 0xb5, 0xd9, 0xe7, 0xea, 0x2f, 0xff, 0x6c, 0x04, 0x1a, 0x92, 0x99, 0x7e,
0x49, 0x91, 0xb5, 0x7d, 0x0f, 0x5a, 0x2f, 0x79, 0x8a, 0xc5, 0x80, 0x64, 0x03, 0x96, 0xb9, 0xc1,
0xac, 0x22, 0x7f, 0xe4, 0xd8, 0xf9, 0x0f, 0xd0, 0x14, 0x55, 0xff, 0xe0, 0xfc, 0xf7, 0x61, 0x75,
0x3c, 0x5c, 0xa9, 0x23, 0x02, 0x8d, 0x1e, 0x33, 0xcc, 0x4e, 0xf7, 0x5f, 0x64, 0xed, 0xdd, 0x2b,
0x07, 0xd6, 0x27, 0x58, 0xa7, 0xa8, 0x86, 0x3c, 0x46, 0x72, 0x02, 0x6b, 0x07, 0xe5, 0xdd, 0x58,
0xa9, 0x91, 0x6c, 0x06, 0xb5, 0xeb, 0x7d, 0xe6, 0x96, 0x74, 0xb7, 0x6e, 0x4e, 0x8e, 0x80, 0xfd,
0x05, 0xf2, 0x04, 0x56, 0xca, 0x55, 0x13, 0xb7, 0x5e, 0x3a, 0xbd, 0x7f, 0x77, 0xa3, 0x9e, 0xab,
0xe8, 0xf7, 0x17, 0xc8, 0x3e, 0xac, 0x94, 0x87, 0x99, 0xfe, 0x7c, 0x9a, 0x7e, 0x77, 0xf3, 0xc6,
0x5c, 0x35, 0xc4, 0xf3, 0xa7, 0xdf, 0x2f, 0xb7, 0x9d, 0x1f, 0x97, 0xdb, 0xce, 0xcf, 0xcb, 0x6d,
0xe7, 0xfd, 0xce, 0x6d, 0xef, 0xc6, 0x8d, 0xef, 0xdb, 0x59, 0xd3, 0x3e, 0x13, 0x8f, 0x7e, 0x07,
0x00, 0x00, 0xff, 0xff, 0x0d, 0x33, 0xc2, 0xf4, 0xff, 0x06, 0x00, 0x00,
}

View File

@@ -12,11 +12,14 @@ import "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1/generated.prot
message ManifestRequest {
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository repo = 1;
string revision = 2;
string appLabel = 5;
bool noCache = 3;
string appLabelKey = 4;
string appLabelValue = 5;
repeated github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.ComponentParameter componentParameterOverrides = 6;
string namespace = 8;
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.ApplicationSource applicationSource = 10;
repeated github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.HelmRepository helmRepos = 11;
}
message ManifestResponse {

View File

@@ -55,3 +55,22 @@ func TestGenerateHelmChartWithDependencies(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, 12, len(res1.Manifests))
}
func TestGenerateNullList(t *testing.T) {
q := ManifestRequest{
ApplicationSource: &argoappv1.ApplicationSource{},
}
res1, err := generateManifests("./testdata/null-list", &q)
assert.Nil(t, err)
assert.Equal(t, len(res1.Manifests), 1)
assert.Contains(t, res1.Manifests[0], "prometheus-operator-operator")
res1, err = generateManifests("./testdata/empty-list", &q)
assert.Nil(t, err)
assert.Equal(t, len(res1.Manifests), 1)
assert.Contains(t, res1.Manifests[0], "prometheus-operator-operator")
res2, err := generateManifests("./testdata/weird-list", &q)
assert.Nil(t, err)
assert.Equal(t, 2, len(res2.Manifests))
}

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMapList
items: []
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-operator-operator

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMapList
items:
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-operator-operator

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: NotAList
items:
spec:
foo: bar
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-operator-operator

View File

@@ -9,6 +9,7 @@ import (
"github.com/argoproj/argo-cd/util/git"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
tlsutil "github.com/argoproj/argo-cd/util/tls"
"github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
log "github.com/sirupsen/logrus"
@@ -19,14 +20,15 @@ import (
// ArgoCDRepoServer is the repo server implementation
type ArgoCDRepoServer struct {
log *log.Entry
gitFactory git.ClientFactory
cache cache.Cache
opts []grpc.ServerOption
log *log.Entry
gitFactory git.ClientFactory
cache cache.Cache
opts []grpc.ServerOption
parallelismLimit int64
}
// NewServer returns a new instance of the Argo CD Repo server
func NewServer(gitFactory git.ClientFactory, cache cache.Cache, tlsConfCustomizer tlsutil.ConfigCustomizer) (*ArgoCDRepoServer, error) {
func NewServer(gitFactory git.ClientFactory, cache cache.Cache, tlsConfCustomizer tlsutil.ConfigCustomizer, parallelismLimit int64) (*ArgoCDRepoServer, error) {
// generate TLS cert
hosts := []string{
"localhost",
@@ -45,31 +47,28 @@ func NewServer(gitFactory git.ClientFactory, cache cache.Cache, tlsConfCustomize
tlsConfig := &tls.Config{Certificates: []tls.Certificate{*cert}}
tlsConfCustomizer(tlsConfig)
opts := []grpc.ServerOption{grpc.Creds(credentials.NewTLS(tlsConfig))}
serverLog := log.NewEntry(log.New())
streamInterceptors := []grpc.StreamServerInterceptor{grpc_logrus.StreamServerInterceptor(serverLog), grpc_util.PanicLoggerStreamServerInterceptor(serverLog)}
unaryInterceptors := []grpc.UnaryServerInterceptor{grpc_logrus.UnaryServerInterceptor(serverLog), grpc_util.PanicLoggerUnaryServerInterceptor(serverLog)}
return &ArgoCDRepoServer{
log: log.NewEntry(log.New()),
gitFactory: gitFactory,
cache: cache,
opts: opts,
log: serverLog,
gitFactory: gitFactory,
cache: cache,
parallelismLimit: parallelismLimit,
opts: []grpc.ServerOption{
grpc.Creds(credentials.NewTLS(tlsConfig)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)),
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)),
},
}, nil
}
// CreateGRPC creates new configured grpc server
func (a *ArgoCDRepoServer) CreateGRPC() *grpc.Server {
server := grpc.NewServer(
append(a.opts,
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_logrus.StreamServerInterceptor(a.log),
grpc_util.PanicLoggerStreamServerInterceptor(a.log),
)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
grpc_logrus.UnaryServerInterceptor(a.log),
grpc_util.PanicLoggerUnaryServerInterceptor(a.log),
)))...,
)
server := grpc.NewServer(a.opts...)
version.RegisterVersionServiceServer(server, &version.Server{})
manifestService := repository.NewService(a.gitFactory, a.cache)
manifestService := repository.NewService(a.gitFactory, a.cache, a.parallelismLimit)
repository.RegisterRepositoryServiceServer(server, manifestService)
// Register reflection service on gRPC server.

View File

@@ -4,6 +4,7 @@ import (
"time"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -59,7 +60,7 @@ func (s *Server) UpdatePassword(ctx context.Context, q *UpdatePasswordRequest) (
if err != nil {
return nil, err
}
log.Infof("user '%s' updated password", username)
return &UpdatePasswordResponse{}, nil
}

View File

@@ -37,7 +37,7 @@ func (m *UpdatePasswordRequest) Reset() { *m = UpdatePasswordRequest{} }
func (m *UpdatePasswordRequest) String() string { return proto.CompactTextString(m) }
func (*UpdatePasswordRequest) ProtoMessage() {}
func (*UpdatePasswordRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_account_3e64cf795478a98b, []int{0}
return fileDescriptor_account_c12a236fbb4926f3, []int{0}
}
func (m *UpdatePasswordRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -90,7 +90,7 @@ func (m *UpdatePasswordResponse) Reset() { *m = UpdatePasswordResponse{}
func (m *UpdatePasswordResponse) String() string { return proto.CompactTextString(m) }
func (*UpdatePasswordResponse) ProtoMessage() {}
func (*UpdatePasswordResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_account_3e64cf795478a98b, []int{1}
return fileDescriptor_account_c12a236fbb4926f3, []int{1}
}
func (m *UpdatePasswordResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -566,10 +566,10 @@ var (
)
func init() {
proto.RegisterFile("server/account/account.proto", fileDescriptor_account_3e64cf795478a98b)
proto.RegisterFile("server/account/account.proto", fileDescriptor_account_c12a236fbb4926f3)
}
var fileDescriptor_account_3e64cf795478a98b = []byte{
var fileDescriptor_account_c12a236fbb4926f3 = []byte{
// 268 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x4e, 0x2d, 0x2a,
0x4b, 0x2d, 0xd2, 0x4f, 0x4c, 0x4e, 0xce, 0x2f, 0xcd, 0x2b, 0x81, 0xd1, 0x7a, 0x05, 0x45, 0xf9,

View File

@@ -0,0 +1,84 @@
package account
import (
"context"
"testing"
jwt "github.com/dgrijalva/jwt-go"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/server/session"
"github.com/argoproj/argo-cd/util/password"
sessionutil "github.com/argoproj/argo-cd/util/session"
"github.com/argoproj/argo-cd/util/settings"
)
const (
testNamespace = "default"
)
// return an AccountServer which returns fake data
func newTestAccountServer(ctx context.Context, objects ...runtime.Object) (*fake.Clientset, *Server, *session.Server) {
bcrypt, err := password.HashPassword("oldpassword")
errors.CheckError(err)
kubeclientset := fake.NewSimpleClientset(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-cm",
Namespace: testNamespace,
},
}, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-secret",
Namespace: testNamespace,
},
Data: map[string][]byte{
"admin.password": []byte(bcrypt),
"server.secretkey": []byte("test"),
},
})
settingsMgr := settings.NewSettingsManager(ctx, kubeclientset, testNamespace)
sessionMgr := sessionutil.NewSessionManager(settingsMgr)
return kubeclientset, NewServer(sessionMgr, settingsMgr), session.NewServer(sessionMgr)
}
func TestUpdatePassword(t *testing.T) {
ctx := context.Background()
_, accountServer, sessionServer := newTestAccountServer(ctx)
ctx = context.WithValue(ctx, "claims", &jwt.StandardClaims{Subject: "admin"})
var err error
// ensure password is not allowed to be updated if given bad password
_, err = accountServer.UpdatePassword(ctx, &UpdatePasswordRequest{CurrentPassword: "badpassword", NewPassword: "newpassword"})
assert.Error(t, err)
assert.NoError(t, accountServer.sessionMgr.VerifyUsernamePassword("admin", "oldpassword"))
assert.Error(t, accountServer.sessionMgr.VerifyUsernamePassword("admin", "newpassword"))
// verify old password works
_, err = sessionServer.Create(ctx, &session.SessionCreateRequest{Username: "admin", Password: "oldpassword"})
assert.NoError(t, err)
// verify new password doesn't
_, err = sessionServer.Create(ctx, &session.SessionCreateRequest{Username: "admin", Password: "newpassword"})
assert.Error(t, err)
// ensure password can be updated with valid password and immediately be used
settings, err := accountServer.settingsMgr.GetSettings()
assert.NoError(t, err)
prevHash := settings.AdminPasswordHash
_, err = accountServer.UpdatePassword(ctx, &UpdatePasswordRequest{CurrentPassword: "oldpassword", NewPassword: "newpassword"})
assert.NoError(t, err)
settings, err = accountServer.settingsMgr.GetSettings()
assert.NoError(t, err)
assert.NotEqual(t, prevHash, settings.AdminPasswordHash)
assert.NoError(t, accountServer.sessionMgr.VerifyUsernamePassword("admin", "newpassword"))
assert.Error(t, accountServer.sessionMgr.VerifyUsernamePassword("admin", "oldpassword"))
// verify old password is invalid
_, err = sessionServer.Create(ctx, &session.SessionCreateRequest{Username: "admin", Password: "oldpassword"})
assert.Error(t, err)
// verify new password works
_, err = sessionServer.Create(ctx, &session.SessionCreateRequest{Username: "admin", Password: "newpassword"})
assert.NoError(t, err)
}

View File

@@ -8,15 +8,14 @@ import (
"strings"
"time"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
@@ -38,6 +37,7 @@ import (
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/rbac"
"github.com/argoproj/argo-cd/util/session"
"github.com/argoproj/argo-cd/util/settings"
)
// Server provides an Application service
@@ -53,6 +53,7 @@ type Server struct {
projectLock *util.KeyLock
auditLogger *argo.AuditLogger
gitFactory git.ClientFactory
settingsMgr *settings.SettingsManager
}
// NewServer returns a new instance of the Application service
@@ -66,6 +67,7 @@ func NewServer(
db db.ArgoDB,
enf *rbac.Enforcer,
projectLock *util.KeyLock,
settingsMgr *settings.SettingsManager,
) ApplicationServiceServer {
return &Server{
@@ -80,6 +82,7 @@ func NewServer(
projectLock: projectLock,
auditLogger: argo.NewAuditLogger(namespace, kubeclientset, "argocd-server"),
gitFactory: git.NewFactory(),
settingsMgr: settingsMgr,
}
}
@@ -120,7 +123,7 @@ func (s *Server) Create(ctx context.Context, q *ApplicationCreateRequest) (*appv
a := q.Application
a.Spec = *argo.NormalizeApplicationSpec(&a.Spec)
err := s.validateApp(ctx, &a.Spec)
err := s.validateApp(ctx, &a)
if err != nil {
return nil, err
}
@@ -179,13 +182,23 @@ func (s *Server) GetManifests(ctx context.Context, q *ApplicationManifestQuery)
if q.Revision != "" {
revision = q.Revision
}
settings, err := s.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
helmRepos, err := s.db.ListHelmRepos(ctx)
if err != nil {
return nil, err
}
manifestInfo, err := repoClient.GenerateManifest(ctx, &repository.ManifestRequest{
Repo: repo,
Revision: revision,
ComponentParameterOverrides: overrides,
AppLabel: a.Name,
AppLabelKey: settings.GetAppInstanceLabelKey(),
AppLabelValue: a.Name,
Namespace: a.Spec.Destination.Namespace,
ApplicationSource: &a.Spec.Source,
HelmRepos: helmRepos,
})
if err != nil {
return nil, err
@@ -204,8 +217,12 @@ func (s *Server) Get(ctx context.Context, q *ApplicationQuery) (*appv1.Applicati
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
if q.Refresh {
_, err = argoutil.RefreshApp(appIf, *q.Name)
if q.Refresh != nil {
refreshType := appv1.RefreshTypeNormal
if *q.Refresh == string(appv1.RefreshTypeHard) {
refreshType = appv1.RefreshTypeHard
}
_, err = argoutil.RefreshApp(appIf, *q.Name, refreshType)
if err != nil {
return nil, err
}
@@ -276,7 +293,7 @@ func (s *Server) Update(ctx context.Context, q *ApplicationUpdateRequest) (*appv
a := q.Application
a.Spec = *argo.NormalizeApplicationSpec(&a.Spec)
err := s.validateApp(ctx, &a.Spec)
err := s.validateApp(ctx, a)
if err != nil {
return nil, err
}
@@ -300,7 +317,8 @@ func (s *Server) UpdateSpec(ctx context.Context, q *ApplicationUpdateSpecRequest
return nil, grpc.ErrPermissionDenied
}
q.Spec = *argo.NormalizeApplicationSpec(&q.Spec)
err = s.validateApp(ctx, &q.Spec)
a.Spec = q.Spec
err = s.validateApp(ctx, a)
if err != nil {
return nil, err
}
@@ -412,18 +430,35 @@ func (s *Server) Watch(q *ApplicationQuery, ws ApplicationService_WatchServer) e
return nil
}
func (s *Server) validateApp(ctx context.Context, spec *appv1.ApplicationSpec) error {
proj, err := argo.GetAppProject(spec, s.appclientset, s.ns)
func (s *Server) validateApp(ctx context.Context, app *appv1.Application) error {
proj, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Get(app.Spec.GetProject(), metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
return status.Errorf(codes.InvalidArgument, "application referencing project %s which does not exist", spec.Project)
return status.Errorf(codes.InvalidArgument, "application references project %s which does not exist", app.Spec.Project)
}
return err
}
if !s.enf.Enforce(ctx.Value("claims"), "projects", rbacpolicy.ActionGet, proj.Name) {
return status.Errorf(codes.PermissionDenied, "permission denied for project %s", proj.Name)
currApp, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(app.Name, metav1.GetOptions{})
if err != nil {
if !apierr.IsNotFound(err) {
return err
}
// Kubernetes go-client will return a pointer to a zero-value app instead of nil, even
// though the API response was NotFound. This behavior was confirmed via logs.
currApp = nil
}
conditions, err := argo.GetSpecErrors(ctx, spec, proj, s.repoClientset, s.db)
if currApp != nil && currApp.Spec.GetProject() != app.Spec.GetProject() {
// When changing projects, caller must have application create & update privileges in new project
// NOTE: update permission was already verified by the caller of this function
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionCreate, appRBACName(*app)) {
return grpc.ErrPermissionDenied
}
// They also need 'update' privileges in the old project
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, appRBACName(*currApp)) {
return grpc.ErrPermissionDenied
}
}
conditions, err := argo.GetSpecErrors(ctx, &app.Spec, proj, s.repoClientset, s.db)
if err != nil {
return err
}
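A minimal Go sketch (illustrative only, not client-go itself) of the gotcha described in the comment above: generated typed clients allocate the result via a named return before the lookup, so on a NotFound error the caller receives a non-nil pointer to a zero-value object alongside the error, which is why currApp is explicitly set to nil.
package main
import (
	"errors"
	"fmt"
)
type App struct{ Name string }
// get mimics the shape of a generated typed client: the result pointer is allocated
// before the lookup, so it is returned non-nil even when the lookup fails.
func get(name string) (result *App, err error) {
	result = &App{}
	if name != "found" {
		return result, errors.New("not found")
	}
	result.Name = name
	return result, nil
}
func main() {
	app, err := get("missing")
	fmt.Println(err != nil) // true: the lookup failed
	fmt.Println(app == nil) // false: pointer to a zero-value App, not nil
}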
@@ -762,6 +797,11 @@ func (s *Server) Rollback(ctx context.Context, rollbackReq *ApplicationRollbackR
if deploymentInfo == nil {
return nil, fmt.Errorf("application %s does not have deployment with id %v", a.Name, rollbackReq.ID)
}
overrides := deploymentInfo.ComponentParameterOverrides
// Nil overrides in deployment history means no overrides, so the sync operation request should contain an empty overrides set
if overrides == nil {
overrides = make([]appv1.ComponentParameter, 0)
}
// Rollback is just a convenience around Sync
op := appv1.Operation{
Sync: &appv1.SyncOperation{
@@ -769,7 +809,7 @@ func (s *Server) Rollback(ctx context.Context, rollbackReq *ApplicationRollbackR
DryRun: rollbackReq.DryRun,
Prune: rollbackReq.Prune,
SyncStrategy: &appv1.SyncStrategy{Apply: &appv1.SyncStrategyApply{}},
ParameterOverrides: deploymentInfo.ComponentParameterOverrides,
ParameterOverrides: overrides,
},
}
a, err = argo.SetAppOperation(appIf, *rollbackReq.Name, &op)
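A self-contained sketch (plain encoding/json, hypothetical variable names) of why normalizing nil overrides to an empty slice matters in Go: the two values are distinguishable and serialize differently, so an explicit empty set unambiguously records "no overrides" in the stored sync operation.
package main
import (
	"encoding/json"
	"fmt"
)
func main() {
	var nilOverrides []string           // nil slice: "overrides not specified"
	emptyOverrides := make([]string, 0) // empty slice: "explicitly no overrides"
	a, _ := json.Marshal(nilOverrides)
	b, _ := json.Marshal(emptyOverrides)
	fmt.Println(string(a))             // null
	fmt.Println(string(b))             // []
	fmt.Println(nilOverrides == nil)   // true
	fmt.Println(emptyOverrides == nil) // false
}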

View File

@@ -41,7 +41,7 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// ApplicationQuery is a query for application resources
type ApplicationQuery struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Refresh bool `protobuf:"varint,2,opt,name=refresh" json:"refresh"`
Refresh *string `protobuf:"bytes,2,opt,name=refresh" json:"refresh,omitempty"`
Projects []string `protobuf:"bytes,3,rep,name=project" json:"project,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -52,7 +52,7 @@ func (m *ApplicationQuery) Reset() { *m = ApplicationQuery{} }
func (m *ApplicationQuery) String() string { return proto.CompactTextString(m) }
func (*ApplicationQuery) ProtoMessage() {}
func (*ApplicationQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{0}
return fileDescriptor_application_4bf1d189f28cefe9, []int{0}
}
func (m *ApplicationQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -88,11 +88,11 @@ func (m *ApplicationQuery) GetName() string {
return ""
}
func (m *ApplicationQuery) GetRefresh() bool {
if m != nil {
return m.Refresh
func (m *ApplicationQuery) GetRefresh() string {
if m != nil && m.Refresh != nil {
return *m.Refresh
}
return false
return ""
}
func (m *ApplicationQuery) GetProjects() []string {
@@ -117,7 +117,7 @@ func (m *ApplicationResourceEventsQuery) Reset() { *m = ApplicationResou
func (m *ApplicationResourceEventsQuery) String() string { return proto.CompactTextString(m) }
func (*ApplicationResourceEventsQuery) ProtoMessage() {}
func (*ApplicationResourceEventsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{1}
return fileDescriptor_application_4bf1d189f28cefe9, []int{1}
}
func (m *ApplicationResourceEventsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -187,7 +187,7 @@ func (m *ApplicationManifestQuery) Reset() { *m = ApplicationManifestQue
func (m *ApplicationManifestQuery) String() string { return proto.CompactTextString(m) }
func (*ApplicationManifestQuery) ProtoMessage() {}
func (*ApplicationManifestQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{2}
return fileDescriptor_application_4bf1d189f28cefe9, []int{2}
}
func (m *ApplicationManifestQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -240,7 +240,7 @@ func (m *ApplicationResponse) Reset() { *m = ApplicationResponse{} }
func (m *ApplicationResponse) String() string { return proto.CompactTextString(m) }
func (*ApplicationResponse) ProtoMessage() {}
func (*ApplicationResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{3}
return fileDescriptor_application_4bf1d189f28cefe9, []int{3}
}
func (m *ApplicationResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -281,7 +281,7 @@ func (m *ApplicationCreateRequest) Reset() { *m = ApplicationCreateReque
func (m *ApplicationCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationCreateRequest) ProtoMessage() {}
func (*ApplicationCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{4}
return fileDescriptor_application_4bf1d189f28cefe9, []int{4}
}
func (m *ApplicationCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -335,7 +335,7 @@ func (m *ApplicationUpdateRequest) Reset() { *m = ApplicationUpdateReque
func (m *ApplicationUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationUpdateRequest) ProtoMessage() {}
func (*ApplicationUpdateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{5}
return fileDescriptor_application_4bf1d189f28cefe9, []int{5}
}
func (m *ApplicationUpdateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -383,7 +383,7 @@ func (m *ApplicationDeleteRequest) Reset() { *m = ApplicationDeleteReque
func (m *ApplicationDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationDeleteRequest) ProtoMessage() {}
func (*ApplicationDeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{6}
return fileDescriptor_application_4bf1d189f28cefe9, []int{6}
}
func (m *ApplicationDeleteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -444,7 +444,7 @@ func (m *ApplicationSyncRequest) Reset() { *m = ApplicationSyncRequest{}
func (m *ApplicationSyncRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationSyncRequest) ProtoMessage() {}
func (*ApplicationSyncRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{7}
return fileDescriptor_application_4bf1d189f28cefe9, []int{7}
}
func (m *ApplicationSyncRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -535,7 +535,7 @@ func (m *ParameterOverrides) Reset() { *m = ParameterOverrides{} }
func (m *ParameterOverrides) String() string { return proto.CompactTextString(m) }
func (*ParameterOverrides) ProtoMessage() {}
func (*ParameterOverrides) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{8}
return fileDescriptor_application_4bf1d189f28cefe9, []int{8}
}
func (m *ParameterOverrides) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -584,7 +584,7 @@ func (m *Parameter) Reset() { *m = Parameter{} }
func (m *Parameter) String() string { return proto.CompactTextString(m) }
func (*Parameter) ProtoMessage() {}
func (*Parameter) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{9}
return fileDescriptor_application_4bf1d189f28cefe9, []int{9}
}
func (m *Parameter) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -647,7 +647,7 @@ func (m *ApplicationUpdateSpecRequest) Reset() { *m = ApplicationUpdateS
func (m *ApplicationUpdateSpecRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationUpdateSpecRequest) ProtoMessage() {}
func (*ApplicationUpdateSpecRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{10}
return fileDescriptor_application_4bf1d189f28cefe9, []int{10}
}
func (m *ApplicationUpdateSpecRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -704,7 +704,7 @@ func (m *ApplicationRollbackRequest) Reset() { *m = ApplicationRollbackR
func (m *ApplicationRollbackRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationRollbackRequest) ProtoMessage() {}
func (*ApplicationRollbackRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{11}
return fileDescriptor_application_4bf1d189f28cefe9, []int{11}
}
func (m *ApplicationRollbackRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -777,7 +777,7 @@ func (m *ApplicationResourceRequest) Reset() { *m = ApplicationResourceR
func (m *ApplicationResourceRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationResourceRequest) ProtoMessage() {}
func (*ApplicationResourceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{12}
return fileDescriptor_application_4bf1d189f28cefe9, []int{12}
}
func (m *ApplicationResourceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -865,7 +865,7 @@ func (m *ApplicationResourceDeleteRequest) Reset() { *m = ApplicationRes
func (m *ApplicationResourceDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationResourceDeleteRequest) ProtoMessage() {}
func (*ApplicationResourceDeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{13}
return fileDescriptor_application_4bf1d189f28cefe9, []int{13}
}
func (m *ApplicationResourceDeleteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -954,7 +954,7 @@ func (m *ApplicationResourceResponse) Reset() { *m = ApplicationResource
func (m *ApplicationResourceResponse) String() string { return proto.CompactTextString(m) }
func (*ApplicationResourceResponse) ProtoMessage() {}
func (*ApplicationResourceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{14}
return fileDescriptor_application_4bf1d189f28cefe9, []int{14}
}
func (m *ApplicationResourceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1008,7 +1008,7 @@ func (m *ApplicationPodLogsQuery) Reset() { *m = ApplicationPodLogsQuery
func (m *ApplicationPodLogsQuery) String() string { return proto.CompactTextString(m) }
func (*ApplicationPodLogsQuery) ProtoMessage() {}
func (*ApplicationPodLogsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{15}
return fileDescriptor_application_4bf1d189f28cefe9, []int{15}
}
func (m *ApplicationPodLogsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1105,7 +1105,7 @@ func (m *LogEntry) Reset() { *m = LogEntry{} }
func (m *LogEntry) String() string { return proto.CompactTextString(m) }
func (*LogEntry) ProtoMessage() {}
func (*LogEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{16}
return fileDescriptor_application_4bf1d189f28cefe9, []int{16}
}
func (m *LogEntry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1159,7 +1159,7 @@ func (m *OperationTerminateRequest) Reset() { *m = OperationTerminateReq
func (m *OperationTerminateRequest) String() string { return proto.CompactTextString(m) }
func (*OperationTerminateRequest) ProtoMessage() {}
func (*OperationTerminateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{17}
return fileDescriptor_application_4bf1d189f28cefe9, []int{17}
}
func (m *OperationTerminateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1205,7 +1205,7 @@ func (m *OperationTerminateResponse) Reset() { *m = OperationTerminateRe
func (m *OperationTerminateResponse) String() string { return proto.CompactTextString(m) }
func (*OperationTerminateResponse) ProtoMessage() {}
func (*OperationTerminateResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_application_1f706f7a3fcdb3d9, []int{18}
return fileDescriptor_application_4bf1d189f28cefe9, []int{18}
}
func (m *OperationTerminateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1962,14 +1962,12 @@ func (m *ApplicationQuery) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintApplication(dAtA, i, uint64(len(*m.Name)))
i += copy(dAtA[i:], *m.Name)
}
dAtA[i] = 0x10
i++
if m.Refresh {
dAtA[i] = 1
} else {
dAtA[i] = 0
if m.Refresh != nil {
dAtA[i] = 0x12
i++
i = encodeVarintApplication(dAtA, i, uint64(len(*m.Refresh)))
i += copy(dAtA[i:], *m.Refresh)
}
i++
if len(m.Projects) > 0 {
for _, s := range m.Projects {
dAtA[i] = 0x1a
@@ -2730,7 +2728,10 @@ func (m *ApplicationQuery) Size() (n int) {
l = len(*m.Name)
n += 1 + l + sovApplication(uint64(l))
}
n += 2
if m.Refresh != nil {
l = len(*m.Refresh)
n += 1 + l + sovApplication(uint64(l))
}
if len(m.Projects) > 0 {
for _, s := range m.Projects {
l = len(s)
@@ -3117,10 +3118,10 @@ func (m *ApplicationQuery) Unmarshal(dAtA []byte) error {
m.Name = &s
iNdEx = postIndex
case 2:
if wireType != 0 {
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
}
var v int
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowApplication
@@ -3130,12 +3131,22 @@ func (m *ApplicationQuery) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Refresh = bool(v != 0)
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthApplication
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.Refresh = &s
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Projects", wireType)
@@ -5846,112 +5857,112 @@ var (
)
func init() {
proto.RegisterFile("server/application/application.proto", fileDescriptor_application_1f706f7a3fcdb3d9)
proto.RegisterFile("server/application/application.proto", fileDescriptor_application_4bf1d189f28cefe9)
}
var fileDescriptor_application_1f706f7a3fcdb3d9 = []byte{
// 1635 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0xdc, 0xc6,
0x15, 0xef, 0xec, 0x4a, 0x5a, 0xed, 0x93, 0x50, 0xb8, 0x53, 0x5b, 0x65, 0x69, 0x59, 0x5a, 0xd0,
0xb2, 0xbc, 0x96, 0x2b, 0xd2, 0x12, 0x8c, 0xd6, 0x10, 0x5c, 0xb8, 0x56, 0xad, 0xca, 0x32, 0x64,
0x5b, 0x5d, 0xd9, 0x28, 0xd0, 0x4b, 0x31, 0x26, 0x47, 0x2b, 0x56, 0xbb, 0x1c, 0x76, 0xc8, 0xdd,
0x62, 0x6b, 0xf8, 0x50, 0x23, 0xc8, 0x29, 0x88, 0x11, 0x24, 0x87, 0xdc, 0x12, 0xf8, 0x9c, 0x5b,
0x6e, 0x39, 0xe4, 0x16, 0xc0, 0xa7, 0x20, 0x40, 0x7c, 0x36, 0x0c, 0x21, 0x7f, 0x43, 0xce, 0xc1,
0x0c, 0xbf, 0x86, 0xd2, 0x2e, 0xd7, 0xb2, 0x37, 0x87, 0xdc, 0xc8, 0x37, 0x6f, 0xde, 0xfb, 0xbd,
0x4f, 0xbe, 0x47, 0x58, 0x08, 0x28, 0xef, 0x52, 0x6e, 0x11, 0xdf, 0x6f, 0xb9, 0x36, 0x09, 0x5d,
0xe6, 0xa9, 0xcf, 0xa6, 0xcf, 0x59, 0xc8, 0xf0, 0x94, 0x42, 0xd2, 0x4f, 0x37, 0x59, 0x93, 0x49,
0xba, 0x25, 0x9e, 0x22, 0x16, 0x7d, 0xb6, 0xc9, 0x58, 0xb3, 0x45, 0x2d, 0xe2, 0xbb, 0x16, 0xf1,
0x3c, 0x16, 0x4a, 0xe6, 0x20, 0x3e, 0x35, 0x0e, 0xae, 0x05, 0xa6, 0xcb, 0xe4, 0xa9, 0xcd, 0x38,
0xb5, 0xba, 0x2b, 0x56, 0x93, 0x7a, 0x94, 0x93, 0x90, 0x3a, 0x31, 0xcf, 0xd5, 0x8c, 0xa7, 0x4d,
0xec, 0x7d, 0xd7, 0xa3, 0xbc, 0x67, 0xf9, 0x07, 0x4d, 0x41, 0x08, 0xac, 0x36, 0x0d, 0x49, 0xbf,
0x5b, 0x5b, 0x4d, 0x37, 0xdc, 0xef, 0x3c, 0x32, 0x6d, 0xd6, 0xb6, 0x08, 0x97, 0xc0, 0xfe, 0x2d,
0x1f, 0x96, 0x6d, 0x27, 0xbb, 0xad, 0x9a, 0xd7, 0x5d, 0x21, 0x2d, 0x7f, 0x9f, 0x1c, 0x17, 0xb5,
0x5e, 0x24, 0x8a, 0x53, 0x9f, 0xc5, 0xbe, 0x92, 0x8f, 0x6e, 0xc8, 0x78, 0x4f, 0x79, 0x8c, 0x65,
0xdc, 0x2c, 0x92, 0x61, 0x33, 0x2f, 0xe4, 0xac, 0xd5, 0xa2, 0xdc, 0x12, 0xa2, 0x5c, 0x9b, 0x06,
0xc7, 0x9d, 0x6d, 0x78, 0x70, 0xea, 0x66, 0x46, 0xfc, 0x7b, 0x87, 0xf2, 0x1e, 0xc6, 0x30, 0xe6,
0x91, 0x36, 0xd5, 0x50, 0x0d, 0xd5, 0xab, 0x0d, 0xf9, 0x8c, 0xe7, 0xa0, 0xc2, 0xe9, 0x1e, 0xa7,
0xc1, 0xbe, 0x56, 0xaa, 0xa1, 0xfa, 0xe4, 0xfa, 0xd8, 0x8b, 0x57, 0xf3, 0xbf, 0x6a, 0x24, 0x44,
0xbc, 0x08, 0x15, 0xa1, 0x9d, 0xda, 0xa1, 0x56, 0xae, 0x95, 0xeb, 0xd5, 0xf5, 0xe9, 0xc3, 0x57,
0xf3, 0x93, 0x3b, 0x11, 0x29, 0x68, 0x24, 0x87, 0xc6, 0x57, 0x08, 0xe6, 0x14, 0x85, 0x0d, 0x1a,
0xb0, 0x0e, 0xb7, 0xe9, 0x46, 0x97, 0x7a, 0x61, 0x70, 0x54, 0x7d, 0x29, 0x55, 0xbf, 0x0a, 0xbf,
0xe1, 0x31, 0xeb, 0x3d, 0xd2, 0xa6, 0x81, 0x4f, 0x6c, 0xaa, 0x95, 0x04, 0x43, 0x0c, 0xe4, 0xf8,
0x31, 0xae, 0xc3, 0xb4, 0x4a, 0xd4, 0xca, 0x0a, 0x7b, 0xee, 0x04, 0x2f, 0xc2, 0x54, 0xf2, 0xfe,
0x70, 0xeb, 0x96, 0x36, 0xa6, 0x30, 0xaa, 0x07, 0xc6, 0x0e, 0x68, 0x0a, 0xf6, 0xbb, 0xc4, 0x73,
0xf7, 0x68, 0x10, 0x0e, 0x46, 0x5d, 0x83, 0x49, 0x4e, 0xbb, 0x6e, 0xe0, 0x32, 0x4f, 0x7a, 0x2d,
0x11, 0x9a, 0x52, 0x8d, 0x33, 0xf0, 0xdb, 0xbc, 0x37, 0x7c, 0xe6, 0x05, 0xd4, 0x78, 0x8e, 0x72,
0x9a, 0xfe, 0xca, 0x29, 0x09, 0x69, 0x83, 0xfe, 0xa7, 0x43, 0x83, 0x10, 0x7b, 0xa0, 0x56, 0x88,
0x54, 0x38, 0xb5, 0xfa, 0x37, 0x33, 0xcb, 0x05, 0x33, 0xc9, 0x05, 0xf9, 0xf0, 0x2f, 0xdb, 0x31,
0xfd, 0x83, 0xa6, 0x29, 0x52, 0xd3, 0x54, 0x13, 0x20, 0x49, 0x4d, 0x53, 0xd1, 0x94, 0x58, 0xad,
0xf0, 0xe1, 0x19, 0x98, 0xe8, 0xf8, 0x01, 0xe5, 0x61, 0x14, 0xf9, 0x46, 0xfc, 0x66, 0xbc, 0x97,
0x07, 0xf9, 0xd0, 0x77, 0x14, 0x90, 0xfb, 0x3f, 0x23, 0xc8, 0x1c, 0x3c, 0xe3, 0x76, 0x0e, 0xc5,
0x2d, 0xda, 0xa2, 0x19, 0x8a, 0x7e, 0x41, 0xd1, 0xa0, 0x62, 0x93, 0xc0, 0x26, 0x0e, 0x8d, 0xed,
0x49, 0x5e, 0x8d, 0xe7, 0x65, 0x98, 0x51, 0x44, 0xed, 0xf6, 0x3c, 0xbb, 0x48, 0xd0, 0xd0, 0xe8,
0xe2, 0x59, 0x98, 0x70, 0x78, 0xaf, 0xd1, 0xf1, 0xb4, 0xb2, 0x52, 0x33, 0x31, 0x0d, 0xeb, 0x30,
0xee, 0xf3, 0x8e, 0x47, 0xb5, 0x31, 0xe5, 0x30, 0x22, 0x61, 0x1b, 0x26, 0x83, 0x50, 0xb4, 0x8b,
0x66, 0x4f, 0x1b, 0xaf, 0xa1, 0xfa, 0xd4, 0xea, 0xe6, 0x3b, 0xf8, 0x4e, 0x58, 0xb2, 0x1b, 0x8b,
0x6b, 0xa4, 0x82, 0xf1, 0x9f, 0xa1, 0xea, 0x13, 0x4e, 0xda, 0x34, 0xa4, 0x5c, 0x9b, 0x90, 0x5a,
0xe6, 0x73, 0x02, 0x76, 0x92, 0xd3, 0xfb, 0x5d, 0xca, 0xb9, 0xeb, 0xd0, 0xa0, 0x91, 0xdd, 0xc0,
0x21, 0x54, 0x93, 0xe2, 0x08, 0xb4, 0x4a, 0xad, 0x5c, 0x9f, 0x5a, 0xdd, 0x79, 0x47, 0x90, 0xf7,
0x7d, 0xd1, 0x23, 0x95, 0xbe, 0x10, 0x7b, 0x25, 0x53, 0x64, 0xdc, 0x01, 0x7c, 0x1c, 0x16, 0xbe,
0x0a, 0x55, 0x96, 0xbc, 0x68, 0x48, 0x62, 0x99, 0xe9, 0x6f, 0x4a, 0x23, 0x63, 0x34, 0x28, 0x54,
0x53, 0x3a, 0xd6, 0xd4, 0x10, 0xc7, 0x7a, 0xa3, 0x40, 0xeb, 0x30, 0xde, 0x25, 0xad, 0x0e, 0xcd,
0x45, 0x39, 0x22, 0x61, 0x03, 0xaa, 0x36, 0x6b, 0xfb, 0xcc, 0xa3, 0x5e, 0x28, 0xa3, 0x9c, 0x9c,
0x67, 0x64, 0xe3, 0x53, 0x04, 0xb3, 0xc7, 0x0a, 0x65, 0xd7, 0xa7, 0x85, 0xd9, 0xe5, 0xc0, 0x58,
0xe0, 0x53, 0x5b, 0x36, 0xb9, 0xa9, 0xd5, 0x3b, 0xa3, 0xa9, 0x1c, 0xa1, 0x34, 0x31, 0x4d, 0x48,
0x37, 0xde, 0x47, 0xa0, 0xab, 0x95, 0xc5, 0x5a, 0xad, 0x47, 0xc4, 0x3e, 0x28, 0x02, 0xa6, 0x43,
0xc9, 0x75, 0x24, 0xac, 0xf2, 0x3a, 0x08, 0x51, 0x87, 0xaf, 0xe6, 0x4b, 0x5b, 0xb7, 0x1a, 0x25,
0xd7, 0x79, 0xfb, 0x84, 0x37, 0x5e, 0x1e, 0x01, 0x12, 0xc7, 0xbb, 0x08, 0x88, 0x01, 0x55, 0xaf,
0xef, 0xb7, 0x20, 0x23, 0x9f, 0xe0, 0x1b, 0x30, 0x07, 0x95, 0x2e, 0xe5, 0xb2, 0x98, 0xd5, 0xfe,
0x9f, 0x10, 0x05, 0xf8, 0x26, 0x67, 0x1d, 0x5f, 0x1b, 0x57, 0x4e, 0x23, 0x92, 0x48, 0x9d, 0x03,
0xd7, 0x73, 0xb4, 0x09, 0x35, 0x75, 0x04, 0xc5, 0xf8, 0x11, 0x41, 0xad, 0x8f, 0x59, 0xc3, 0xbb,
0xd4, 0x2f, 0xc0, 0x38, 0x71, 0x6b, 0x8f, 0x71, 0x9b, 0x6a, 0x95, 0x34, 0x9e, 0xa8, 0x11, 0x91,
0x8c, 0x1b, 0x70, 0xb6, 0x6f, 0x38, 0xa3, 0x0f, 0x9c, 0xe8, 0x9d, 0xed, 0xf8, 0xf3, 0x99, 0x2b,
0xb8, 0x94, 0x6a, 0x7c, 0x53, 0x82, 0xdf, 0x29, 0x12, 0x76, 0x98, 0xb3, 0xcd, 0x9a, 0x05, 0x13,
0xc2, 0x9b, 0x38, 0x4c, 0x83, 0x8a, 0xcf, 0x9c, 0xcc, 0x57, 0x8d, 0xe4, 0x35, 0x2a, 0x63, 0x2f,
0x24, 0x62, 0x0e, 0xcc, 0xb9, 0x28, 0x23, 0x0b, 0x77, 0x07, 0xae, 0x67, 0xd3, 0x5d, 0x6a, 0x33,
0xcf, 0x09, 0xa4, 0xaf, 0xca, 0x89, 0xbb, 0xd5, 0x13, 0x7c, 0x1b, 0xaa, 0xf2, 0xfd, 0x81, 0xdb,
0xa6, 0x71, 0x63, 0x5d, 0x32, 0xa3, 0x81, 0xd3, 0x54, 0x07, 0xce, 0xac, 0x70, 0xc5, 0xc0, 0x69,
0x76, 0x57, 0x4c, 0x71, 0xa3, 0x91, 0x5d, 0x16, 0xb8, 0x42, 0xe2, 0xb6, 0xb6, 0x5d, 0x4f, 0xf6,
0xd8, 0x4c, 0x61, 0x46, 0x16, 0x45, 0xb7, 0xc7, 0x5a, 0x2d, 0xf6, 0x5f, 0x6d, 0xb2, 0x56, 0xca,
0x8a, 0x2e, 0xa2, 0x19, 0xff, 0x83, 0xc9, 0x6d, 0xd6, 0xdc, 0xf0, 0x42, 0xde, 0x13, 0x69, 0x20,
0xcc, 0x11, 0xad, 0x4a, 0x75, 0x7a, 0x42, 0xc4, 0xf7, 0xa0, 0x1a, 0xba, 0x6d, 0xba, 0x1b, 0x92,
0xb6, 0x1f, 0x37, 0x9e, 0x13, 0xe0, 0x4e, 0x91, 0x25, 0x22, 0x0c, 0x0b, 0x7e, 0x9f, 0x76, 0xf4,
0x07, 0x94, 0xb7, 0x5d, 0x8f, 0x14, 0x66, 0xbd, 0x31, 0x0b, 0x7a, 0xbf, 0x0b, 0x51, 0xd2, 0xac,
0x7e, 0x7b, 0x06, 0xb0, 0xda, 0xcc, 0xa2, 0xa9, 0x16, 0x3f, 0x43, 0x30, 0xb6, 0xed, 0x06, 0x21,
0x3e, 0x97, 0xeb, 0x7f, 0x47, 0xc7, 0x5a, 0x7d, 0x44, 0x3d, 0x54, 0xa8, 0x32, 0x66, 0x9f, 0x7e,
0xff, 0xc3, 0xc7, 0xa5, 0x19, 0x7c, 0x5a, 0x2e, 0x19, 0xdd, 0x15, 0x75, 0xb2, 0x0e, 0xf0, 0x07,
0x08, 0xb0, 0x60, 0xcb, 0x4f, 0xb7, 0xf8, 0xf2, 0x20, 0x7c, 0x7d, 0xa6, 0x60, 0xfd, 0x9c, 0xe2,
0x78, 0x53, 0x6c, 0x31, 0xc2, 0xcd, 0x92, 0x41, 0x02, 0x58, 0x92, 0x00, 0x16, 0xb0, 0xd1, 0x0f,
0x80, 0xf5, 0x58, 0x78, 0xf3, 0x89, 0x45, 0x23, 0xbd, 0x9f, 0x21, 0x18, 0xff, 0x07, 0x09, 0xed,
0xfd, 0x61, 0x1e, 0xda, 0x19, 0x8d, 0x87, 0xa4, 0x2e, 0x09, 0xd5, 0x38, 0x2f, 0x61, 0x9e, 0xc3,
0x67, 0x13, 0x98, 0x41, 0xc8, 0x29, 0x69, 0xe7, 0xd0, 0x5e, 0x41, 0xf8, 0x39, 0x82, 0x89, 0x68,
0xc8, 0xc5, 0x17, 0x06, 0x41, 0xcc, 0x0d, 0xc1, 0xfa, 0x88, 0x46, 0x49, 0xe3, 0x92, 0x04, 0x78,
0xde, 0xe8, 0x1b, 0xc8, 0xb5, 0xdc, 0x1c, 0xfc, 0x11, 0x82, 0xf2, 0x26, 0x1d, 0x9a, 0x66, 0xa3,
0x42, 0x76, 0xcc, 0x75, 0x7d, 0x22, 0x8c, 0x9f, 0x22, 0x98, 0xde, 0xa4, 0x61, 0xb2, 0x8a, 0x04,
0x83, 0xdd, 0x97, 0xdb, 0x56, 0xf4, 0x59, 0x53, 0x59, 0x26, 0x93, 0xa3, 0x74, 0xfd, 0x58, 0x96,
0xaa, 0x2f, 0xe2, 0x0b, 0x45, 0xc9, 0xd5, 0x4e, 0x75, 0x7e, 0x8d, 0x60, 0x22, 0x1a, 0x6a, 0x06,
0xab, 0xcf, 0x6d, 0x07, 0x23, 0xf3, 0xd1, 0x86, 0x04, 0x7a, 0x43, 0xbf, 0xd2, 0x1f, 0xa8, 0x7a,
0x5f, 0x74, 0x2a, 0x87, 0x84, 0xc4, 0x94, 0xe8, 0xf3, 0x91, 0xfd, 0x12, 0x01, 0x64, 0x53, 0x19,
0xbe, 0x54, 0x6c, 0x84, 0x32, 0xb9, 0xe9, 0x23, 0x9c, 0xcb, 0x0c, 0x53, 0x1a, 0x53, 0xd7, 0x6b,
0x45, 0x5e, 0x17, 0x53, 0xdb, 0x9a, 0x9c, 0xdd, 0x70, 0x17, 0x26, 0xa2, 0x39, 0x62, 0xb0, 0xd7,
0x73, 0x73, 0x86, 0x5e, 0x2b, 0xe8, 0x3f, 0x51, 0xe0, 0xe3, 0x9c, 0x5b, 0x2a, 0xcc, 0xb9, 0xcf,
0x11, 0x8c, 0x89, 0x61, 0x1d, 0x9f, 0x1f, 0x24, 0x4f, 0xd9, 0x9c, 0x46, 0x16, 0xea, 0xcb, 0x12,
0xda, 0x05, 0xa3, 0xd8, 0x3b, 0x3d, 0xcf, 0x5e, 0x43, 0x4b, 0xf8, 0x35, 0x82, 0x53, 0x77, 0x89,
0x47, 0x9a, 0xd4, 0x49, 0x7a, 0x6b, 0x80, 0x6f, 0x14, 0x22, 0xc9, 0x7e, 0x97, 0x98, 0xc9, 0xef,
0x12, 0x33, 0xbd, 0x1b, 0x15, 0xcd, 0xd6, 0x89, 0x05, 0x1c, 0xc5, 0x90, 0x3a, 0xfa, 0x2f, 0xd2,
0x9a, 0x35, 0x7c, 0x6d, 0x68, 0xe2, 0xde, 0x4b, 0x8a, 0x4d, 0x08, 0x5a, 0x4e, 0xf7, 0x20, 0xfc,
0x12, 0xc1, 0x74, 0x22, 0xf7, 0x01, 0xa7, 0xf4, 0xdd, 0xcd, 0xdb, 0x78, 0x6b, 0x01, 0x42, 0x7f,
0x6a, 0xda, 0x75, 0x69, 0xda, 0x1f, 0xf1, 0xd5, 0x37, 0x34, 0x2d, 0x31, 0x69, 0x39, 0x14, 0x56,
0x7c, 0x81, 0x60, 0x32, 0xd9, 0x42, 0xf0, 0xc5, 0x81, 0x09, 0x9b, 0xdf, 0x53, 0x46, 0x96, 0x64,
0x96, 0xc4, 0x7e, 0xc9, 0x58, 0x28, 0x4a, 0x32, 0x1e, 0x2b, 0x17, 0x89, 0xf6, 0x09, 0x02, 0x9c,
0x0e, 0x2a, 0xe9, 0xe8, 0x82, 0x17, 0x73, 0xaa, 0x06, 0xce, 0x40, 0xfa, 0xc5, 0xa1, 0x7c, 0xf9,
0x8e, 0xbc, 0x54, 0xd8, 0x91, 0x59, 0xaa, 0xff, 0x43, 0x04, 0x53, 0x9b, 0x34, 0x9d, 0x3f, 0x0a,
0x1c, 0x99, 0xdf, 0xb3, 0xf4, 0xfa, 0x70, 0xc6, 0x18, 0xd1, 0x1f, 0x24, 0xa2, 0x45, 0x5c, 0xec,
0xaa, 0x04, 0xc0, 0x33, 0x04, 0xbf, 0x4e, 0x9a, 0x51, 0x4c, 0x5a, 0x1e, 0xa6, 0xea, 0xa4, 0xcd,
0x2b, 0x46, 0xb4, 0xf4, 0x66, 0x88, 0xfe, 0x8f, 0xa0, 0x12, 0x2f, 0x15, 0x78, 0x61, 0x90, 0x6c,
0x75, 0xeb, 0xd0, 0xcf, 0xe4, 0xb8, 0x92, 0xa1, 0xda, 0xf8, 0x93, 0x54, 0xbb, 0x82, 0xad, 0x22,
0xb5, 0x3e, 0x73, 0x02, 0xeb, 0x71, 0xbc, 0x6d, 0x3c, 0xb1, 0x5a, 0xac, 0x19, 0x5c, 0x41, 0xeb,
0xd7, 0x5f, 0x1c, 0xce, 0xa1, 0xef, 0x0e, 0xe7, 0xd0, 0xeb, 0xc3, 0x39, 0xf4, 0x4f, 0xb3, 0xe8,
0x6f, 0xee, 0xf1, 0x3f, 0xe7, 0x3f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x69, 0x57, 0x6d, 0xdd, 0x4e,
0x17, 0x00, 0x00,
var fileDescriptor_application_4bf1d189f28cefe9 = []byte{
// 1634 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0x14, 0x47,
0x16, 0xdf, 0x9a, 0xb1, 0x3d, 0x9e, 0x67, 0x6b, 0xc5, 0xd6, 0x82, 0xb7, 0xb7, 0x31, 0xf6, 0xa8,
0x31, 0xc6, 0x98, 0x75, 0x37, 0xb6, 0xd0, 0x2e, 0xb2, 0x58, 0xb1, 0x78, 0xf1, 0x1a, 0x23, 0x03,
0xde, 0x31, 0x28, 0x52, 0x2e, 0x51, 0xd1, 0x5d, 0x1e, 0x77, 0x3c, 0xd3, 0xd5, 0xa9, 0xee, 0x99,
0x68, 0x82, 0x38, 0x04, 0x45, 0x39, 0x45, 0x41, 0x51, 0x72, 0xc8, 0x2d, 0x11, 0xe7, 0xdc, 0x72,
0xcb, 0x21, 0xb7, 0x48, 0x9c, 0xa2, 0x48, 0xe1, 0x8c, 0x90, 0x95, 0xbf, 0x21, 0xe7, 0xa8, 0xaa,
0xbf, 0xaa, 0xed, 0x99, 0x1e, 0x0c, 0x93, 0x43, 0x6e, 0x55, 0xaf, 0x5e, 0xbd, 0xf7, 0x7b, 0x1f,
0xf5, 0xfa, 0xbd, 0x86, 0xb9, 0x80, 0xf2, 0x0e, 0xe5, 0x16, 0xf1, 0xfd, 0xa6, 0x6b, 0x93, 0xd0,
0x65, 0x9e, 0xba, 0x36, 0x7d, 0xce, 0x42, 0x86, 0x27, 0x14, 0x92, 0x7e, 0xb2, 0xc1, 0x1a, 0x4c,
0xd2, 0x2d, 0xb1, 0x8a, 0x58, 0xf4, 0xe9, 0x06, 0x63, 0x8d, 0x26, 0xb5, 0x88, 0xef, 0x5a, 0xc4,
0xf3, 0x58, 0x28, 0x99, 0x83, 0xf8, 0xd4, 0xd8, 0xbf, 0x12, 0x98, 0x2e, 0x93, 0xa7, 0x36, 0xe3,
0xd4, 0xea, 0x2c, 0x5b, 0x0d, 0xea, 0x51, 0x4e, 0x42, 0xea, 0xc4, 0x3c, 0x97, 0x33, 0x9e, 0x16,
0xb1, 0xf7, 0x5c, 0x8f, 0xf2, 0xae, 0xe5, 0xef, 0x37, 0x04, 0x21, 0xb0, 0x5a, 0x34, 0x24, 0xbd,
0x6e, 0x6d, 0x36, 0xdc, 0x70, 0xaf, 0xfd, 0xc0, 0xb4, 0x59, 0xcb, 0x22, 0x5c, 0x02, 0x7b, 0x57,
0x2e, 0x96, 0x6c, 0x27, 0xbb, 0xad, 0x9a, 0xd7, 0x59, 0x26, 0x4d, 0x7f, 0x8f, 0x1c, 0x15, 0xb5,
0x56, 0x24, 0x8a, 0x53, 0x9f, 0xc5, 0xbe, 0x92, 0x4b, 0x37, 0x64, 0xbc, 0xab, 0x2c, 0x63, 0x19,
0xd7, 0x8b, 0x64, 0xd8, 0xcc, 0x0b, 0x39, 0x6b, 0x36, 0x29, 0xb7, 0x84, 0x28, 0xd7, 0xa6, 0xc1,
0x51, 0x67, 0x1b, 0x7b, 0x70, 0xe2, 0x7a, 0x46, 0xfc, 0x7f, 0x9b, 0xf2, 0x2e, 0xc6, 0x30, 0xe2,
0x91, 0x16, 0xd5, 0x50, 0x0d, 0x2d, 0x54, 0xeb, 0x72, 0x8d, 0x35, 0xa8, 0x70, 0xba, 0xcb, 0x69,
0xb0, 0xa7, 0x95, 0x24, 0x39, 0xd9, 0xe2, 0x79, 0xa8, 0x08, 0xbd, 0xd4, 0x0e, 0xb5, 0x72, 0xad,
0xbc, 0x50, 0x5d, 0x9b, 0x3c, 0x78, 0x31, 0x3b, 0xbe, 0x1d, 0x91, 0x82, 0x7a, 0x72, 0x68, 0x7c,
0x87, 0x60, 0x46, 0x51, 0x55, 0xa7, 0x01, 0x6b, 0x73, 0x9b, 0xae, 0x77, 0xa8, 0x17, 0x06, 0x87,
0x15, 0x97, 0x52, 0xc5, 0x2b, 0xf0, 0x17, 0x1e, 0xb3, 0xde, 0x21, 0x2d, 0x1a, 0xf8, 0xc4, 0xa6,
0x5a, 0x49, 0x30, 0xac, 0x8d, 0x3c, 0x7b, 0x31, 0xfb, 0xa7, 0xfa, 0xd1, 0x63, 0xbc, 0x00, 0x93,
0x2a, 0x51, 0x2b, 0x2b, 0xec, 0xb9, 0x13, 0x3c, 0x0f, 0x13, 0xc9, 0xfe, 0xfe, 0xe6, 0x0d, 0x6d,
0x44, 0x61, 0x54, 0x0f, 0x8c, 0x6d, 0xd0, 0x14, 0xec, 0xb7, 0x89, 0xe7, 0xee, 0xd2, 0x20, 0xec,
0x8f, 0xba, 0x06, 0xe3, 0x9c, 0x76, 0xdc, 0xc0, 0x65, 0x5e, 0xe4, 0xaf, 0x58, 0x68, 0x4a, 0x35,
0x4e, 0xc1, 0x5f, 0xf3, 0xde, 0xf0, 0x99, 0x17, 0x50, 0xe3, 0x29, 0xca, 0x69, 0xfa, 0x2f, 0xa7,
0x24, 0xa4, 0x75, 0xfa, 0x5e, 0x9b, 0x06, 0x21, 0xf6, 0x40, 0x7d, 0x1b, 0x52, 0xe1, 0xc4, 0xca,
0xff, 0xcc, 0x2c, 0x0b, 0xcc, 0x24, 0x0b, 0xe4, 0xe2, 0x1d, 0xdb, 0x31, 0xfd, 0xfd, 0x86, 0x29,
0x92, 0xd2, 0x54, 0x43, 0x9f, 0x24, 0xa5, 0xa9, 0x68, 0x4a, 0xac, 0x56, 0xf8, 0xf0, 0x14, 0x8c,
0xb5, 0xfd, 0x80, 0xf2, 0x50, 0xda, 0x30, 0x5e, 0x8f, 0x77, 0xc6, 0x47, 0x79, 0x90, 0xf7, 0x7d,
0x47, 0x01, 0xb9, 0xf7, 0x3b, 0x82, 0xcc, 0xc1, 0x33, 0x6e, 0xe6, 0x50, 0xdc, 0xa0, 0x4d, 0x9a,
0xa1, 0xe8, 0x15, 0x14, 0x0d, 0x2a, 0x36, 0x09, 0x6c, 0xe2, 0xd0, 0xd8, 0x9e, 0x64, 0x6b, 0x3c,
0x2d, 0xc3, 0x94, 0x22, 0x6a, 0xa7, 0xeb, 0xd9, 0x45, 0x82, 0x06, 0x46, 0x17, 0x4f, 0xc3, 0x98,
0xc3, 0xbb, 0xf5, 0xb6, 0xa7, 0x95, 0x85, 0xa6, 0xf8, 0x3c, 0xa6, 0x61, 0x1d, 0x46, 0x7d, 0xde,
0xf6, 0xa8, 0x36, 0xa2, 0x1c, 0x46, 0x24, 0x6c, 0xc3, 0x78, 0x10, 0x8a, 0x42, 0xd1, 0xe8, 0x6a,
0xa3, 0x35, 0xb4, 0x30, 0xb1, 0xb2, 0xf1, 0x06, 0xbe, 0x13, 0x96, 0xec, 0xc4, 0xe2, 0xea, 0xa9,
0x60, 0xfc, 0x6f, 0xa8, 0xfa, 0x84, 0x93, 0x16, 0x0d, 0x29, 0xd7, 0xc6, 0xa4, 0x96, 0xd9, 0x9c,
0x80, 0xed, 0xe4, 0xf4, 0x6e, 0x87, 0x72, 0xee, 0x3a, 0x34, 0xa8, 0x67, 0x37, 0x70, 0x08, 0xd5,
0xe4, 0x71, 0x04, 0x5a, 0xa5, 0x56, 0x5e, 0x98, 0x58, 0xd9, 0x7e, 0x43, 0x90, 0x77, 0x7d, 0x51,
0x1d, 0x95, 0xba, 0x10, 0x7b, 0x25, 0x53, 0x64, 0xdc, 0x02, 0x7c, 0x14, 0x16, 0xbe, 0x0c, 0x55,
0x96, 0x6c, 0x34, 0x24, 0xb1, 0x4c, 0xf5, 0x36, 0xa5, 0x9e, 0x31, 0x1a, 0x14, 0xaa, 0x29, 0x1d,
0x6b, 0x6a, 0x88, 0x63, 0xbd, 0x51, 0xa0, 0x75, 0x18, 0xed, 0x90, 0x66, 0x9b, 0xe6, 0xa2, 0x1c,
0x91, 0xb0, 0x01, 0x55, 0x9b, 0xb5, 0x7c, 0xe6, 0x51, 0x2f, 0x94, 0x51, 0x4e, 0xce, 0x33, 0xb2,
0xf1, 0x25, 0x82, 0xe9, 0x23, 0x0f, 0x65, 0xc7, 0xa7, 0x85, 0xd9, 0xe5, 0xc0, 0x48, 0xe0, 0x53,
0x5b, 0x16, 0xb9, 0x89, 0x95, 0x5b, 0xc3, 0x79, 0x39, 0x42, 0x69, 0x62, 0x9a, 0x90, 0x6e, 0x7c,
0x8c, 0x40, 0x57, 0x5f, 0x16, 0x6b, 0x36, 0x1f, 0x10, 0x7b, 0xbf, 0x08, 0x98, 0x0e, 0x25, 0xd7,
0x91, 0xb0, 0xca, 0x6b, 0x20, 0x44, 0x1d, 0xbc, 0x98, 0x2d, 0x6d, 0xde, 0xa8, 0x97, 0x5c, 0xe7,
0xf5, 0x13, 0xde, 0x78, 0x7e, 0x08, 0x48, 0x1c, 0xef, 0x22, 0x20, 0x06, 0x54, 0xbd, 0x9e, 0xdf,
0x82, 0x8c, 0x7c, 0x8c, 0x6f, 0xc0, 0x0c, 0x54, 0x3a, 0x94, 0xcb, 0xc7, 0xac, 0xd6, 0xff, 0x84,
0x28, 0xc0, 0x37, 0x38, 0x6b, 0xfb, 0xda, 0xa8, 0x72, 0x1a, 0x91, 0x44, 0xea, 0xec, 0xbb, 0x9e,
0xa3, 0x8d, 0xa9, 0xa9, 0x23, 0x28, 0xc6, 0xaf, 0x08, 0x6a, 0x3d, 0xcc, 0x1a, 0x5c, 0xa5, 0xfe,
0x00, 0xc6, 0x89, 0x5b, 0xbb, 0x8c, 0xdb, 0x54, 0xab, 0xa4, 0xf1, 0x44, 0xf5, 0x88, 0x64, 0x5c,
0x83, 0xd3, 0x3d, 0xc3, 0x19, 0x7d, 0xe0, 0x44, 0xed, 0x6c, 0xc5, 0x9f, 0xcf, 0xdc, 0x83, 0x4b,
0xa9, 0xc6, 0x0f, 0x25, 0xf8, 0x9b, 0x22, 0x61, 0x9b, 0x39, 0x5b, 0xac, 0x51, 0xd0, 0x21, 0xbc,
0x8a, 0xc3, 0x34, 0xa8, 0xf8, 0xcc, 0xc9, 0x7c, 0x55, 0x4f, 0xb6, 0xd1, 0x33, 0xf6, 0x42, 0x22,
0x3a, 0xc0, 0x9c, 0x8b, 0x32, 0xb2, 0x70, 0x77, 0xe0, 0x7a, 0x36, 0xdd, 0xa1, 0x36, 0xf3, 0x9c,
0x40, 0xfa, 0xaa, 0x9c, 0xb8, 0x5b, 0x3d, 0xc1, 0x37, 0xa1, 0x2a, 0xf7, 0xf7, 0xdc, 0x16, 0x8d,
0x0b, 0xeb, 0xa2, 0x19, 0xb5, 0x9a, 0xa6, 0xda, 0x6a, 0x66, 0x0f, 0x57, 0xb4, 0x9a, 0x66, 0x67,
0xd9, 0x14, 0x37, 0xea, 0xd9, 0x65, 0x81, 0x2b, 0x24, 0x6e, 0x73, 0xcb, 0xf5, 0x64, 0x8d, 0xcd,
0x14, 0x66, 0x64, 0xf1, 0xe8, 0x76, 0x59, 0xb3, 0xc9, 0xde, 0xd7, 0xc6, 0x6b, 0xa5, 0xec, 0xd1,
0x45, 0x34, 0xe3, 0x03, 0x18, 0xdf, 0x62, 0x8d, 0x75, 0x2f, 0xe4, 0x5d, 0x91, 0x06, 0xc2, 0x1c,
0x51, 0xaa, 0x54, 0xa7, 0x27, 0x44, 0x7c, 0x07, 0xaa, 0xa1, 0xdb, 0xa2, 0x3b, 0x21, 0x69, 0xf9,
0x71, 0xe1, 0x39, 0x06, 0xee, 0x14, 0x59, 0x22, 0xc2, 0xb0, 0xe0, 0xef, 0x69, 0x45, 0xbf, 0x47,
0x79, 0xcb, 0xf5, 0x48, 0x61, 0xd6, 0x1b, 0xd3, 0xa0, 0xf7, 0xba, 0x10, 0x25, 0xcd, 0xca, 0x8f,
0xa7, 0x00, 0xab, 0xc5, 0x2c, 0xea, 0x67, 0xf1, 0x13, 0x04, 0x23, 0x5b, 0x6e, 0x10, 0xe2, 0x33,
0xb9, 0xfa, 0x77, 0xb8, 0xa1, 0xd5, 0x87, 0x54, 0x43, 0x85, 0x2a, 0x63, 0xfa, 0xf1, 0xcf, 0xbf,
0x7c, 0x5e, 0x9a, 0xc2, 0x27, 0xe5, 0x78, 0xd1, 0x59, 0x56, 0x7b, 0xea, 0x00, 0x7f, 0x82, 0x00,
0x0b, 0xb6, 0x7c, 0x77, 0x8b, 0x2f, 0xf6, 0xc3, 0xd7, 0xa3, 0x0b, 0xd6, 0xcf, 0x28, 0x8e, 0x37,
0xc5, 0xfc, 0x22, 0xdc, 0x2c, 0x19, 0x24, 0x80, 0x45, 0x09, 0x60, 0x0e, 0x1b, 0xbd, 0x00, 0x58,
0x0f, 0x85, 0x37, 0x1f, 0x59, 0x34, 0xd2, 0xfb, 0x15, 0x82, 0xd1, 0xb7, 0x48, 0x68, 0xef, 0x0d,
0xf2, 0xd0, 0xf6, 0x70, 0x3c, 0x24, 0x75, 0x49, 0xa8, 0xc6, 0x59, 0x09, 0xf3, 0x0c, 0x3e, 0x9d,
0xc0, 0x0c, 0x42, 0x4e, 0x49, 0x2b, 0x87, 0xf6, 0x12, 0xc2, 0x4f, 0x11, 0x8c, 0x45, 0x4d, 0x2e,
0x3e, 0xd7, 0x0f, 0x62, 0xae, 0x09, 0xd6, 0x87, 0xd4, 0x4a, 0x1a, 0x17, 0x24, 0xc0, 0xb3, 0x46,
0xcf, 0x40, 0xae, 0xe6, 0xfa, 0xe0, 0xcf, 0x10, 0x94, 0x37, 0xe8, 0xc0, 0x34, 0x1b, 0x16, 0xb2,
0x23, 0xae, 0xeb, 0x11, 0x61, 0xfc, 0x18, 0xc1, 0xe4, 0x06, 0x0d, 0x93, 0x51, 0x24, 0xe8, 0xef,
0xbe, 0xdc, 0xb4, 0xa2, 0x4f, 0x9b, 0xca, 0x18, 0x99, 0x1c, 0xa5, 0xe3, 0xc7, 0x92, 0x54, 0x7d,
0x1e, 0x9f, 0x2b, 0x4a, 0xae, 0x56, 0xaa, 0xf3, 0x7b, 0x04, 0x63, 0x51, 0x53, 0xd3, 0x5f, 0x7d,
0x6e, 0x3a, 0x18, 0x9a, 0x8f, 0xd6, 0x25, 0xd0, 0x6b, 0xfa, 0xa5, 0xde, 0x40, 0xd5, 0xfb, 0xa2,
0x52, 0x39, 0x24, 0x24, 0xa6, 0x44, 0x9f, 0x8f, 0xec, 0xb7, 0x08, 0x20, 0xeb, 0xca, 0xf0, 0x85,
0x62, 0x23, 0x94, 0xce, 0x4d, 0x1f, 0x62, 0x5f, 0x66, 0x98, 0xd2, 0x98, 0x05, 0xbd, 0x56, 0xe4,
0x75, 0xd1, 0xb5, 0xad, 0xca, 0xde, 0x0d, 0x77, 0x60, 0x2c, 0xea, 0x23, 0xfa, 0x7b, 0x3d, 0xd7,
0x67, 0xe8, 0xb5, 0x82, 0xfa, 0x13, 0x05, 0x3e, 0xce, 0xb9, 0xc5, 0xc2, 0x9c, 0xfb, 0x1a, 0xc1,
0x88, 0x68, 0xd6, 0xf1, 0xd9, 0x7e, 0xf2, 0x94, 0xc9, 0x69, 0x68, 0xa1, 0xbe, 0x28, 0xa1, 0x9d,
0x33, 0x8a, 0xbd, 0xd3, 0xf5, 0xec, 0x55, 0xb4, 0x88, 0x5f, 0x22, 0x38, 0x71, 0x9b, 0x78, 0xa4,
0x41, 0x9d, 0xa4, 0xb6, 0x06, 0xf8, 0x5a, 0x21, 0x92, 0xec, 0x47, 0x89, 0x99, 0xfc, 0x28, 0x31,
0xd3, 0xbb, 0xd1, 0xa3, 0xd9, 0x3c, 0xb6, 0x80, 0xc3, 0x18, 0x52, 0x47, 0xff, 0x47, 0x5a, 0xb3,
0x8a, 0xaf, 0x0c, 0x4c, 0xdc, 0x3b, 0xc9, 0x63, 0x13, 0x82, 0x96, 0xd2, 0x39, 0x08, 0x3f, 0x47,
0x30, 0x99, 0xc8, 0xbd, 0xc7, 0x29, 0x7d, 0x73, 0xf3, 0xd6, 0x5f, 0x5b, 0x80, 0xd0, 0x9f, 0x9a,
0x76, 0x55, 0x9a, 0xf6, 0x4f, 0x7c, 0xf9, 0x15, 0x4d, 0x4b, 0x4c, 0x5a, 0x0a, 0x85, 0x15, 0xdf,
0x20, 0x18, 0x4f, 0xa6, 0x10, 0x7c, 0xbe, 0x6f, 0xc2, 0xe6, 0xe7, 0x94, 0xa1, 0x25, 0x99, 0x25,
0xb1, 0x5f, 0x30, 0xe6, 0x8a, 0x92, 0x8c, 0xc7, 0xca, 0x45, 0xa2, 0x7d, 0x81, 0x00, 0xa7, 0x8d,
0x4a, 0xda, 0xba, 0xe0, 0xf9, 0x9c, 0xaa, 0xbe, 0x3d, 0x90, 0x7e, 0x7e, 0x20, 0x5f, 0xbe, 0x22,
0x2f, 0x16, 0x56, 0x64, 0x96, 0xea, 0xff, 0x14, 0xc1, 0xc4, 0x06, 0x4d, 0xfb, 0x8f, 0x02, 0x47,
0xe6, 0xe7, 0x2c, 0x7d, 0x61, 0x30, 0x63, 0x8c, 0xe8, 0x1f, 0x12, 0xd1, 0x3c, 0x2e, 0x76, 0x55,
0x02, 0xe0, 0x09, 0x82, 0x3f, 0x27, 0xc5, 0x28, 0x26, 0x2d, 0x0d, 0x52, 0x75, 0xdc, 0xe2, 0x15,
0x23, 0x5a, 0x7c, 0x35, 0x44, 0x1f, 0x22, 0xa8, 0xc4, 0x43, 0x05, 0x9e, 0xeb, 0x27, 0x5b, 0x9d,
0x3a, 0xf4, 0x53, 0x39, 0xae, 0xa4, 0xa9, 0x36, 0xfe, 0x25, 0xd5, 0x2e, 0x63, 0xab, 0x48, 0xad,
0xcf, 0x9c, 0xc0, 0x7a, 0x18, 0x4f, 0x1b, 0x8f, 0xac, 0x26, 0x6b, 0x04, 0x97, 0xd0, 0xda, 0xd5,
0x67, 0x07, 0x33, 0xe8, 0xa7, 0x83, 0x19, 0xf4, 0xf2, 0x60, 0x06, 0xbd, 0x6d, 0x16, 0xfd, 0xc7,
0x3d, 0xfa, 0xcf, 0xfc, 0xb7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x45, 0x31, 0x7c, 0x48, 0x17,
0x00, 0x00,
}

View File

@@ -18,7 +18,7 @@ import "github.com/argoproj/argo-cd/controller/services/application.proto";
// ApplicationQuery is a query for application resources
message ApplicationQuery {
optional string name = 1;
optional bool refresh = 2 [(gogoproto.nullable) = false];
optional string refresh = 2;
repeated string project = 3 [(gogoproto.customname) = "Projects"];
}
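The refresh field keeps number 2 but switches from bool to string, which is why the generated marshaler hunks above change the tag byte from 0x10 to 0x12. A small sketch of the protobuf key calculation (field_number << 3 | wire_type, where wire type 0 is varint/bool and 2 is length-delimited/string):
package main
import "fmt"
func key(fieldNumber, wireType uint32) uint32 { return fieldNumber<<3 | wireType }
func main() {
	fmt.Printf("%#x\n", key(2, 0)) // 0x10: field 2 encoded as a bool (varint)
	fmt.Printf("%#x\n", key(2, 2)) // 0x12: field 2 encoded as a string (length-delimited)
}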

View File

@@ -5,10 +5,11 @@ import (
"testing"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -18,12 +19,15 @@ import (
"github.com/argoproj/argo-cd/errors"
appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
apps "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
appinformer "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
mockrepo "github.com/argoproj/argo-cd/reposerver/mocks"
"github.com/argoproj/argo-cd/reposerver/repository"
mockreposerver "github.com/argoproj/argo-cd/reposerver/repository/mocks"
"github.com/argoproj/argo-cd/server/rbacpolicy"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/rbac"
"github.com/argoproj/argo-cd/util/settings"
@@ -94,10 +98,7 @@ func newTestAppServer(objects ...runtime.Object) *Server {
"server.secretkey": []byte("test"),
},
})
enforcer := rbac.NewEnforcer(kubeclientset, testNamespace, common.ArgoCDRBACConfigMapName, nil)
enforcer.SetBuiltinPolicy(test.BuiltinPolicy)
enforcer.SetDefaultRole("role:admin")
db := db.NewDB(testNamespace, settings.NewSettingsManager(kubeclientset, testNamespace), kubeclientset)
db := db.NewDB(testNamespace, settings.NewSettingsManager(context.Background(), kubeclientset, testNamespace), kubeclientset)
ctx := context.Background()
_, err := db.CreateRepository(ctx, fakeRepo())
errors.CheckError(err)
@@ -119,18 +120,37 @@ func newTestAppServer(objects ...runtime.Object) *Server {
Destinations: []appsv1.ApplicationDestination{{Server: "*", Namespace: "*"}},
},
}
myProj := &appsv1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "my-proj", Namespace: "default"},
Spec: appsv1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []appsv1.ApplicationDestination{{Server: "*", Namespace: "*"}},
},
}
objects = append(objects, defaultProj, myProj)
fakeAppsClientset := apps.NewSimpleClientset(objects...)
factory := appinformer.NewFilteredSharedInformerFactory(fakeAppsClientset, 0, "", func(options *metav1.ListOptions) {})
fakeProjLister := factory.Argoproj().V1alpha1().AppProjects().Lister().AppProjects(testNamespace)
enforcer := rbac.NewEnforcer(kubeclientset, testNamespace, common.ArgoCDRBACConfigMapName, nil)
enforcer.SetBuiltinPolicy(test.BuiltinPolicy)
enforcer.SetDefaultRole("role:admin")
enforcer.SetClaimsEnforcerFunc(rbacpolicy.NewRBACPolicyEnforcer(enforcer, fakeProjLister).EnforceClaims)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeclientset, testNamespace)
objects = append(objects, defaultProj)
server := NewServer(
testNamespace,
kubeclientset,
apps.NewSimpleClientset(objects...),
fakeAppsClientset,
mockRepoClient,
nil,
kube.KubectlCmd{},
db,
enforcer,
util.NewKeyLock(),
settingsMgr,
)
return server.(*Server)
}
@@ -280,3 +300,77 @@ func TestSyncAndTerminate(t *testing.T) {
assert.NotNil(t, app)
assert.Equal(t, appsv1.OperationTerminating, app.Status.OperationState.Phase)
}
func TestRollbackApp(t *testing.T) {
testApp := newTestApp()
testApp.Status.History = []appsv1.RevisionHistory{{
ID: 1,
Revision: "abc",
}}
appServer := newTestAppServer(testApp)
updatedApp, err := appServer.Rollback(context.Background(), &ApplicationRollbackRequest{
Name: &testApp.Name,
ID: 1,
})
assert.Nil(t, err)
assert.NotNil(t, updatedApp.Operation)
assert.NotNil(t, updatedApp.Operation.Sync)
assert.NotNil(t, updatedApp.Operation.Sync.ParameterOverrides)
assert.Len(t, updatedApp.Operation.Sync.ParameterOverrides, 0)
assert.Equal(t, "abc", updatedApp.Operation.Sync.Revision)
}
func TestUpdateAppProject(t *testing.T) {
testApp := newTestApp()
ctx := context.Background()
ctx = context.WithValue(ctx, "claims", &jwt.StandardClaims{Subject: "admin"})
appServer := newTestAppServer(testApp)
appServer.enf.SetDefaultRole("")
// Verify normal update works (without changing project)
appServer.enf.SetBuiltinPolicy(`p, admin, applications, update, default/test-app, allow`)
_, err := appServer.Update(ctx, &ApplicationUpdateRequest{Application: testApp})
assert.NoError(t, err)
// Verify caller cannot update to another project
testApp.Spec.Project = "my-proj"
_, err = appServer.Update(ctx, &ApplicationUpdateRequest{Application: testApp})
assert.Equal(t, err, grpc.ErrPermissionDenied)
// Verify inability to change projects without create privileges in new project
appServer.enf.SetBuiltinPolicy(`
p, admin, applications, update, default/test-app, allow
p, admin, applications, update, my-proj/test-app, allow
`)
_, err = appServer.Update(ctx, &ApplicationUpdateRequest{Application: testApp})
assert.Equal(t, err, grpc.ErrPermissionDenied)
// Verify inability to change projects without update privileges in new project
appServer.enf.SetBuiltinPolicy(`
p, admin, applications, update, default/test-app, allow
p, admin, applications, create, my-proj/test-app, allow
`)
_, err = appServer.Update(ctx, &ApplicationUpdateRequest{Application: testApp})
assert.Equal(t, err, grpc.ErrPermissionDenied)
// Verify inability to change projects without update privileges in old project
appServer.enf.SetBuiltinPolicy(`
p, admin, applications, create, my-proj/test-app, allow
p, admin, applications, update, my-proj/test-app, allow
`)
_, err = appServer.Update(ctx, &ApplicationUpdateRequest{Application: testApp})
assert.Equal(t, err, grpc.ErrPermissionDenied)
// Verify can update project with proper permissions
appServer.enf.SetBuiltinPolicy(`
p, admin, applications, update, default/test-app, allow
p, admin, applications, create, my-proj/test-app, allow
p, admin, applications, update, my-proj/test-app, allow
`)
updatedApp, err := appServer.Update(ctx, &ApplicationUpdateRequest{Application: testApp})
assert.NoError(t, err)
assert.Equal(t, "my-proj", updatedApp.Spec.Project)
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/argoproj/argo-cd/common"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/rbacpolicy"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/grpc"
@@ -55,7 +56,9 @@ func (s *Server) getConnectionState(ctx context.Context, cluster appv1.Cluster)
ModifiedAt: &now,
}
kubeClientset, err := kubernetes.NewForConfig(cluster.RESTConfig())
config := cluster.RESTConfig()
config.Timeout = time.Second
kubeClientset, err := kubernetes.NewForConfig(config)
if err == nil {
_, err = kubeClientset.Discovery().ServerVersion()
}
@@ -77,18 +80,29 @@ func (s *Server) getConnectionState(ctx context.Context, cluster appv1.Cluster)
// List returns list of clusters
func (s *Server) List(ctx context.Context, q *ClusterQuery) (*appv1.ClusterList, error) {
clusterList, err := s.db.ListClusters(ctx)
if clusterList != nil {
newItems := make([]appv1.Cluster, 0)
for _, clust := range clusterList.Items {
if s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionGet, clust.Server) {
if clust.ConnectionState.Status == "" {
clust.ConnectionState = s.getConnectionState(ctx, clust)
}
newItems = append(newItems, *redact(&clust))
}
}
clusterList.Items = newItems
if err != nil {
return nil, err
}
newItems := make([]appv1.Cluster, 0)
for _, clust := range clusterList.Items {
if s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionGet, clust.Server) {
newItems = append(newItems, clust)
}
}
err = util.RunAllAsync(len(newItems), func(i int) error {
clust := newItems[i]
if clust.ConnectionState.Status == "" {
clust.ConnectionState = s.getConnectionState(ctx, clust)
}
newItems[i] = *redact(&clust)
return nil
})
if err != nil {
return nil, err
}
clusterList.Items = newItems
return clusterList, err
}
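The refactor above moves the per-cluster connection-state checks out of the RBAC filtering loop and fans them out concurrently. Below is an illustrative stand-in for a helper with the shape of util.RunAllAsync (an assumption; not the actual argo-cd util implementation): run one action per index in its own goroutine, wait for all of them, and surface the first error, so a slow or unreachable cluster does not serialize the List call.
package main
import (
	"fmt"
	"sync"
)
func runAllAsync(count int, action func(i int) error) error {
	var wg sync.WaitGroup
	errs := make([]error, count)
	for i := 0; i < count; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			errs[i] = action(i) // each goroutine writes only its own slot; no extra locking needed
		}(i)
	}
	wg.Wait()
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}
func main() {
	items := []string{"cluster-a", "cluster-b", "cluster-c"}
	err := runAllAsync(len(items), func(i int) error {
		fmt.Println("checking connection state for", items[i])
		return nil
	})
	fmt.Println("err:", err)
}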

View File

@@ -45,7 +45,7 @@ func (m *ClusterQuery) Reset() { *m = ClusterQuery{} }
func (m *ClusterQuery) String() string { return proto.CompactTextString(m) }
func (*ClusterQuery) ProtoMessage() {}
func (*ClusterQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_bf8d7367dfc95a3e, []int{0}
return fileDescriptor_cluster_0875510a34378ea0, []int{0}
}
func (m *ClusterQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -91,7 +91,7 @@ func (m *ClusterResponse) Reset() { *m = ClusterResponse{} }
func (m *ClusterResponse) String() string { return proto.CompactTextString(m) }
func (*ClusterResponse) ProtoMessage() {}
func (*ClusterResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_bf8d7367dfc95a3e, []int{1}
return fileDescriptor_cluster_0875510a34378ea0, []int{1}
}
func (m *ClusterResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -132,7 +132,7 @@ func (m *ClusterCreateRequest) Reset() { *m = ClusterCreateRequest{} }
func (m *ClusterCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterCreateRequest) ProtoMessage() {}
func (*ClusterCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_bf8d7367dfc95a3e, []int{2}
return fileDescriptor_cluster_0875510a34378ea0, []int{2}
}
func (m *ClusterCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -189,7 +189,7 @@ func (m *ClusterCreateFromKubeConfigRequest) Reset() { *m = ClusterCreat
func (m *ClusterCreateFromKubeConfigRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterCreateFromKubeConfigRequest) ProtoMessage() {}
func (*ClusterCreateFromKubeConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_bf8d7367dfc95a3e, []int{3}
return fileDescriptor_cluster_0875510a34378ea0, []int{3}
}
func (m *ClusterCreateFromKubeConfigRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -257,7 +257,7 @@ func (m *ClusterUpdateRequest) Reset() { *m = ClusterUpdateRequest{} }
func (m *ClusterUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterUpdateRequest) ProtoMessage() {}
func (*ClusterUpdateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_bf8d7367dfc95a3e, []int{4}
return fileDescriptor_cluster_0875510a34378ea0, []int{4}
}
func (m *ClusterUpdateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1393,10 +1393,10 @@ var (
)
func init() {
proto.RegisterFile("server/cluster/cluster.proto", fileDescriptor_cluster_bf8d7367dfc95a3e)
proto.RegisterFile("server/cluster/cluster.proto", fileDescriptor_cluster_0875510a34378ea0)
}
var fileDescriptor_cluster_bf8d7367dfc95a3e = []byte{
var fileDescriptor_cluster_0875510a34378ea0 = []byte{
// 564 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcd, 0x6e, 0x13, 0x31,
0x10, 0xc7, 0xe5, 0xb6, 0xda, 0x12, 0x83, 0xf8, 0xb0, 0x0a, 0x5a, 0xd2, 0x10, 0xa5, 0x3e, 0x54,

View File

@@ -46,7 +46,7 @@ func (m *ProjectCreateRequest) Reset() { *m = ProjectCreateRequest{} }
func (m *ProjectCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectCreateRequest) ProtoMessage() {}
func (*ProjectCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_project_8d94583ca767d5b3, []int{0}
return fileDescriptor_project_082822b5d17b8c4e, []int{0}
}
func (m *ProjectCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -96,7 +96,7 @@ func (m *ProjectTokenDeleteRequest) Reset() { *m = ProjectTokenDeleteReq
func (m *ProjectTokenDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenDeleteRequest) ProtoMessage() {}
func (*ProjectTokenDeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_project_8d94583ca767d5b3, []int{1}
return fileDescriptor_project_082822b5d17b8c4e, []int{1}
}
func (m *ProjectTokenDeleteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -162,7 +162,7 @@ func (m *ProjectTokenCreateRequest) Reset() { *m = ProjectTokenCreateReq
func (m *ProjectTokenCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenCreateRequest) ProtoMessage() {}
func (*ProjectTokenCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_project_8d94583ca767d5b3, []int{2}
return fileDescriptor_project_082822b5d17b8c4e, []int{2}
}
func (m *ProjectTokenCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -231,7 +231,7 @@ func (m *ProjectTokenResponse) Reset() { *m = ProjectTokenResponse{} }
func (m *ProjectTokenResponse) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenResponse) ProtoMessage() {}
func (*ProjectTokenResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_project_8d94583ca767d5b3, []int{3}
return fileDescriptor_project_082822b5d17b8c4e, []int{3}
}
func (m *ProjectTokenResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -279,7 +279,7 @@ func (m *ProjectQuery) Reset() { *m = ProjectQuery{} }
func (m *ProjectQuery) String() string { return proto.CompactTextString(m) }
func (*ProjectQuery) ProtoMessage() {}
func (*ProjectQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_project_8d94583ca767d5b3, []int{4}
return fileDescriptor_project_082822b5d17b8c4e, []int{4}
}
func (m *ProjectQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -326,7 +326,7 @@ func (m *ProjectUpdateRequest) Reset() { *m = ProjectUpdateRequest{} }
func (m *ProjectUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectUpdateRequest) ProtoMessage() {}
func (*ProjectUpdateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_project_8d94583ca767d5b3, []int{5}
return fileDescriptor_project_082822b5d17b8c4e, []int{5}
}
func (m *ProjectUpdateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -372,7 +372,7 @@ func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
func (*EmptyResponse) ProtoMessage() {}
func (*EmptyResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_project_8d94583ca767d5b3, []int{6}
return fileDescriptor_project_082822b5d17b8c4e, []int{6}
}
func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1846,10 +1846,10 @@ var (
)
func init() {
proto.RegisterFile("server/project/project.proto", fileDescriptor_project_8d94583ca767d5b3)
proto.RegisterFile("server/project/project.proto", fileDescriptor_project_082822b5d17b8c4e)
}
var fileDescriptor_project_8d94583ca767d5b3 = []byte{
var fileDescriptor_project_082822b5d17b8c4e = []byte{
// 697 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x5d, 0x6b, 0x13, 0x4d,
0x14, 0x66, 0x9a, 0xbe, 0x79, 0xdf, 0x4e, 0x5e, 0xb5, 0x0c, 0xad, 0xa6, 0xb1, 0x8d, 0x61, 0x2e,

View File

@@ -10,7 +10,8 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"github.com/argoproj/argo-cd/common"
@@ -24,15 +25,30 @@ import (
"github.com/argoproj/argo-cd/util/settings"
)
const testNamespace = "default"
func TestProjectServer(t *testing.T) {
enforcer := rbac.NewEnforcer(fake.NewSimpleClientset(), "default", common.ArgoCDRBACConfigMapName, nil)
kubeclientset := fake.NewSimpleClientset(&corev1.ConfigMap{
ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, Name: "argocd-cm"},
}, &corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Name: "argocd-secret",
Namespace: testNamespace,
},
Data: map[string][]byte{
"admin.password": []byte("test"),
"server.secretkey": []byte("test"),
},
})
settingsMgr := settings.NewSettingsManager(context.Background(), kubeclientset, testNamespace)
enforcer := rbac.NewEnforcer(kubeclientset, testNamespace, common.ArgoCDRBACConfigMapName, nil)
enforcer.SetBuiltinPolicy(test.BuiltinPolicy)
enforcer.SetDefaultRole("role:admin")
enforcer.SetClaimsEnforcerFunc(func(claims jwt.Claims, rvals ...interface{}) bool {
return true
})
existingProj := v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{Name: "test", Namespace: "default"},
ObjectMeta: v1.ObjectMeta{Name: "test", Namespace: testNamespace},
Spec: v1alpha1.AppProjectSpec{
Destinations: []v1alpha1.ApplicationDestination{
{Namespace: "ns1", Server: "https://server1"},
@@ -144,7 +160,7 @@ func TestProjectServer(t *testing.T) {
})
t.Run("TestCreateTokenSuccesfully", func(t *testing.T) {
sessionMgr := session.NewSessionManager(&settings.ArgoCDSettings{})
sessionMgr := session.NewSessionManager(settingsMgr)
projectWithRole := existingProj.DeepCopy()
tokenName := "testToken"
projectWithRole.Spec.Roles = []v1alpha1.ProjectRole{{Name: tokenName}}
@@ -163,7 +179,7 @@ func TestProjectServer(t *testing.T) {
})
t.Run("TestDeleteTokenSuccesfully", func(t *testing.T) {
sessionMgr := session.NewSessionManager(&settings.ArgoCDSettings{})
sessionMgr := session.NewSessionManager(settingsMgr)
projWithToken := existingProj.DeepCopy()
tokenName := "testToken"
issuedAt := int64(1)
@@ -182,7 +198,7 @@ func TestProjectServer(t *testing.T) {
})
t.Run("TestCreateTwoTokensInRoleSuccess", func(t *testing.T) {
sessionMgr := session.NewSessionManager(&settings.ArgoCDSettings{})
sessionMgr := session.NewSessionManager(settingsMgr)
projWithToken := existingProj.DeepCopy()
tokenName := "testToken"
token := v1alpha1.ProjectRole{Name: tokenName, JWTTokens: []v1alpha1.JWTToken{{IssuedAt: 1}}}
@@ -266,8 +282,7 @@ func TestProjectServer(t *testing.T) {
projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projWithRole), enforcer, util.NewKeyLock(), nil)
request := &ProjectUpdateRequest{Project: projWithRole}
_, err := projectServer.Update(context.Background(), request)
expectedErr := fmt.Sprintf("rpc error: code = InvalidArgument desc = incorrect policy format for '%s' as policies can't grant access to other projects", policy)
assert.EqualError(t, err, expectedErr)
assert.Contains(t, err.Error(), "object must be of form 'test/*' or 'test/<APPNAME>'")
})
t.Run("TestValidateProjectIncorrectProjectInRoleFailure", func(t *testing.T) {
@@ -286,8 +301,7 @@ func TestProjectServer(t *testing.T) {
projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projWithRole), enforcer, util.NewKeyLock(), nil)
request := &ProjectUpdateRequest{Project: projWithRole}
_, err := projectServer.Update(context.Background(), request)
expectedErr := fmt.Sprintf("rpc error: code = InvalidArgument desc = incorrect policy format for '%s' as policy can't grant access to other projects", invalidPolicy)
assert.EqualError(t, err, expectedErr)
assert.Contains(t, err.Error(), "policy subject must be: 'proj:test:testRole'")
})
t.Run("TestValidateProjectIncorrectTokenInRoleFailure", func(t *testing.T) {
@@ -306,8 +320,7 @@ func TestProjectServer(t *testing.T) {
projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projWithRole), enforcer, util.NewKeyLock(), nil)
request := &ProjectUpdateRequest{Project: projWithRole}
_, err := projectServer.Update(context.Background(), request)
expectedErr := fmt.Sprintf("rpc error: code = InvalidArgument desc = incorrect policy format for '%s' as policy can't grant access to other roles", invalidPolicy)
assert.EqualError(t, err, expectedErr)
assert.Contains(t, err.Error(), "policy subject must be: 'proj:test:testRole'")
})
t.Run("TestValidateProjectInvalidEffectFailure", func(t *testing.T) {
@@ -325,8 +338,7 @@ func TestProjectServer(t *testing.T) {
projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projWithRole), enforcer, util.NewKeyLock(), nil)
request := &ProjectUpdateRequest{Project: projWithRole}
_, err := projectServer.Update(context.Background(), request)
expectedErr := fmt.Sprintf("rpc error: code = InvalidArgument desc = incorrect policy format for '%s' as effect can only have value 'allow' or 'deny'", invalidPolicy)
assert.EqualError(t, err, expectedErr)
assert.Contains(t, err.Error(), "effect must be: 'allow' or 'deny'")
})
t.Run("TestNormalizeProjectRolePolicies", func(t *testing.T) {

View File

@@ -4,6 +4,7 @@ import (
"strings"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
@@ -75,6 +76,8 @@ func (p *RBACPolicyEnforcer) EnforceClaims(claims jwt.Claims, rvals ...interface
return true
}
}
logCtx := log.WithField("claims", claims).WithField("rval", rvals)
logCtx.Debug("enforce failed")
return false
}

View File

@@ -0,0 +1,83 @@
package rbacpolicy
import (
"io/ioutil"
"testing"
jwt "github.com/dgrijalva/jwt-go"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"github.com/argoproj/argo-cd/common"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util/rbac"
)
var builtinPolicy string
func init() {
policyBytes, err := ioutil.ReadFile("../../util/rbac/builtin-policy.csv")
if err != nil {
panic(err)
}
builtinPolicy = string(policyBytes)
}
func newFakeProj() *argoappv1.AppProject {
return &argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "my-proj",
Namespace: test.FakeArgoCDNamespace,
},
Spec: argoappv1.AppProjectSpec{
Roles: []argoappv1.ProjectRole{
{
Name: "my-role",
Policies: []string{
"p, proj:my-proj:my-role, applications, create, my-proj/*, allow",
},
Groups: []string{
"my-org:my-team",
},
JWTTokens: []argoappv1.JWTToken{
{
IssuedAt: 1234,
},
},
},
},
},
}
}
func TestEnforceAllPolicies(t *testing.T) {
kubeclientset := fake.NewSimpleClientset(test.NewFakeConfigMap())
projLister := test.NewFakeProjLister(newFakeProj())
enf := rbac.NewEnforcer(kubeclientset, test.FakeArgoCDNamespace, common.ArgoCDConfigMapName, nil)
enf.EnableLog(true)
enf.SetBuiltinPolicy(`p, alice, applications, create, my-proj/*, allow`)
enf.SetUserPolicy(`p, bob, applications, create, my-proj/*, allow`)
rbacEnf := NewRBACPolicyEnforcer(enf, projLister)
enf.SetClaimsEnforcerFunc(rbacEnf.EnforceClaims)
var claims jwt.MapClaims
claims = jwt.MapClaims{"sub": "alice"}
assert.True(t, enf.Enforce(claims, "applications", "create", "my-proj/my-app"))
claims = jwt.MapClaims{"sub": "bob"}
assert.True(t, enf.Enforce(claims, "applications", "create", "my-proj/my-app"))
claims = jwt.MapClaims{"sub": "proj:my-proj:my-role", "iat": 1234}
assert.True(t, enf.Enforce(claims, "applications", "create", "my-proj/my-app"))
claims = jwt.MapClaims{"groups": []string{"my-org:my-team"}}
assert.True(t, enf.Enforce(claims, "applications", "create", "my-proj/my-app"))
claims = jwt.MapClaims{"sub": "cathy"}
assert.False(t, enf.Enforce(claims, "applications", "create", "my-proj/my-app"))
claims = jwt.MapClaims{"sub": "proj:my-proj:my-role"}
assert.False(t, enf.Enforce(claims, "applications", "create", "my-proj/my-app"))
claims = jwt.MapClaims{"sub": "proj:my-proj:other-role", "iat": 1234}
assert.False(t, enf.Enforce(claims, "applications", "create", "my-proj/my-app"))
claims = jwt.MapClaims{"groups": []string{"my-org:other-group"}}
assert.False(t, enf.Enforce(claims, "applications", "create", "my-proj/my-app"))
}

View File

@@ -7,11 +7,12 @@ import (
"reflect"
"time"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
@@ -23,8 +24,6 @@ import (
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/rbac"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
)
// Server provides a Repository service
@@ -60,7 +59,7 @@ func (s *Server) getConnectionState(ctx context.Context, url string) appsv1.Conn
if err := s.cache.Get(cacheKey, &connectionState); err == nil {
return connectionState
}
now := v1.Now()
now := metav1.Now()
connectionState = appsv1.ConnectionState{
Status: appsv1.ConnectionStatusSuccessful,
ModifiedAt: &now,
@@ -94,10 +93,17 @@ func (s *Server) List(ctx context.Context, q *RepoQuery) (*appsv1.RepositoryList
if urls != nil {
for _, url := range urls {
if s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceRepositories, rbacpolicy.ActionGet, url) {
items = append(items, appsv1.Repository{Repo: url, ConnectionState: s.getConnectionState(ctx, url)})
items = append(items, appsv1.Repository{Repo: url})
}
}
}
err = util.RunAllAsync(len(items), func(i int) error {
items[i].ConnectionState = s.getConnectionState(ctx, items[i].Repo)
return nil
})
if err != nil {
return nil, err
}
return &appsv1.RepositoryList{Items: items}, nil
}
@@ -326,7 +332,7 @@ func (s *Server) Create(ctx context.Context, q *RepoCreateRequest) (*appsv1.Repo
// Update updates a repository
func (s *Server) Update(ctx context.Context, q *RepoUpdateRequest) (*appsv1.Repository, error) {
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceRepositories, "update", q.Repo.Repo) {
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceRepositories, rbacpolicy.ActionUpdate, q.Repo.Repo) {
return nil, grpc.ErrPermissionDenied
}
_, err := s.db.UpdateRepository(ctx, q.Repo)
@@ -335,7 +341,7 @@ func (s *Server) Update(ctx context.Context, q *RepoUpdateRequest) (*appsv1.Repo
// Delete removes a repository
func (s *Server) Delete(ctx context.Context, q *RepoQuery) (*RepoResponse, error) {
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceRepositories, "delete", q.Repo) {
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceRepositories, rbacpolicy.ActionDelete, q.Repo) {
return nil, grpc.ErrPermissionDenied
}
err := s.db.DeleteRepository(ctx, q.Repo)
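The List handler above now collects the repository items first and resolves each connection state concurrently through util.RunAllAsync. The helper's implementation is not shown in this diff; the sketch below is one plausible shape for such a fan-out helper, not the actual code:

```go
// Sketch of a RunAllAsync-style helper: run fn for every index concurrently,
// wait for all of them, and surface the first error encountered.
package sketch

import "sync"

func runAllAsync(count int, fn func(i int) error) error {
	var wg sync.WaitGroup
	errs := make([]error, count)
	for i := 0; i < count; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// e.g. items[i].ConnectionState = getConnectionState(ctx, items[i].Repo)
			errs[i] = fn(i)
		}(i)
	}
	wg.Wait()
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}
```

Fetching connection states in parallel keeps List latency near the slowest single repository check rather than the sum of all checks.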

View File

@@ -46,7 +46,7 @@ func (m *RepoAppsQuery) Reset() { *m = RepoAppsQuery{} }
func (m *RepoAppsQuery) String() string { return proto.CompactTextString(m) }
func (*RepoAppsQuery) ProtoMessage() {}
func (*RepoAppsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{0}
return fileDescriptor_repository_324bad698d34f88e, []int{0}
}
func (m *RepoAppsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -102,7 +102,7 @@ func (m *AppInfo) Reset() { *m = AppInfo{} }
func (m *AppInfo) String() string { return proto.CompactTextString(m) }
func (*AppInfo) ProtoMessage() {}
func (*AppInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{1}
return fileDescriptor_repository_324bad698d34f88e, []int{1}
}
func (m *AppInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -159,7 +159,7 @@ func (m *RepoAppDetailsQuery) Reset() { *m = RepoAppDetailsQuery{} }
func (m *RepoAppDetailsQuery) String() string { return proto.CompactTextString(m) }
func (*RepoAppDetailsQuery) ProtoMessage() {}
func (*RepoAppDetailsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{2}
return fileDescriptor_repository_324bad698d34f88e, []int{2}
}
func (m *RepoAppDetailsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -224,7 +224,7 @@ func (m *RepoAppDetailsResponse) Reset() { *m = RepoAppDetailsResponse{}
func (m *RepoAppDetailsResponse) String() string { return proto.CompactTextString(m) }
func (*RepoAppDetailsResponse) ProtoMessage() {}
func (*RepoAppDetailsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{3}
return fileDescriptor_repository_324bad698d34f88e, []int{3}
}
func (m *RepoAppDetailsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -293,7 +293,7 @@ func (m *RepoAppsResponse) Reset() { *m = RepoAppsResponse{} }
func (m *RepoAppsResponse) String() string { return proto.CompactTextString(m) }
func (*RepoAppsResponse) ProtoMessage() {}
func (*RepoAppsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{4}
return fileDescriptor_repository_324bad698d34f88e, []int{4}
}
func (m *RepoAppsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -344,7 +344,7 @@ func (m *KsonnetAppSpec) Reset() { *m = KsonnetAppSpec{} }
func (m *KsonnetAppSpec) String() string { return proto.CompactTextString(m) }
func (*KsonnetAppSpec) ProtoMessage() {}
func (*KsonnetAppSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{5}
return fileDescriptor_repository_324bad698d34f88e, []int{5}
}
func (m *KsonnetAppSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -408,7 +408,7 @@ func (m *HelmAppSpec) Reset() { *m = HelmAppSpec{} }
func (m *HelmAppSpec) String() string { return proto.CompactTextString(m) }
func (*HelmAppSpec) ProtoMessage() {}
func (*HelmAppSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{6}
return fileDescriptor_repository_324bad698d34f88e, []int{6}
}
func (m *HelmAppSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -470,7 +470,7 @@ func (m *KustomizeAppSpec) Reset() { *m = KustomizeAppSpec{} }
func (m *KustomizeAppSpec) String() string { return proto.CompactTextString(m) }
func (*KustomizeAppSpec) ProtoMessage() {}
func (*KustomizeAppSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{7}
return fileDescriptor_repository_324bad698d34f88e, []int{7}
}
func (m *KustomizeAppSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -524,7 +524,7 @@ func (m *KsonnetEnvironment) Reset() { *m = KsonnetEnvironment{} }
func (m *KsonnetEnvironment) String() string { return proto.CompactTextString(m) }
func (*KsonnetEnvironment) ProtoMessage() {}
func (*KsonnetEnvironment) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{8}
return fileDescriptor_repository_324bad698d34f88e, []int{8}
}
func (m *KsonnetEnvironment) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -595,7 +595,7 @@ func (m *KsonnetEnvironmentDestination) Reset() { *m = KsonnetEnvironmen
func (m *KsonnetEnvironmentDestination) String() string { return proto.CompactTextString(m) }
func (*KsonnetEnvironmentDestination) ProtoMessage() {}
func (*KsonnetEnvironmentDestination) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{9}
return fileDescriptor_repository_324bad698d34f88e, []int{9}
}
func (m *KsonnetEnvironmentDestination) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -650,7 +650,7 @@ func (m *RepoQuery) Reset() { *m = RepoQuery{} }
func (m *RepoQuery) String() string { return proto.CompactTextString(m) }
func (*RepoQuery) ProtoMessage() {}
func (*RepoQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{10}
return fileDescriptor_repository_324bad698d34f88e, []int{10}
}
func (m *RepoQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -696,7 +696,7 @@ func (m *RepoResponse) Reset() { *m = RepoResponse{} }
func (m *RepoResponse) String() string { return proto.CompactTextString(m) }
func (*RepoResponse) ProtoMessage() {}
func (*RepoResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{11}
return fileDescriptor_repository_324bad698d34f88e, []int{11}
}
func (m *RepoResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -737,7 +737,7 @@ func (m *RepoCreateRequest) Reset() { *m = RepoCreateRequest{} }
func (m *RepoCreateRequest) String() string { return proto.CompactTextString(m) }
func (*RepoCreateRequest) ProtoMessage() {}
func (*RepoCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{12}
return fileDescriptor_repository_324bad698d34f88e, []int{12}
}
func (m *RepoCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -791,7 +791,7 @@ func (m *RepoUpdateRequest) Reset() { *m = RepoUpdateRequest{} }
func (m *RepoUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*RepoUpdateRequest) ProtoMessage() {}
func (*RepoUpdateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_3c54adc3c349824a, []int{13}
return fileDescriptor_repository_324bad698d34f88e, []int{13}
}
func (m *RepoUpdateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3675,10 +3675,10 @@ var (
)
func init() {
proto.RegisterFile("server/repository/repository.proto", fileDescriptor_repository_3c54adc3c349824a)
proto.RegisterFile("server/repository/repository.proto", fileDescriptor_repository_324bad698d34f88e)
}
var fileDescriptor_repository_3c54adc3c349824a = []byte{
var fileDescriptor_repository_324bad698d34f88e = []byte{
// 893 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x5f, 0x6f, 0x1b, 0x45,
0x10, 0xd7, 0x25, 0xa9, 0x1b, 0x8f, 0xdb, 0x2a, 0xdd, 0x96, 0x60, 0x0e, 0xc7, 0x8d, 0x16, 0x09,

View File

@@ -4,20 +4,22 @@ import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"regexp"
"strings"
"time"
"github.com/gobuffalo/packr"
golang_proto "github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/go-grpc-middleware/auth"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
log "github.com/sirupsen/logrus"
"github.com/soheilhy/cmux"
@@ -34,7 +36,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
@@ -103,6 +105,7 @@ var (
box = packr.NewBox("../util/rbac")
builtinPolicy string
clientConstraint = fmt.Sprintf(">= %s", minClientVersion)
baseHRefRegex = regexp.MustCompile(`<base href="(.*)">`)
)
func init() {
@@ -135,6 +138,7 @@ type ArgoCDServerOpts struct {
Namespace string
DexServerAddr string
StaticAssetsDir string
BaseHRef string
KubeClientset kubernetes.Interface
AppClientset appclientset.Interface
RepoClientset reposerver.Clientset
@@ -160,34 +164,14 @@ func initializeDefaultProject(opts ArgoCDServerOpts) error {
return err
}
// initializeSettings sets default secret settings (password set to hostname)
func initializeSettings(settingsMgr *settings_util.SettingsManager, opts ArgoCDServerOpts) (*settings_util.ArgoCDSettings, error) {
defaultPassword, err := os.Hostname()
errors.CheckError(err)
cdSettings, err := settings_util.UpdateSettings(defaultPassword, settingsMgr, false, false, opts.Namespace)
if err != nil {
// assume settings are initialized by another instance of api server
if apierrors.IsConflict(err) {
return settingsMgr.GetSettings()
} else {
log.Fatal(err)
}
}
return cdSettings, nil
}
// NewServer returns a new instance of the Argo CD API server
func NewServer(opts ArgoCDServerOpts) *ArgoCDServer {
settingsMgr := settings_util.NewSettingsManager(opts.KubeClientset, opts.Namespace)
settings, err := initializeSettings(settingsMgr, opts)
func NewServer(ctx context.Context, opts ArgoCDServerOpts) *ArgoCDServer {
settingsMgr := settings_util.NewSettingsManager(ctx, opts.KubeClientset, opts.Namespace)
settings, err := settingsMgr.InitializeSettings()
errors.CheckError(err)
err = initializeDefaultProject(opts)
errors.CheckError(err)
sessionMgr := util_session.NewSessionManager(settings)
sessionMgr := util_session.NewSessionManager(settingsMgr)
factory := appinformer.NewFilteredSharedInformerFactory(opts.AppClientset, 0, opts.Namespace, func(options *metav1.ListOptions) {})
appInformer := factory.Argoproj().V1alpha1().Applications().Informer()
@@ -330,8 +314,7 @@ func (a *ArgoCDServer) Shutdown() {
// watchSettings watches the configmap and secret for any setting updates that would warrant a
// restart of the API server.
func (a *ArgoCDServer) watchSettings(ctx context.Context) {
a.settingsMgr.StartNotifier(ctx, a.settings)
updateCh := make(chan struct{}, 1)
updateCh := make(chan *settings_util.ArgoCDSettings, 1)
a.settingsMgr.Subscribe(updateCh)
prevURL := a.settings.URL
@@ -347,7 +330,8 @@ func (a *ArgoCDServer) watchSettings(ctx context.Context) {
}
for {
<-updateCh
newSettings := <-updateCh
a.settings = newSettings
newDexCfgBytes, err := dex.GenerateDexConfigYAML(a.settings)
errors.CheckError(err)
if string(newDexCfgBytes) != string(prevDexCfgBytes) {
@@ -445,7 +429,7 @@ func (a *ArgoCDServer) newGRPCServer() *grpc.Server {
repoService := repository.NewServer(a.RepoClientset, db, a.enf, argocache.NewInMemoryCache(repository.DefaultRepoStatusCacheExpiration))
sessionService := session.NewServer(a.sessionMgr)
projectLock := util.NewKeyLock()
applicationService := application.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.RepoClientset, a.AppControllerClientset, kube.KubectlCmd{}, db, a.enf, projectLock)
applicationService := application.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.RepoClientset, a.AppControllerClientset, kube.KubectlCmd{}, db, a.enf, projectLock, a.settingsMgr)
projectService := project.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.enf, projectLock, a.sessionMgr)
settingsService := settings.NewServer(a.settingsMgr)
accountService := account.NewServer(a.sessionMgr, a.settingsMgr)
@@ -540,7 +524,7 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int) *http.Server
// Serve UI static assets
if a.StaticAssetsDir != "" {
mux.HandleFunc("/", newStaticAssetsHandler(a.StaticAssetsDir))
mux.HandleFunc("/", newStaticAssetsHandler(a.StaticAssetsDir, a.BaseHRef))
}
return &httpS
}
@@ -596,8 +580,37 @@ func registerDownloadHandlers(mux *http.ServeMux, base string) {
}
}
func indexFilePath(srcPath string, baseHRef string) (string, error) {
if baseHRef == "/" {
return srcPath, nil
}
filePath := path.Join(os.TempDir(), fmt.Sprintf("index_%s.html", strings.Replace(strings.Trim(baseHRef, "/"), "/", "_", -1)))
f, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
if os.IsExist(err) {
return filePath, nil
}
return "", err
}
defer util.Close(f)
data, err := ioutil.ReadFile(srcPath)
if err != nil {
return "", err
}
if baseHRef != "/" {
data = []byte(baseHRefRegex.ReplaceAllString(string(data), fmt.Sprintf(`<base href="/%s/">`, strings.Trim(baseHRef, "/"))))
}
_, err = f.Write(data)
if err != nil {
return "", err
}
return filePath, nil
}
// newStaticAssetsHandler returns an HTTP handler to serve UI static assets
func newStaticAssetsHandler(dir string) func(http.ResponseWriter, *http.Request) {
func newStaticAssetsHandler(dir string, baseHRef string) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
acceptHTML := false
for _, acceptType := range strings.Split(r.Header.Get("Accept"), ",") {
@@ -613,9 +626,14 @@ func newStaticAssetsHandler(dir string) func(http.ResponseWriter, *http.Request)
for k, v := range noCacheHeaders {
w.Header().Set(k, v)
}
http.ServeFile(w, r, dir+"/index.html")
indexHtmlPath, err := indexFilePath(path.Join(dir, "index.html"), baseHRef)
if err != nil {
http.Error(w, fmt.Sprintf("Unable to access index.html: %v", err), http.StatusInternalServerError)
return
}
http.ServeFile(w, r, indexHtmlPath)
} else {
http.ServeFile(w, r, dir+r.URL.Path)
http.ServeFile(w, r, path.Join(dir, r.URL.Path))
}
}
}
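The new indexFilePath/newStaticAssetsHandler pair rewrites the `<base href>` tag in index.html when the server is hosted under a URL prefix, caching the rewritten copy in a temp file. A standalone sketch of just the rewrite step, using the same regex as baseHRefRegex above (file caching and error handling omitted):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var baseHRefRegex = regexp.MustCompile(`<base href="(.*)">`)

// rewriteBaseHRef swaps the <base href="..."> tag in index.html content for
// the configured URL prefix so the UI resolves assets under that prefix.
func rewriteBaseHRef(indexHTML, baseHRef string) string {
	if baseHRef == "/" {
		return indexHTML
	}
	replacement := fmt.Sprintf(`<base href="/%s/">`, strings.Trim(baseHRef, "/"))
	return baseHRefRegex.ReplaceAllString(indexHTML, replacement)
}

func main() {
	html := `<head><base href="/"></head>`
	fmt.Println(rewriteBaseHRef(html, "/argo-cd/"))
	// Output: <head><base href="/argo-cd/"></head>
}
```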

View File

@@ -11,17 +11,13 @@ import (
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
apps "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
appinformer "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/server/application"
"github.com/argoproj/argo-cd/server/rbacpolicy"
"github.com/argoproj/argo-cd/test"
@@ -29,54 +25,23 @@ import (
)
const (
fakeNamespace = "fake-ns"
builtinPolicyFile = "builtin-policy.csv"
)
func fakeServer() *ArgoCDServer {
cm := fakeConfigMap()
secret := fakeSecret()
cm := test.NewFakeConfigMap()
secret := test.NewFakeSecret()
kubeclientset := fake.NewSimpleClientset(cm, secret)
appClientSet := apps.NewSimpleClientset()
argoCDOpts := ArgoCDServerOpts{
Namespace: fakeNamespace,
Namespace: test.FakeArgoCDNamespace,
KubeClientset: kubeclientset,
AppClientset: appClientSet,
Insecure: true,
DisableAuth: true,
}
return NewServer(argoCDOpts)
}
func fakeConfigMap() *apiv1.ConfigMap {
cm := apiv1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
},
Data: make(map[string]string),
}
return &cm
}
func fakeSecret(policy ...string) *apiv1.Secret {
secret := apiv1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
},
Data: make(map[string][]byte),
}
return &secret
return NewServer(context.Background(), argoCDOpts)
}
func TestEnforceProjectToken(t *testing.T) {
@@ -94,17 +59,17 @@ func TestEnforceProjectToken(t *testing.T) {
role := v1alpha1.ProjectRole{Name: roleName, Policies: []string{defaultPolicy}, JWTTokens: []v1alpha1.JWTToken{{IssuedAt: defaultIssuedAt}}}
existingProj := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: projectName, Namespace: fakeNamespace},
ObjectMeta: metav1.ObjectMeta{Name: projectName, Namespace: test.FakeArgoCDNamespace},
Spec: v1alpha1.AppProjectSpec{
Roles: []v1alpha1.ProjectRole{role},
},
}
cm := fakeConfigMap()
secret := fakeSecret()
cm := test.NewFakeConfigMap()
secret := test.NewFakeSecret()
kubeclientset := fake.NewSimpleClientset(cm, secret)
t.Run("TestEnforceProjectTokenSuccessful", func(t *testing.T) {
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
s := NewServer(context.Background(), ArgoCDServerOpts{Namespace: test.FakeArgoCDNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
cancel := test.StartInformer(s.projInformer)
defer cancel()
claims := jwt.MapClaims{"sub": defaultSub, "iat": defaultIssuedAt}
@@ -113,21 +78,21 @@ func TestEnforceProjectToken(t *testing.T) {
})
t.Run("TestEnforceProjectTokenWithDiffCreateAtFailure", func(t *testing.T) {
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
s := NewServer(context.Background(), ArgoCDServerOpts{Namespace: test.FakeArgoCDNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
diffCreateAt := defaultIssuedAt + 1
claims := jwt.MapClaims{"sub": defaultSub, "iat": diffCreateAt}
assert.False(t, s.enf.Enforce(claims, "applications", "get", defaultTestObject))
})
t.Run("TestEnforceProjectTokenIncorrectSubFormatFailure", func(t *testing.T) {
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
s := NewServer(context.Background(), ArgoCDServerOpts{Namespace: test.FakeArgoCDNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
invalidSub := "proj:test"
claims := jwt.MapClaims{"sub": invalidSub, "iat": defaultIssuedAt}
assert.False(t, s.enf.Enforce(claims, "applications", "get", defaultTestObject))
})
t.Run("TestEnforceProjectTokenNoTokenFailure", func(t *testing.T) {
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
s := NewServer(context.Background(), ArgoCDServerOpts{Namespace: test.FakeArgoCDNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
nonExistentToken := "fake-token"
invalidSub := fmt.Sprintf(subFormat, projectName, nonExistentToken)
claims := jwt.MapClaims{"sub": invalidSub, "iat": defaultIssuedAt}
@@ -137,7 +102,7 @@ func TestEnforceProjectToken(t *testing.T) {
t.Run("TestEnforceProjectTokenNotJWTTokenFailure", func(t *testing.T) {
proj := existingProj.DeepCopy()
proj.Spec.Roles[0].JWTTokens = nil
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(proj)})
s := NewServer(context.Background(), ArgoCDServerOpts{Namespace: test.FakeArgoCDNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(proj)})
claims := jwt.MapClaims{"sub": defaultSub, "iat": defaultIssuedAt}
assert.False(t, s.enf.Enforce(claims, "applications", "get", defaultTestObject))
})
@@ -150,7 +115,7 @@ func TestEnforceProjectToken(t *testing.T) {
proj := existingProj.DeepCopy()
proj.Spec.Roles[0] = role
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(proj)})
s := NewServer(context.Background(), ArgoCDServerOpts{Namespace: test.FakeArgoCDNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(proj)})
cancel := test.StartInformer(s.projInformer)
defer cancel()
claims := jwt.MapClaims{"sub": defaultSub, "iat": defaultIssuedAt}
@@ -161,17 +126,11 @@ func TestEnforceProjectToken(t *testing.T) {
})
}
func newFakeProjLister(objects ...runtime.Object) applister.AppProjectNamespaceLister {
fakeAppClientset := apps.NewSimpleClientset(objects...)
factory := appinformer.NewFilteredSharedInformerFactory(fakeAppClientset, 0, "", func(options *metav1.ListOptions) {})
return factory.Argoproj().V1alpha1().AppProjects().Lister().AppProjects(fakeNamespace)
}
func TestEnforceClaims(t *testing.T) {
kubeclientset := fake.NewSimpleClientset(fakeConfigMap())
enf := rbac.NewEnforcer(kubeclientset, fakeNamespace, common.ArgoCDConfigMapName, nil)
kubeclientset := fake.NewSimpleClientset(test.NewFakeConfigMap())
enf := rbac.NewEnforcer(kubeclientset, test.FakeArgoCDNamespace, common.ArgoCDConfigMapName, nil)
enf.SetBuiltinPolicy(box.String(builtinPolicyFile))
rbacEnf := rbacpolicy.NewRBACPolicyEnforcer(enf, newFakeProjLister())
rbacEnf := rbacpolicy.NewRBACPolicyEnforcer(enf, test.NewFakeProjLister())
enf.SetClaimsEnforcerFunc(rbacEnf.EnforceClaims)
policy := `
g, org2:team2, role:admin
@@ -201,9 +160,9 @@ g, bob, role:admin
func TestDefaultRoleWithClaims(t *testing.T) {
kubeclientset := fake.NewSimpleClientset()
enf := rbac.NewEnforcer(kubeclientset, fakeNamespace, common.ArgoCDConfigMapName, nil)
enf := rbac.NewEnforcer(kubeclientset, test.FakeArgoCDNamespace, common.ArgoCDConfigMapName, nil)
enf.SetBuiltinPolicy(box.String(builtinPolicyFile))
rbacEnf := rbacpolicy.NewRBACPolicyEnforcer(enf, newFakeProjLister())
rbacEnf := rbacpolicy.NewRBACPolicyEnforcer(enf, test.NewFakeProjLister())
enf.SetClaimsEnforcerFunc(rbacEnf.EnforceClaims)
claims := jwt.MapClaims{"groups": []string{"org1:team1", "org2:team2"}}
@@ -214,10 +173,10 @@ func TestDefaultRoleWithClaims(t *testing.T) {
}
func TestEnforceNilClaims(t *testing.T) {
kubeclientset := fake.NewSimpleClientset(fakeConfigMap())
enf := rbac.NewEnforcer(kubeclientset, fakeNamespace, common.ArgoCDConfigMapName, nil)
kubeclientset := fake.NewSimpleClientset(test.NewFakeConfigMap())
enf := rbac.NewEnforcer(kubeclientset, test.FakeArgoCDNamespace, common.ArgoCDConfigMapName, nil)
enf.SetBuiltinPolicy(box.String(builtinPolicyFile))
rbacEnf := rbacpolicy.NewRBACPolicyEnforcer(enf, newFakeProjLister())
rbacEnf := rbacpolicy.NewRBACPolicyEnforcer(enf, test.NewFakeProjLister())
enf.SetClaimsEnforcerFunc(rbacEnf.EnforceClaims)
assert.False(t, enf.Enforce(nil, "applications", "get", "foo/obj"))
enf.SetDefaultRole("role:readonly")
@@ -225,46 +184,46 @@ func TestEnforceNilClaims(t *testing.T) {
}
func TestInitializingExistingDefaultProject(t *testing.T) {
cm := fakeConfigMap()
secret := fakeSecret()
cm := test.NewFakeConfigMap()
secret := test.NewFakeSecret()
kubeclientset := fake.NewSimpleClientset(cm, secret)
defaultProj := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: common.DefaultAppProjectName, Namespace: fakeNamespace},
ObjectMeta: metav1.ObjectMeta{Name: common.DefaultAppProjectName, Namespace: test.FakeArgoCDNamespace},
Spec: v1alpha1.AppProjectSpec{},
}
appClientSet := apps.NewSimpleClientset(defaultProj)
argoCDOpts := ArgoCDServerOpts{
Namespace: fakeNamespace,
Namespace: test.FakeArgoCDNamespace,
KubeClientset: kubeclientset,
AppClientset: appClientSet,
}
argocd := NewServer(argoCDOpts)
argocd := NewServer(context.Background(), argoCDOpts)
assert.NotNil(t, argocd)
proj, err := appClientSet.ArgoprojV1alpha1().AppProjects(fakeNamespace).Get(common.DefaultAppProjectName, metav1.GetOptions{})
proj, err := appClientSet.ArgoprojV1alpha1().AppProjects(test.FakeArgoCDNamespace).Get(common.DefaultAppProjectName, metav1.GetOptions{})
assert.Nil(t, err)
assert.NotNil(t, proj)
assert.Equal(t, proj.Name, common.DefaultAppProjectName)
}
func TestInitializingNotExistingDefaultProject(t *testing.T) {
cm := fakeConfigMap()
secret := fakeSecret()
cm := test.NewFakeConfigMap()
secret := test.NewFakeSecret()
kubeclientset := fake.NewSimpleClientset(cm, secret)
appClientSet := apps.NewSimpleClientset()
argoCDOpts := ArgoCDServerOpts{
Namespace: fakeNamespace,
Namespace: test.FakeArgoCDNamespace,
KubeClientset: kubeclientset,
AppClientset: appClientSet,
}
argocd := NewServer(argoCDOpts)
argocd := NewServer(context.Background(), argoCDOpts)
assert.NotNil(t, argocd)
proj, err := appClientSet.ArgoprojV1alpha1().AppProjects(fakeNamespace).Get(common.DefaultAppProjectName, metav1.GetOptions{})
proj, err := appClientSet.ArgoprojV1alpha1().AppProjects(test.FakeArgoCDNamespace).Get(common.DefaultAppProjectName, metav1.GetOptions{})
assert.Nil(t, err)
assert.NotNil(t, proj)
assert.Equal(t, proj.Name, common.DefaultAppProjectName)
@@ -287,7 +246,7 @@ func TestEnforceProjectGroups(t *testing.T) {
existingProj := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: projectName,
Namespace: fakeNamespace,
Namespace: test.FakeArgoCDNamespace,
},
Spec: v1alpha1.AppProjectSpec{
Roles: []v1alpha1.ProjectRole{
@@ -301,8 +260,8 @@ func TestEnforceProjectGroups(t *testing.T) {
},
},
}
kubeclientset := fake.NewSimpleClientset(fakeConfigMap(), fakeSecret())
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
kubeclientset := fake.NewSimpleClientset(test.NewFakeConfigMap(), test.NewFakeSecret())
s := NewServer(context.Background(), ArgoCDServerOpts{Namespace: test.FakeArgoCDNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
cancel := test.StartInformer(s.projInformer)
defer cancel()
claims := jwt.MapClaims{
@@ -317,7 +276,7 @@ func TestEnforceProjectGroups(t *testing.T) {
log.Println(existingProj.ProjectPoliciesString())
existingProj.Spec.Roles[0].Groups = nil
log.Println(existingProj.ProjectPoliciesString())
s.AppClientset.ArgoprojV1alpha1().AppProjects(fakeNamespace).Update(&existingProj)
s.AppClientset.ArgoprojV1alpha1().AppProjects(test.FakeArgoCDNamespace).Update(&existingProj)
time.Sleep(100 * time.Millisecond) // this lets the informer get synced
assert.False(t, s.enf.Enforce(claims, "projects", "get", existingProj.ObjectMeta.Name))
assert.False(t, s.enf.Enforce(claims, "applications", "get", defaultTestObject))
@@ -336,12 +295,12 @@ func TestRevokedToken(t *testing.T) {
defaultIssuedAt := int64(1)
defaultSub := fmt.Sprintf(subFormat, projectName, roleName)
defaultPolicy := fmt.Sprintf(policyTemplate, defaultSub, projectName, defaultObject, defaultEffect)
kubeclientset := fake.NewSimpleClientset(fakeConfigMap(), fakeSecret())
kubeclientset := fake.NewSimpleClientset(test.NewFakeConfigMap(), test.NewFakeSecret())
existingProj := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: projectName,
Namespace: fakeNamespace,
Namespace: test.FakeArgoCDNamespace,
},
Spec: v1alpha1.AppProjectSpec{
Roles: []v1alpha1.ProjectRole{
@@ -358,7 +317,7 @@ func TestRevokedToken(t *testing.T) {
},
}
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
s := NewServer(context.Background(), ArgoCDServerOpts{Namespace: test.FakeArgoCDNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
cancel := test.StartInformer(s.projInformer)
defer cancel()
claims := jwt.MapClaims{"sub": defaultSub, "iat": defaultIssuedAt}
@@ -366,7 +325,7 @@ func TestRevokedToken(t *testing.T) {
assert.True(t, s.enf.Enforce(claims, "applications", "get", defaultTestObject))
// Now revoke the token by deleting the token
existingProj.Spec.Roles[0].JWTTokens = nil
s.AppClientset.ArgoprojV1alpha1().AppProjects(fakeNamespace).Update(&existingProj)
s.AppClientset.ArgoprojV1alpha1().AppProjects(test.FakeArgoCDNamespace).Update(&existingProj)
time.Sleep(200 * time.Millisecond) // this lets the informer get synced
assert.False(t, s.enf.Enforce(claims, "projects", "get", existingProj.ObjectMeta.Name))
assert.False(t, s.enf.Enforce(claims, "applications", "get", defaultTestObject))

View File

@@ -47,7 +47,7 @@ func (m *SessionCreateRequest) Reset() { *m = SessionCreateRequest{} }
func (m *SessionCreateRequest) String() string { return proto.CompactTextString(m) }
func (*SessionCreateRequest) ProtoMessage() {}
func (*SessionCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_session_217b926c109d1cc2, []int{0}
return fileDescriptor_session_8e535ce77fc5e082, []int{0}
}
func (m *SessionCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -108,7 +108,7 @@ func (m *SessionDeleteRequest) Reset() { *m = SessionDeleteRequest{} }
func (m *SessionDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*SessionDeleteRequest) ProtoMessage() {}
func (*SessionDeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_session_217b926c109d1cc2, []int{1}
return fileDescriptor_session_8e535ce77fc5e082, []int{1}
}
func (m *SessionDeleteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -149,7 +149,7 @@ func (m *SessionResponse) Reset() { *m = SessionResponse{} }
func (m *SessionResponse) String() string { return proto.CompactTextString(m) }
func (*SessionResponse) ProtoMessage() {}
func (*SessionResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_session_217b926c109d1cc2, []int{2}
return fileDescriptor_session_8e535ce77fc5e082, []int{2}
}
func (m *SessionResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -827,10 +827,10 @@ var (
)
func init() {
proto.RegisterFile("server/session/session.proto", fileDescriptor_session_217b926c109d1cc2)
proto.RegisterFile("server/session/session.proto", fileDescriptor_session_8e535ce77fc5e082)
}
var fileDescriptor_session_217b926c109d1cc2 = []byte{
var fileDescriptor_session_8e535ce77fc5e082 = []byte{
// 356 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xb1, 0x4e, 0xeb, 0x30,
0x14, 0x86, 0xe5, 0x5e, 0xdd, 0xde, 0x7b, 0x3d, 0xdc, 0x8a, 0x28, 0x82, 0x28, 0x2a, 0x15, 0xca,

View File

@@ -42,7 +42,7 @@ func (m *SettingsQuery) Reset() { *m = SettingsQuery{} }
func (m *SettingsQuery) String() string { return proto.CompactTextString(m) }
func (*SettingsQuery) ProtoMessage() {}
func (*SettingsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_90d4947f5a5e2583, []int{0}
return fileDescriptor_settings_902e174a76eb35c2, []int{0}
}
func (m *SettingsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -84,7 +84,7 @@ func (m *Settings) Reset() { *m = Settings{} }
func (m *Settings) String() string { return proto.CompactTextString(m) }
func (*Settings) ProtoMessage() {}
func (*Settings) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_90d4947f5a5e2583, []int{1}
return fileDescriptor_settings_902e174a76eb35c2, []int{1}
}
func (m *Settings) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -145,7 +145,7 @@ func (m *DexConfig) Reset() { *m = DexConfig{} }
func (m *DexConfig) String() string { return proto.CompactTextString(m) }
func (*DexConfig) ProtoMessage() {}
func (*DexConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_90d4947f5a5e2583, []int{2}
return fileDescriptor_settings_902e174a76eb35c2, []int{2}
}
func (m *DexConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -193,7 +193,7 @@ func (m *Connector) Reset() { *m = Connector{} }
func (m *Connector) String() string { return proto.CompactTextString(m) }
func (*Connector) ProtoMessage() {}
func (*Connector) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_90d4947f5a5e2583, []int{3}
return fileDescriptor_settings_902e174a76eb35c2, []int{3}
}
func (m *Connector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -249,7 +249,7 @@ func (m *OIDCConfig) Reset() { *m = OIDCConfig{} }
func (m *OIDCConfig) String() string { return proto.CompactTextString(m) }
func (*OIDCConfig) ProtoMessage() {}
func (*OIDCConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_90d4947f5a5e2583, []int{4}
return fileDescriptor_settings_902e174a76eb35c2, []int{4}
}
func (m *OIDCConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1291,10 +1291,10 @@ var (
)
func init() {
proto.RegisterFile("server/settings/settings.proto", fileDescriptor_settings_90d4947f5a5e2583)
proto.RegisterFile("server/settings/settings.proto", fileDescriptor_settings_902e174a76eb35c2)
}
var fileDescriptor_settings_90d4947f5a5e2583 = []byte{
var fileDescriptor_settings_902e174a76eb35c2 = []byte{
// 397 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xcd, 0x8a, 0xdb, 0x30,
0x18, 0x44, 0x71, 0x49, 0xe2, 0xaf, 0x3f, 0x69, 0xd5, 0x12, 0xdc, 0x50, 0x9c, 0xe0, 0x53, 0xa0,

View File

@@ -57,8 +57,7 @@
"in": "query"
},
{
"type": "boolean",
"format": "boolean",
"type": "string",
"name": "refresh",
"in": "query"
},
@@ -202,8 +201,7 @@
"required": true
},
{
"type": "boolean",
"format": "boolean",
"type": "string",
"name": "refresh",
"in": "query"
},
@@ -1224,8 +1222,7 @@
"in": "query"
},
{
"type": "boolean",
"format": "boolean",
"type": "string",
"name": "refresh",
"in": "query"
},
@@ -2486,6 +2483,18 @@
}
}
},
"v1alpha1ComparedTo": {
"type": "object",
"title": "ComparedTo contains application source and target which was used for resources comparison",
"properties": {
"destination": {
"$ref": "#/definitions/v1alpha1ApplicationDestination"
},
"source": {
"$ref": "#/definitions/v1alpha1ApplicationSource"
}
}
},
"v1alpha1ComponentParameter": {
"type": "object",
"title": "ComponentParameter contains information about component parameter value",
@@ -2527,6 +2536,20 @@
}
}
},
"v1alpha1InfoItem": {
"type": "object",
"title": "InfoItem contains human readable information about object",
"properties": {
"name": {
"description": "Name is a human readable title for this piece of information.",
"type": "string"
},
"value": {
"description": "Value is human readable content.",
"type": "string"
}
}
},
"v1alpha1JWTToken": {
"type": "object",
"title": "JWTToken holds the issuedAt and expiresAt values of a token",
@@ -2687,6 +2710,12 @@
"group": {
"type": "string"
},
"info": {
"type": "array",
"items": {
"$ref": "#/definitions/v1alpha1InfoItem"
}
},
"kind": {
"type": "string"
},
@@ -2699,12 +2728,6 @@
"resourceVersion": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"type": "string"
}
},
"version": {
"type": "string"
}
@@ -2886,7 +2909,7 @@
"type": "object",
"properties": {
"comparedTo": {
"$ref": "#/definitions/v1alpha1ApplicationSource"
"$ref": "#/definitions/v1alpha1ComparedTo"
},
"revision": {
"type": "string"

View File

@@ -51,7 +51,7 @@ func (m *VersionMessage) Reset() { *m = VersionMessage{} }
func (m *VersionMessage) String() string { return proto.CompactTextString(m) }
func (*VersionMessage) ProtoMessage() {}
func (*VersionMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_version_301443c925f6e1d7, []int{0}
return fileDescriptor_version_9a67e1897074bbcc, []int{0}
}
func (m *VersionMessage) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -781,10 +781,10 @@ var (
)
func init() {
proto.RegisterFile("server/version/version.proto", fileDescriptor_version_301443c925f6e1d7)
proto.RegisterFile("server/version/version.proto", fileDescriptor_version_9a67e1897074bbcc)
}
var fileDescriptor_version_301443c925f6e1d7 = []byte{
var fileDescriptor_version_9a67e1897074bbcc = []byte{
// 343 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0xcf, 0x4a, 0xc3, 0x40,
0x10, 0xc6, 0x49, 0xd5, 0xfe, 0x59, 0x4a, 0x0f, 0x8b, 0xd4, 0x25, 0x96, 0x22, 0x3d, 0x88, 0x08,

View File

@@ -6,6 +6,10 @@ import (
"testing"
"time"
"k8s.io/apimachinery/pkg/types"
"github.com/argoproj/argo-cd/util/diff"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -120,11 +124,11 @@ func TestAppManagement(t *testing.T) {
})
t.Run("TestAppRollbackSuccessful", func(t *testing.T) {
appWithHistory := testApp.DeepCopy()
// create app and ensure its comparison status is not SyncStatusCodeUnknown
app := fixture.CreateApp(t, appWithHistory)
app.Status.History = []v1alpha1.RevisionHistory{{
app := fixture.CreateApp(t, testApp)
appWithHistory := app.DeepCopy()
appWithHistory.Status.History = []v1alpha1.RevisionHistory{{
ID: 1,
Revision: "abc",
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
@@ -133,10 +137,11 @@ func TestAppManagement(t *testing.T) {
Revision: "cdb",
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
}}
app, err := fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Update(app)
if err != nil {
t.Fatalf("Unable to update app %v", err)
}
patch, _, err := diff.CreateTwoWayMergePatch(app, appWithHistory, &v1alpha1.Application{})
assert.Nil(t, err)
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Patch(app.Name, types.MergePatchType, patch)
assert.Nil(t, err)
// sync app and make sure it reaches InSync state
_, err = fixture.RunCli("app", "rollback", app.Name, "1")
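The rollback test now records history through a two-way merge patch instead of a full Update, which sidesteps update conflicts with fields the controller may have changed since the object was read. A sketch of that pattern, assuming the generated clientset's Patch signature matches its use in the diff above:

```go
package sketch

import (
	"k8s.io/apimachinery/pkg/types"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	appclient "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/typed/application/v1alpha1"
	"github.com/argoproj/argo-cd/util/diff"
)

// setHistory records rollback history by patching only status.history,
// leaving the rest of the live object untouched.
func setHistory(appIf appclient.ApplicationInterface, app *v1alpha1.Application, history []v1alpha1.RevisionHistory) (*v1alpha1.Application, error) {
	modified := app.DeepCopy()
	modified.Status.History = history
	patch, _, err := diff.CreateTwoWayMergePatch(app, modified, &v1alpha1.Application{})
	if err != nil {
		return nil, err
	}
	return appIf.Patch(app.Name, types.MergePatchType, patch)
}
```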

View File

@@ -61,6 +61,7 @@ type Fixture struct {
ApiServerAddress string
ControllerServerAddress string
Enforcer *rbac.Enforcer
SettingsMgr *settings.SettingsManager
tearDownCallback func()
}
@@ -96,8 +97,7 @@ func (f *Fixture) setup() error {
return err
}
settingsMgr := settings.NewSettingsManager(f.KubeClient, f.Namespace)
err = settingsMgr.SaveSettings(&settings.ArgoCDSettings{})
err = f.SettingsMgr.SaveSettings(&settings.ArgoCDSettings{})
if err != nil {
return err
}
@@ -123,7 +123,7 @@ func (f *Fixture) setup() error {
}
memCache := cache.NewInMemoryCache(repository.DefaultRepoCacheExpiration)
repoSrv, err := reposerver.NewServer(&FakeGitClientFactory{}, memCache, func(config *tls.Config) {})
repoSrv, err := reposerver.NewServer(&FakeGitClientFactory{}, memCache, func(config *tls.Config) {}, 0)
if err != nil {
return err
}
@@ -133,7 +133,8 @@ func (f *Fixture) setup() error {
f.ApiServerAddress = fmt.Sprintf("127.0.0.1:%d", apiServerPort)
f.ControllerServerAddress = fmt.Sprintf("127.0.0.1:%d", controllerServerPort)
apiServer := server.NewServer(server.ArgoCDServerOpts{
ctx, cancel := context.WithCancel(context.Background())
apiServer := server.NewServer(ctx, server.ArgoCDServerOpts{
Namespace: f.Namespace,
AppClientset: f.AppClient,
DisableAuth: true,
@@ -143,7 +144,6 @@ func (f *Fixture) setup() error {
AppControllerClientset: controller.NewAppControllerClientset(f.ControllerServerAddress),
})
ctx, cancel := context.WithCancel(context.Background())
go func() {
apiServer.Run(ctx, apiServerPort)
}()
@@ -167,7 +167,11 @@ func (f *Fixture) setup() error {
return err
}
ctrl := f.createController()
ctrl, err := f.createController()
if err != nil {
cancel()
return err
}
controllerServerGRPC, err := ctrl.CreateGRPC(func(config *tls.Config) {})
if err != nil {
cancel()
@@ -278,7 +282,7 @@ func NewFixture() (*Fixture, error) {
if err != nil {
return nil, err
}
settingsMgr := settings.NewSettingsManager(kubeClient, namespace)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, namespace)
db := db.NewDB(namespace, settingsMgr, kubeClient)
enforcer := rbac.NewEnforcer(kubeClient, namespace, common.ArgoCDRBACConfigMapName, nil)
err = enforcer.SetBuiltinPolicy(test.BuiltinPolicy)
@@ -295,6 +299,7 @@ func NewFixture() (*Fixture, error) {
KubeClient: kubeClient,
Namespace: namespace,
Enforcer: enforcer,
SettingsMgr: settingsMgr,
}
err = fixture.setup()
if err != nil {
@@ -325,9 +330,10 @@ func (f *Fixture) CreateApp(t *testing.T, application *v1alpha1.Application) *v1
}
// createController creates new controller instance
func (f *Fixture) createController() *controller.ApplicationController {
func (f *Fixture) createController() (*controller.ApplicationController, error) {
return controller.NewApplicationController(
f.Namespace,
f.SettingsMgr,
f.KubeClient,
f.AppClient,
reposerver.NewRepositoryServerClientset(f.RepoServerAddress),

View File

@@ -0,0 +1,3 @@
resources:
- servicecatalog-apiservice.yaml
- servicecatalog-svc.yaml

View File

@@ -0,0 +1,13 @@
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1beta1.servicecatalog.k8s.io
spec:
caBundle: null
group: servicecatalog.k8s.io
groupPriorityMinimum: 10000
service:
name: service-catalog-apiserver
namespace: default
version: v1beta1
versionPriority: 20

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: service-catalog-apiserver
namespace: default
spec:
ports:
- name: secure
port: 443
protocol: TCP
targetPort: 8443
selector:
app: service-catalog-apiserver

View File

@@ -0,0 +1,7 @@
```
argocd app create crd-creation \
--repo https://github.com/argoproj/argo-cd.git \
--path test/e2e/functional/crd-creation \
--dest-server https://kubernetes.default.svc \
--dest-namespace default
```

View File

@@ -0,0 +1,16 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Dummy
metadata:
name: dummy-crd-instance
---
apiVersion: argoproj.io/v1alpha1
kind: Dummy
metadata:
name: dummy-crd-instance
namespace: kube-system
---
apiVersion: argoproj.io/v1alpha1
kind: ClusterDummy
metadata:
name: cluster-dummy-crd-instance

View File

@@ -2,7 +2,7 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: dummy-crd.argoproj.io
name: dummies.argoproj.io
spec:
group: argoproj.io
version: v1alpha1
@@ -11,8 +11,14 @@ spec:
kind: Dummy
plural: dummies
---
apiVersion: argoproj.io/v1alpha1
kind: Dummy
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: dummy-crd-instance
name: clusterdummies.argoproj.io
spec:
group: argoproj.io
version: v1alpha1
scope: Cluster
names:
kind: ClusterDummy
plural: clusterdummies

View File

@@ -1,2 +1,3 @@
resources:
- crd.yaml
- crd-instances.yaml

View File

@@ -0,0 +1,32 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: extensions-deployment
labels:
app: extensions-deployment
spec:
replicas: 1
selector:
matchLabels:
app: extensions-deployment
template:
metadata:
labels:
app: extensions-deployment
spec:
containers:
- name: extensions-deployment
image: "gcr.io/heptio-images/ks-guestbook-demo:0.1"
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http

View File

@@ -0,0 +1,9 @@
apiVersion: extensions/v1beta1
kind: NetworkPolicy
metadata:
name: allow-default
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress

View File

@@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
name: pod-security-example
spec:
privileged: false # Don't allow privileged pods!
# The rest fills in some required fields.
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
runAsUser:
rule: RunAsAny
fsGroup:
rule: RunAsAny
volumes:
- '*'

View File

@@ -8,15 +8,19 @@ import (
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
apps "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
appinformer "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
)
const (
FakeArgoCDNamespace = "fake-argocd-ns"
FakeDestNamespace = "fake-dest-ns"
FakeClusterURL = "https://fake-cluster:443"
TestAppInstanceName = "test-app-instance"
)
var (
@@ -180,3 +184,45 @@ func DemoDeployment() *appsv1.Deployment {
},
}
}
func NewFakeConfigMap() *apiv1.ConfigMap {
cm := apiv1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: FakeArgoCDNamespace,
},
Data: make(map[string]string),
}
return &cm
}
func NewFakeSecret(policy ...string) *apiv1.Secret {
secret := apiv1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: FakeArgoCDNamespace,
},
Data: map[string][]byte{
"admin.password": []byte("test"),
"server.secretkey": []byte("test"),
},
}
return &secret
}
func NewFakeProjLister(objects ...runtime.Object) applister.AppProjectNamespaceLister {
fakeAppClientset := apps.NewSimpleClientset(objects...)
factory := appinformer.NewFilteredSharedInformerFactory(fakeAppClientset, 0, "", func(options *metav1.ListOptions) {})
projInformer := factory.Argoproj().V1alpha1().AppProjects().Informer()
cancel := StartInformer(projInformer)
defer cancel()
return factory.Argoproj().V1alpha1().AppProjects().Lister().AppProjects(FakeArgoCDNamespace)
}

View File

@@ -9,8 +9,6 @@ import (
"strings"
"time"
"github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -23,14 +21,15 @@ import (
"github.com/argoproj/argo-cd/common"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/pkg/client/clientset/versioned/typed/application/v1alpha1"
applicationsv1 "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/ksonnet"
"github.com/argoproj/argo-cd/util/kube"
)
const (
@@ -67,17 +66,13 @@ func FilterByProjects(apps []argoappv1.Application, projects []string) []argoapp
}
// RefreshApp updates the refresh annotation of an application to coerce the controller to process it
func RefreshApp(appIf v1alpha1.ApplicationInterface, name string) (*argoappv1.Application, error) {
refreshString := time.Now().UTC().Format(time.RFC3339)
func RefreshApp(appIf v1alpha1.ApplicationInterface, name string, refreshType argoappv1.RefreshType) (*argoappv1.Application, error) {
metadata := map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]string{
common.AnnotationKeyRefresh: refreshString,
common.AnnotationKeyRefresh: string(refreshType),
},
},
"status": map[string]interface{}{
"observedAt": nil,
},
}
var err error
patch, err := json.Marshal(metadata)
@@ -91,7 +86,7 @@ func RefreshApp(appIf v1alpha1.ApplicationInterface, name string) (*argoappv1.Ap
return nil, err
}
} else {
log.Infof("Refreshed app '%s' for controller reprocessing (%s)", name, refreshString)
log.Infof("Requested app '%s' refresh", name)
return app, nil
}
time.Sleep(100 * time.Millisecond)
@@ -120,16 +115,11 @@ func WaitForRefresh(ctx context.Context, appIf v1alpha1.ApplicationInterface, na
if !ok {
return nil, fmt.Errorf("Application event object failed conversion: %v", next)
}
refreshTimestampStr := app.ObjectMeta.Annotations[common.AnnotationKeyRefresh]
if refreshTimestampStr == "" {
now := time.Now().UTC()
refreshTimestampStr = now.String()
annotations := app.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
refreshTimestamp, err := time.Parse(time.RFC3339, refreshTimestampStr)
if err != nil {
return nil, fmt.Errorf("Unable to parse '%s': %v", common.AnnotationKeyRefresh, err)
}
if app.Status.ObservedAt.After(refreshTimestamp) || app.Status.ObservedAt.Time.Equal(refreshTimestamp) {
if _, ok := annotations[common.AnnotationKeyRefresh]; !ok {
return app, nil
}
}
@@ -276,8 +266,8 @@ func verifyOneSourceType(source *argoappv1.ApplicationSource) *argoappv1.Applica
}
// GetAppProject returns a project from an application
func GetAppProject(spec *argoappv1.ApplicationSpec, appclientset appclientset.Interface, ns string) (*argoappv1.AppProject, error) {
return appclientset.ArgoprojV1alpha1().AppProjects(ns).Get(spec.GetProject(), metav1.GetOptions{})
func GetAppProject(spec *argoappv1.ApplicationSpec, projLister applicationsv1.AppProjectLister, ns string) (*argoappv1.AppProject, error) {
return projLister.AppProjects(ns).Get(spec.GetProject())
}
// queryAppSourceType queries repo server for yaml files in a directory, and determines its
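
RefreshApp now takes a RefreshType and stores it in the refresh annotation instead of a timestamp; the controller removes the annotation once it has reprocessed the app. A hedged sketch of a caller, where the kubeconfig location, namespace, application name, and the util/argo import path are assumptions:

package main

import (
	"log"

	"k8s.io/client-go/tools/clientcmd"

	argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
	"github.com/argoproj/argo-cd/util/argo"
)

func main() {
	// Load a kubeconfig from the default location (illustrative only).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	appIf := appclientset.NewForConfigOrDie(config).ArgoprojV1alpha1().Applications("argocd")

	// The annotation now records the refresh type rather than a timestamp; the
	// controller clears it once the application has been reprocessed.
	if _, err := argo.RefreshApp(appIf, "guestbook", argoappv1.RefreshTypeNormal); err != nil {
		log.Fatal(err)
	}
}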


@@ -7,14 +7,17 @@ import (
"time"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/watch"
testcore "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd/common"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
testcore "k8s.io/client-go/testing"
"github.com/argoproj/argo-cd/pkg/client/informers/externalversions/application/v1alpha1"
applisters "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
)
func TestRefreshApp(t *testing.T) {
@@ -23,7 +26,7 @@ func TestRefreshApp(t *testing.T) {
testApp.Namespace = "default"
appClientset := appclientset.NewSimpleClientset(&testApp)
appIf := appClientset.ArgoprojV1alpha1().Applications("default")
_, err := RefreshApp(appIf, "test-app")
_, err := RefreshApp(appIf, "test-app", argoappv1.RefreshTypeNormal)
assert.Nil(t, err)
// For some reason, the fake Application interface doesn't reflect the patch status after Patch(),
// so we can't verify it was set in unit tests.
@@ -43,7 +46,12 @@ func TestGetAppProjectWithNoProjDefined(t *testing.T) {
testApp.Name = "test-app"
testApp.Namespace = namespace
appClientset := appclientset.NewSimpleClientset(testProj)
proj, err := GetAppProject(&testApp.Spec, appClientset, namespace)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
informer := v1alpha1.NewAppProjectInformer(appClientset, namespace, 0, cache.Indexers{})
go informer.Run(ctx.Done())
cache.WaitForCacheSync(ctx.Done(), informer.HasSynced)
proj, err := GetAppProject(&testApp.Spec, applisters.NewAppProjectLister(informer.GetIndexer()), namespace)
assert.Nil(t, err)
assert.Equal(t, proj.Name, projName)
}
@@ -63,10 +71,6 @@ func TestWaitForRefresh(t *testing.T) {
var testApp argoappv1.Application
testApp.Name = "test-app"
testApp.Namespace = "default"
testApp.ObjectMeta.Annotations = map[string]string{
common.AnnotationKeyRefresh: time.Now().UTC().Format(time.RFC3339),
}
testApp.Status.ObservedAt = metav1.Now()
appClientset = appclientset.NewSimpleClientset()
appIf = appClientset.ArgoprojV1alpha1().Applications("default")


@@ -29,24 +29,27 @@ var (
// ListClusters returns list of clusters
func (db *db) ListClusters(ctx context.Context) (*appv1.ClusterList, error) {
listOpts := metav1.ListOptions{}
labelSelector := labels.NewSelector()
req, err := labels.NewRequirement(common.LabelKeySecretType, selection.Equals, []string{common.LabelValueSecretTypeCluster})
if err != nil {
return nil, err
}
labelSelector = labelSelector.Add(*req)
listOpts.LabelSelector = labelSelector.String()
clusterSecrets, err := db.kubeclientset.CoreV1().Secrets(db.ns).List(listOpts)
secretsLister, err := db.settingsMgr.GetSecretsLister()
if err != nil {
return nil, err
}
clusterSecrets, err := secretsLister.Secrets(db.ns).List(labelSelector)
if err != nil {
return nil, err
}
clusterList := appv1.ClusterList{
Items: make([]appv1.Cluster, len(clusterSecrets.Items)),
Items: make([]appv1.Cluster, len(clusterSecrets)),
}
hasInClusterCredentials := false
for i, clusterSecret := range clusterSecrets.Items {
cluster := *secretToCluster(&clusterSecret)
for i, clusterSecret := range clusterSecrets {
cluster := *secretToCluster(clusterSecret)
clusterList.Items[i] = cluster
if cluster.Server == common.KubernetesInternalAPIServerAddr {
hasInClusterCredentials = true
@@ -83,7 +86,7 @@ func (db *db) CreateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Clust
}
return nil, err
}
return secretToCluster(clusterSecret), nil
return secretToCluster(clusterSecret), db.settingsMgr.ResyncInformers()
}
// ClusterEvent contains information about cluster event
@@ -160,9 +163,13 @@ func (db *db) getClusterSecret(server string) (*apiv1.Secret, error) {
if err != nil {
return nil, err
}
secretsLister, err := db.settingsMgr.GetSecretsLister()
if err != nil {
return nil, err
}
for _, name := range []string{secName, legacySecName} {
var clusterSecret *apiv1.Secret
clusterSecret, err = db.kubeclientset.CoreV1().Secrets(db.ns).Get(name, metav1.GetOptions{})
clusterSecret, err = secretsLister.Secrets(db.ns).Get(name)
if err != nil {
if apierr.IsNotFound(err) {
err = status.Errorf(codes.NotFound, "cluster %q not found", server)
@@ -201,7 +208,7 @@ func (db *db) UpdateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Clust
if err != nil {
return nil, err
}
return secretToCluster(clusterSecret), nil
return secretToCluster(clusterSecret), db.settingsMgr.ResyncInformers()
}
// Delete deletes a cluster by name
@@ -222,12 +229,15 @@ func (db *db) DeleteCluster(ctx context.Context, name string) error {
canDelete := secret.Annotations != nil && secret.Annotations[common.AnnotationKeyManagedBy] == common.AnnotationValueManagedByArgoCD || secret.Name == legacySecName
if canDelete {
return db.kubeclientset.CoreV1().Secrets(db.ns).Delete(secret.Name, &metav1.DeleteOptions{})
err = db.kubeclientset.CoreV1().Secrets(db.ns).Delete(secret.Name, &metav1.DeleteOptions{})
} else {
delete(secret.Labels, common.LabelKeySecretType)
_, err = db.kubeclientset.CoreV1().Secrets(db.ns).Update(secret)
}
if err != nil {
return err
}
return db.settingsMgr.ResyncInformers()
}
// serverToSecretName
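
ListClusters and getClusterSecret now read secrets through an informer-backed lister obtained from the settings manager rather than querying the API server directly. A self-contained sketch of that listing pattern, using a fake clientset and placeholder namespace and label key/value:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// A fake clientset keeps the sketch self-contained; the real code obtains
	// its lister from the settings manager.
	clientset := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactoryWithOptions(clientset, 0, informers.WithNamespace("argocd"))
	secretsLister := factory.Core().V1().Secrets().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Select only secrets labelled as cluster credentials (label key/value are placeholders).
	req, err := labels.NewRequirement("argocd.argoproj.io/secret-type", selection.Equals, []string{"cluster"})
	if err != nil {
		panic(err)
	}
	selector := labels.NewSelector().Add(*req)

	// The lister serves from the informer cache, so listing does not hit the API server.
	secrets, err := secretsLister.Secrets("argocd").List(selector)
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d cluster secrets\n", len(secrets))
}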


@@ -1,13 +1,13 @@
package db
import (
"github.com/argoproj/argo-cd/util/settings"
"context"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"golang.org/x/net/context"
"k8s.io/client-go/kubernetes"
"github.com/argoproj/argo-cd/util/settings"
)
type ArgoDB interface {
@@ -57,8 +57,11 @@ func NewDB(namespace string, settingsMgr *settings.SettingsManager, kubeclientse
func (db *db) getSecret(name string, cache map[string]*v1.Secret) (*v1.Secret, error) {
secret, ok := cache[name]
if !ok {
var err error
secret, err = db.kubeclientset.CoreV1().Secrets(db.ns).Get(name, metav1.GetOptions{})
secretsLister, err := db.settingsMgr.GetSecretsLister()
if err != nil {
return nil, err
}
secret, err = secretsLister.Secrets(db.ns).Get(name)
if err != nil {
return nil, err
}


@@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -46,7 +46,7 @@ func getClientset(config map[string]string, objects ...runtime.Object) *fake.Cli
func TestCreateRepository(t *testing.T) {
clientset := getClientset(nil)
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
repo, err := db.CreateRepository(context.Background(), &v1alpha1.Repository{
Repo: "https://github.com/argoproj/argocd-example-apps",
@@ -69,7 +69,7 @@ func TestCreateExistingRepository(t *testing.T) {
clientset := getClientset(map[string]string{
"repositories": `- url: https://github.com/argoproj/argocd-example-apps`,
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
_, err := db.CreateRepository(context.Background(), &v1alpha1.Repository{
Repo: "https://github.com/argoproj/argocd-example-apps",
@@ -104,7 +104,7 @@ func TestDeleteRepositoryManagedSecrets(t *testing.T) {
password: []byte("test-password"),
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
err := db.DeleteRepository(context.Background(), "https://github.com/argoproj/argocd-example-apps")
assert.Nil(t, err)
@@ -139,7 +139,7 @@ func TestDeleteRepositoryUnmanagedSecrets(t *testing.T) {
password: []byte("test-password"),
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
err := db.DeleteRepository(context.Background(), "https://github.com/argoproj/argocd-example-apps")
assert.Nil(t, err)
@@ -182,7 +182,7 @@ func TestUpdateRepositoryWithManagedSecrets(t *testing.T) {
sshPrivateKey: []byte("test-ssh-private-key"),
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
repo, err := db.GetRepository(context.Background(), "https://github.com/argoproj/argocd-example-apps")
assert.Nil(t, err)
@@ -216,7 +216,7 @@ func TestGetClusterSuccessful(t *testing.T) {
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
cluster, err := db.GetCluster(context.Background(), clusterURL)
assert.Nil(t, err)
assert.Equal(t, clusterURL, cluster.Server)
@@ -226,7 +226,7 @@ func TestGetNonExistingCluster(t *testing.T) {
clusterURL := "https://mycluster"
clientset := getClientset(nil)
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
_, err := db.GetCluster(context.Background(), clusterURL)
assert.NotNil(t, err)
status, ok := status.FromError(err)
@@ -249,7 +249,7 @@ func TestGetClusterFallbackToLegacyName(t *testing.T) {
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
cluster, err := db.GetCluster(context.Background(), clusterURL)
assert.Nil(t, err)
assert.Equal(t, clusterURL, cluster.Server)
@@ -258,7 +258,7 @@ func TestGetClusterFallbackToLegacyName(t *testing.T) {
func TestCreateClusterSuccessful(t *testing.T) {
clusterURL := "https://mycluster"
clientset := getClientset(nil)
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
_, err := db.CreateCluster(context.Background(), &v1alpha1.Cluster{
Server: clusterURL,
@@ -287,7 +287,7 @@ func TestDeleteClusterWithLegacyName(t *testing.T) {
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
err := db.DeleteCluster(context.Background(), clusterURL)
assert.Nil(t, err)
@@ -315,7 +315,7 @@ func TestDeleteClusterWithUnmanagedSecret(t *testing.T) {
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
err := db.DeleteCluster(context.Background(), clusterURL)
assert.Nil(t, err)
@@ -328,7 +328,7 @@ func TestDeleteClusterWithUnmanagedSecret(t *testing.T) {
func TestFuzzyEquivalence(t *testing.T) {
clientset := getClientset(nil)
ctx := context.Background()
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
repo, err := db.CreateRepository(ctx, &v1alpha1.Repository{
Repo: "https://github.com/argoproj/argocd-example-apps",
@@ -387,7 +387,7 @@ func TestListHelmRepositories(t *testing.T) {
"key": []byte("test-key"),
},
})
db := NewDB(testNamespace, settings.NewSettingsManager(clientset, testNamespace), clientset)
db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset)
repos, err := db.ListHelmRepos(context.Background())
assert.Nil(t, err)
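
The repeated test change reflects the new NewSettingsManager signature, which now takes a context, presumably bounding the informers (such as the secrets lister used by the db package) that the manager starts. A minimal wiring sketch under those assumptions, with the namespace and fake clientset invented for illustration:

package main

import (
	"context"

	"k8s.io/client-go/kubernetes/fake"

	"github.com/argoproj/argo-cd/util/db"
	"github.com/argoproj/argo-cd/util/settings"
)

func main() {
	// Cancelling this context is assumed to stop the config/secret informers the
	// settings manager runs on behalf of the db layer.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	clientset := fake.NewSimpleClientset()
	settingsMgr := settings.NewSettingsManager(ctx, clientset, "argocd")
	argoDB := db.NewDB("argocd", settingsMgr, clientset)
	_ = argoDB
}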


@@ -1,6 +1,7 @@
package dex
import (
"bytes"
"fmt"
"html"
"io/ioutil"
@@ -44,6 +45,7 @@ func NewDexHTTPReverseProxy(serverAddr string) func(writer http.ResponseWriter,
resp.Header.Set("Content-Length", strconv.Itoa(0))
resp.Header.Set("Location", fmt.Sprintf("/login?sso_error=%s", url.QueryEscape(message)))
resp.StatusCode = http.StatusSeeOther
resp.Body = ioutil.NopCloser(bytes.NewReader(make([]byte, 0)))
return nil
}
return nil
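
The added line gives the rewritten redirect an explicit empty body so it stays consistent with the zero Content-Length. A standalone sketch of this ModifyResponse rewriting pattern, with the upstream address, route, and error condition invented for illustration:

package main

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strconv"
)

// newProxy returns a reverse proxy that converts upstream server errors into an
// empty-bodied redirect to the login page (condition and target are illustrative).
func newProxy(upstream string) *httputil.ReverseProxy {
	target, err := url.Parse(upstream)
	if err != nil {
		panic(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)
	proxy.ModifyResponse = func(resp *http.Response) error {
		if resp.StatusCode >= 500 {
			resp.Header.Set("Content-Length", strconv.Itoa(0))
			resp.Header.Set("Location", "/login?sso_error=upstream+error")
			resp.StatusCode = http.StatusSeeOther
			// Replace the body so it matches the zero Content-Length instead of
			// streaming the upstream error page to the client.
			resp.Body = ioutil.NopCloser(bytes.NewReader(nil))
		}
		return nil
	}
	return proxy
}

func main() {
	http.Handle("/api/dex/", newProxy("http://localhost:5556"))
	_ = http.ListenAndServe(":8080", nil)
}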


@@ -33,14 +33,15 @@ type DiffResultList struct {
func Diff(config, live *unstructured.Unstructured) *DiffResult {
if config != nil {
config = stripTypeInformation(config)
EncodeSecretStringData(config)
Normalize(config)
}
if live != nil {
live = stripTypeInformation(live)
Normalize(live)
}
orig := GetLastAppliedConfigAnnotation(live)
if orig != nil && config != nil {
EncodeSecretStringData(orig)
Normalize(orig)
dr, err := ThreeWayDiff(orig, config, live)
if err == nil {
return dr
@@ -239,9 +240,24 @@ func (d *DiffResult) ASCIIFormat(left *unstructured.Unstructured, formatOpts for
return asciiFmt.Format(d.Diff)
}
// encodeSecretStringData mutates the supplied object and encodes stringData to data. If the object
// is not a secret, or is an invalid secret, then returns the same object
func EncodeSecretStringData(un *unstructured.Unstructured) {
func Normalize(un *unstructured.Unstructured) {
if un == nil {
return
}
// creationTimestamp is sometimes set to null in the config when exported (e.g. SealedSecrets)
// Removing the field allows a cleaner diff.
unstructured.RemoveNestedField(un.Object, "metadata", "creationTimestamp")
gvk := un.GroupVersionKind()
if gvk.Group == "" && gvk.Kind == "Secret" {
NormalizeSecret(un)
} else if gvk.Group == "rbac.authorization.k8s.io" && (gvk.Kind == "ClusterRole" || gvk.Kind == "Role") {
normalizeRole(un)
}
}
// NormalizeSecret mutates the supplied object and encodes stringData to data, and converts nils to
// empty strings. If the object is not a secret, or is an invalid secret, then returns the same object.
func NormalizeSecret(un *unstructured.Unstructured) {
if un == nil {
return
}
@@ -252,29 +268,57 @@ func EncodeSecretStringData(un *unstructured.Unstructured) {
var secret corev1.Secret
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &secret)
if err != nil {
log.Warnf("object unable to convert to secret: %v", err)
return
}
if len(secret.StringData) == 0 {
return
// We normalize nils to empty string to handle: https://github.com/argoproj/argo-cd/issues/943
for k, v := range secret.Data {
if len(v) == 0 {
secret.Data[k] = []byte("")
}
}
if secret.Data == nil {
secret.Data = make(map[string][]byte)
}
for k, v := range secret.StringData {
secret.Data[k] = []byte(v)
if len(secret.StringData) > 0 {
if secret.Data == nil {
secret.Data = make(map[string][]byte)
}
for k, v := range secret.StringData {
secret.Data[k] = []byte(v)
}
delete(un.Object, "stringData")
}
newObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secret)
if err != nil {
log.Warnf("object unable to convert from secret: %v", err)
return
}
err = unstructured.SetNestedMap(un.Object, newObj["data"].(map[string]interface{}), "data")
if err != nil {
log.Warnf("failed to set secret.data: %v", err)
if secret.Data != nil {
err = unstructured.SetNestedMap(un.Object, newObj["data"].(map[string]interface{}), "data")
if err != nil {
log.Warnf("failed to set secret.data: %v", err)
return
}
}
}
// normalizeRole mutates the supplied Role/ClusterRole and sets rules to null if it is an empty list
func normalizeRole(un *unstructured.Unstructured) {
if un == nil {
return
}
delete(un.Object, "stringData")
gvk := un.GroupVersionKind()
if gvk.Group != "rbac.authorization.k8s.io" || (gvk.Kind != "Role" && gvk.Kind != "ClusterRole") {
return
}
rulesIf, ok := un.Object["rules"]
if !ok {
return
}
rules, ok := rulesIf.([]interface{})
if !ok {
return
}
if rules != nil && len(rules) == 0 {
un.Object["rules"] = nil
}
}
// JSONFormat returns the diff as a JSON string
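
An illustrative call into the normalization above, assuming Normalize is importable from the diff util package; the Secret contents are made up:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/argoproj/argo-cd/util/diff"
)

func main() {
	config := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"metadata": map[string]interface{}{
			"name":              "example",
			"creationTimestamp": nil, // e.g. emitted by some exporters; stripped by Normalize
		},
		"stringData": map[string]interface{}{"password": "hunter2"},
	}}

	diff.Normalize(config)

	// stringData has been encoded into data, so it compares cleanly against the
	// base64-encoded data the cluster reports.
	fmt.Println(config.Object["data"])
	_, found, _ := unstructured.NestedFieldNoCopy(config.Object, "metadata", "creationTimestamp")
	fmt.Println("creationTimestamp present:", found)
}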

Some files were not shown because too many files have changed in this diff.