Mirror of https://github.com/argoproj/argo-cd.git

Compare commits: v1.3.0-rc3 ... v0.12.0 (37 commits)
| SHA1 |
|---|
| cd4bb2553d |
| 47e86f6488 |
| b0282b17f9 |
| f6661cc841 |
| e8deeb2622 |
| b0301b43dd |
| d5ee07ef62 |
| 9468012cba |
| 3ffc4586dc |
| 14c9b63f21 |
| 20dea3fa9e |
| 8f990a9b91 |
| 3ca2a8bc2a |
| 268df4364d |
| 68bb7e2046 |
| 4e921a279c |
| ff72b82bd6 |
| 17393c3e70 |
| 3ed3a44944 |
| 27922c8f83 |
| c35f35666f |
| 6a61987d3d |
| c68e4a5a56 |
| 1525f8e051 |
| 1c2b248d27 |
| 22fe538645 |
| 24d73e56f1 |
| 8f93bdc2a5 |
| 6d982ca397 |
| eff67bffad |
| 2b781eea49 |
| 2f0dc20235 |
| 36dc50c121 |
| f8f974e871 |
| c4b474ae98 |
| 8d98d6e058 |
| 233708ecdd |
@@ -44,7 +44,7 @@ spec:
       repo: "{{workflow.parameters.repo}}"
       revision: "{{workflow.parameters.revision}}"
     container:
-      image: argoproj/argo-cd-ci-builder:v0.12.0
+      image: argoproj/argo-cd-ci-builder:v0.13.0
       imagePullPolicy: Always
       command: [bash, -c]
       args: ["{{inputs.parameters.cmd}}"]
@@ -73,7 +73,7 @@ spec:
       repo: "{{workflow.parameters.repo}}"
       revision: "{{workflow.parameters.revision}}"
     container:
-      image: argoproj/argo-cd-ci-builder:v0.12.0
+      image: argoproj/argo-cd-ci-builder:v0.13.0
       imagePullPolicy: Always
      command: [sh, -c]
      args: ["until docker ps; do sleep 3; done && {{inputs.parameters.cmd}}"]
22 .golangci.yml (new file)
@@ -0,0 +1,22 @@
+run:
+  deadline: 8m
+  skip-files:
+    - ".*\\.pb\\.go"
+  skip-dirs:
+    - pkg/client
+    - vendor
+linter-settings:
+  goimports:
+    local-prefixes: github.com/argoproj/argo-cd
+linters:
+  enable:
+    - vet
+    - gofmt
+    - goimports
+    - deadcode
+    - varcheck
+    - structcheck
+    - ineffassign
+    - unconvert
+    - misspell
+  disable-all: true
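The new `.golangci.yml` replaces the old gometalinter setup. For context, a minimal sketch (not from the repo) of the kind of code two of the enabled linters reject:

```go
package main

import "fmt"

func main() {
	x := 1 // ineffassign: this value is never used before the next assignment
	x = 2
	y := int(x) // unconvert: x is already an int, so the conversion is redundant
	fmt.Println(y)
}
```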
@@ -40,10 +40,8 @@ go get -u github.com/golang/protobuf/protoc-gen-go
 go get -u github.com/go-swagger/go-swagger/cmd/swagger
 go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
 go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
-go get -u gopkg.in/alecthomas/gometalinter.v2
+go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
 go get -u github.com/mattn/goreman
-
-gometalinter.v2 --install
 ```

 ## Building

15 Dockerfile
@@ -69,7 +69,7 @@ RUN curl -L -o /usr/local/bin/kustomize1 https://github.com/kubernetes-sigs/kust
     kustomize1 version


-ENV KUSTOMIZE_VERSION=2.0.2
+ENV KUSTOMIZE_VERSION=2.0.3
 RUN curl -L -o /usr/local/bin/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64 && \
     chmod +x /usr/local/bin/kustomize && \
     kustomize version
@@ -79,6 +79,17 @@ ENV AWS_IAM_AUTHENTICATOR_VERSION=0.4.0-alpha.1
 RUN curl -L -o /usr/local/bin/aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/${AWS_IAM_AUTHENTICATOR_VERSION}/aws-iam-authenticator_${AWS_IAM_AUTHENTICATOR_VERSION}_linux_amd64 && \
     chmod +x /usr/local/bin/aws-iam-authenticator

+# Install golangci-lint
+RUN wget https://install.goreleaser.com/github.com/golangci/golangci-lint.sh && \
+    chmod +x ./golangci-lint.sh && \
+    ./golangci-lint.sh -b $GOPATH/bin && \
+    golangci-lint linters
+
+COPY .golangci.yml ${GOPATH}/src/dummy/.golangci.yml
+
+RUN cd ${GOPATH}/src/dummy && \
+    touch dummy.go \
+    golangci-lint run

 ####################################################################################################
 # Argo CD Base - used as the base for both the release and dev argocd images
@@ -94,6 +105,8 @@ RUN groupadd -g 999 argocd && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

+COPY hack/ssh_known_hosts /etc/ssh/ssh_known_hosts
+COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh
 COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
 COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
 COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/kubectl
33 Gopkg.lock (generated)
@@ -48,25 +48,6 @@
   pruneopts = ""
   revision = "09c41003ee1d5015b75f331e52215512e7145b8d"

-[[projects]]
-  branch = "master"
-  digest = "1:a74730e052a45a3fab1d310fdef2ec17ae3d6af16228421e238320846f2aaec8"
-  name = "github.com/alecthomas/template"
-  packages = [
-    ".",
-    "parse",
-  ]
-  pruneopts = ""
-  revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
-
-[[projects]]
-  branch = "master"
-  digest = "1:8483994d21404c8a1d489f6be756e25bfccd3b45d65821f25695577791a08e68"
-  name = "github.com/alecthomas/units"
-  packages = ["."]
-  pruneopts = ""
-  revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
-
 [[projects]]
   branch = "master"
   digest = "1:0caf9208419fa5db5a0ca7112affaa9550c54291dda8e2abac0c0e76181c959e"
@@ -760,7 +741,6 @@
   packages = [
     "expfmt",
     "internal/bitbucket.org/ww/goautoneg",
-    "log",
     "model",
   ]
   pruneopts = ""
@@ -992,8 +972,6 @@
   packages = [
    "unix",
    "windows",
    "windows/registry",
    "windows/svc/eventlog",
   ]
   pruneopts = ""
   revision = "d0be0721c37eeb5299f245a996a483160fc36940"
@@ -1115,14 +1093,6 @@
   revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1"
   version = "v1.15.0"

-[[projects]]
-  digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1"
-  name = "gopkg.in/alecthomas/kingpin.v2"
-  packages = ["."]
-  pruneopts = ""
-  revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
-  version = "v2.2.6"
-
 [[projects]]
   digest = "1:bf7444e1e6a36e633f4f1624a67b9e4734cfb879c27ac0a2082ac16aff8462ac"
   name = "gopkg.in/go-playground/webhooks.v3"
@@ -1378,6 +1348,7 @@
     "discovery",
     "discovery/fake",
     "dynamic",
+    "dynamic/fake",
     "informers/core/v1",
     "informers/internalinterfaces",
     "kubernetes",
@@ -1610,7 +1581,6 @@
     "github.com/pkg/errors",
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",
-    "github.com/prometheus/common/log",
     "github.com/sirupsen/logrus",
     "github.com/skratchdot/open-golang/open",
     "github.com/soheilhy/cmux",
@@ -1673,6 +1643,7 @@
     "k8s.io/client-go/discovery",
     "k8s.io/client-go/discovery/fake",
     "k8s.io/client-go/dynamic",
+    "k8s.io/client-go/dynamic/fake",
     "k8s.io/client-go/informers/core/v1",
     "k8s.io/client-go/kubernetes",
     "k8s.io/client-go/kubernetes/fake",
13 Makefile
@@ -81,15 +81,15 @@ manifests:
 # files into the go binary
 .PHONY: server
 server: clean-debug
-	${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
+	CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server

 .PHONY: repo-server
 repo-server:
-	go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
+	CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server

 .PHONY: controller
 controller:
-	${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
+	CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller

 .PHONY: packr
 packr:
@@ -119,10 +119,11 @@ endif
 .PHONY: builder-image
 builder-image:
 	docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) --target builder .
+	docker push $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG)

 .PHONY: dep-ensure
 dep-ensure:
-	dep ensure
+	dep ensure -no-vendor

 .PHONY: format-code
 format-code:
@@ -130,7 +131,7 @@ format-code:

 .PHONY: lint
 lint:
-	gometalinter.v2 --config gometalinter.json ./...
+	golangci-lint run

 .PHONY: test
 test:
@@ -159,4 +160,4 @@ release-precheck: manifests
 	@if [ "$(GIT_TAG)" != "v`cat VERSION`" ]; then echo 'VERSION does not match git tag'; exit 1; fi

 .PHONY: release
-release: release-precheck precheckin image release-cli
+release: release-precheck pre-commit image release-cli
@@ -1503,6 +1503,9 @@
     "clusterOIDCConfig": {
       "type": "object",
       "properties": {
+        "cliClientID": {
+          "type": "string"
+        },
         "clientID": {
           "type": "string"
         },
@@ -2543,12 +2546,18 @@
         "operationState": {
          "$ref": "#/definitions/v1alpha1OperationState"
        },
+        "reconciledAt": {
+          "$ref": "#/definitions/v1Time"
+        },
         "resources": {
           "type": "array",
           "items": {
             "$ref": "#/definitions/v1alpha1ResourceStatus"
           }
         },
+        "sourceType": {
+          "type": "string"
+        },
         "sync": {
           "$ref": "#/definitions/v1alpha1SyncStatus"
         }
@@ -2841,6 +2850,10 @@
         "connectionState": {
           "$ref": "#/definitions/v1alpha1ConnectionState"
         },
+        "insecureIgnoreHostKey": {
+          "type": "boolean",
+          "format": "boolean"
+        },
         "password": {
           "type": "string"
         },
@@ -3,19 +3,21 @@ package main
 import (
 	"fmt"
 	"net"
+	"net/http"
 	"os"
 	"time"

+	"github.com/prometheus/client_golang/prometheus/promhttp"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"

-	"github.com/argoproj/argo-cd"
+	argocd "github.com/argoproj/argo-cd"
+	"github.com/argoproj/argo-cd/common"
 	"github.com/argoproj/argo-cd/errors"
 	"github.com/argoproj/argo-cd/reposerver"
 	"github.com/argoproj/argo-cd/util/cache"
 	"github.com/argoproj/argo-cd/util/cli"
 	"github.com/argoproj/argo-cd/util/git"
 	"github.com/argoproj/argo-cd/util/ksonnet"
 	"github.com/argoproj/argo-cd/util/stats"
 	"github.com/argoproj/argo-cd/util/tls"
 )
@@ -23,7 +25,6 @@ import (
 const (
 	// CLIName is the name of the CLI
 	cliName = "argocd-repo-server"
-	port    = 8081
 )

 func newCommand() *cobra.Command {
@@ -48,14 +49,13 @@ func newCommand() *cobra.Command {
 			server, err := reposerver.NewServer(git.NewFactory(), cache, tlsConfigCustomizer, parallelismLimit)
 			errors.CheckError(err)
 			grpc := server.CreateGRPC()
-			listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+			listener, err := net.Listen("tcp", fmt.Sprintf(":%d", common.PortRepoServer))
 			errors.CheckError(err)

 			ksVers, err := ksonnet.KsonnetVersion()
 			errors.CheckError(err)
+			http.Handle("/metrics", promhttp.Handler())
+			go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf(":%d", common.PortRepoServerMetrics), nil)) }()

 			log.Infof("argocd-repo-server %s serving on %s", argocd.GetVersion(), listener.Addr())
 			log.Infof("ksonnet version: %s", ksVers)
 			stats.RegisterStackDumper()
 			stats.StartStatsTicker(10 * time.Minute)
 			stats.RegisterHeapDumper("memprofile")
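The repo-server change above replaces the hard-coded port with `common.PortRepoServer` and exposes Prometheus metrics on a second listener. A minimal, self-contained sketch of that metrics wiring; the port constant is inlined here for illustration:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	const portRepoServerMetrics = 8084 // mirrors common.PortRepoServerMetrics in the diff

	http.Handle("/metrics", promhttp.Handler())
	// Serve metrics in the background so the main goroutine stays free
	// for the primary (gRPC) listener.
	go func() {
		log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", portRepoServerMetrics), nil))
	}()
	select {} // stand-in for the real server loop
}
```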
@@ -81,7 +81,7 @@ func NewCommand() *cobra.Command {
 			ctx := context.Background()
 			ctx, cancel := context.WithCancel(ctx)
 			argocd := server.NewServer(ctx, argoCDOpts)
-			argocd.Run(ctx, 8080)
+			argocd.Run(ctx, common.PortAPIServer)
 			cancel()
 		}
 	},
@@ -27,13 +27,13 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"

+	"github.com/argoproj/argo-cd/controller"
 	"github.com/argoproj/argo-cd/errors"
 	"github.com/argoproj/argo-cd/pkg/apiclient"
 	argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
 	argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
 	"github.com/argoproj/argo-cd/reposerver/repository"
 	"github.com/argoproj/argo-cd/server/application"
-	apirepository "github.com/argoproj/argo-cd/server/repository"
 	"github.com/argoproj/argo-cd/server/settings"
 	"github.com/argoproj/argo-cd/util"
 	"github.com/argoproj/argo-cd/util/argo"
@@ -116,7 +116,7 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
 			},
 		}
 		setAppOptions(c.Flags(), &app, &appOpts)
-		setParameterOverrides(&app, argocdClient, appOpts.parameters)
+		setParameterOverrides(&app, appOpts.parameters)
 	}
 	if app.Name == "" {
 		c.HelpFunc()(c, args)
@@ -225,6 +225,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com

 func printAppSummaryTable(app *argoappv1.Application, appURL string) {
 	fmt.Printf(printOpFmtStr, "Name:", app.Name)
+	fmt.Printf(printOpFmtStr, "Project:", app.Spec.GetProject())
 	fmt.Printf(printOpFmtStr, "Server:", app.Spec.Destination.Server)
 	fmt.Printf(printOpFmtStr, "Namespace:", app.Spec.Destination.Namespace)
 	fmt.Printf(printOpFmtStr, "URL:", appURL)
@@ -353,7 +354,7 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
 			c.HelpFunc()(c, args)
 			os.Exit(1)
 		}
-		setParameterOverrides(app, argocdClient, appOpts.parameters)
+		setParameterOverrides(app, appOpts.parameters)
 		_, err = appIf.UpdateSpec(ctx, &application.ApplicationUpdateSpecRequest{
 			Name: &app.Name,
 			Spec: app.Spec,
@@ -611,6 +612,17 @@ func getLocalObjects(app *argoappv1.Application, local string, appLabelKey strin
 	return objs
 }

+type resourceInfoProvider struct {
+	namespacedByGk map[schema.GroupKind]bool
+}
+
+// Infer if obj is namespaced or not from corresponding live objects list. If corresponding live object has namespace then target object is also namespaced.
+// If live object is missing then it does not matter if target is namespaced or not.
+func (p *resourceInfoProvider) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
+	key := kube.GetResourceKey(obj)
+	return p.namespacedByGk[key.GroupKind()], nil
+}
+
 func groupLocalObjs(localObs []*unstructured.Unstructured, liveObjs []*unstructured.Unstructured, appNamespace string) map[kube.ResourceKey]*unstructured.Unstructured {
 	namespacedByGk := make(map[schema.GroupKind]bool)
 	for i := range liveObjs {
@@ -619,22 +631,13 @@ func groupLocalObjs(localObs []*unstructured.Unstructured, liveObjs []*unstructu
 			namespacedByGk[schema.GroupKind{Group: key.Group, Kind: key.Kind}] = key.Namespace != ""
 		}
 	}
+	localObs, _, err := controller.DeduplicateTargetObjects("", appNamespace, localObs, &resourceInfoProvider{namespacedByGk: namespacedByGk})
+	errors.CheckError(err)
 	objByKey := make(map[kube.ResourceKey]*unstructured.Unstructured)
 	for i := range localObs {
 		obj := localObs[i]
-		gk := obj.GroupVersionKind().GroupKind()
-		// Infer if obj is namespaced or not from corresponding live objects list. If corresponding live object has namespace then target object is also namespaced.
-		// If live object is missing then it does not matter if target is namespaced or not.
-		namespace := obj.GetNamespace()
-		if !namespacedByGk[gk] {
-			namespace = ""
-		} else {
-			if namespace == "" {
-				namespace = appNamespace
-			}
-		}
 		if !hook.IsHook(obj) {
-			objByKey[kube.NewResourceKey(gk.Group, gk.Kind, namespace, obj.GetName())] = obj
+			objByKey[kube.GetResourceKey(obj)] = obj
 		}
 	}
 	return objByKey
@@ -651,11 +654,11 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 	var command = &cobra.Command{
 		Use:   "diff APPNAME",
 		Short: shortDesc,
-		Long:  shortDesc + "\nUses 'diff' to render the difference. KUBECTL_EXTERNAL_DIFF environment variable can be used to select your own diff tool.",
+		Long:  shortDesc + "\nUses 'diff' to render the difference. KUBECTL_EXTERNAL_DIFF environment variable can be used to select your own diff tool.\nReturns the following exit codes: 2 on general errors, 1 when a diff is found, and 0 when no diff is found",
 		Run: func(c *cobra.Command, args []string) {
 			if len(args) == 0 {
 				c.HelpFunc()(c, args)
-				os.Exit(1)
+				os.Exit(2)
 			}

 			clientset := argocdclient.NewClientOrDie(clientOpts)
@@ -684,9 +687,18 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 				err := json.Unmarshal([]byte(res.LiveState), &live)
 				errors.CheckError(err)

-				key := kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name)
+				var key kube.ResourceKey
+				if live != nil {
+					key = kube.GetResourceKey(live)
+				} else {
+					var target = &unstructured.Unstructured{}
+					err = json.Unmarshal([]byte(res.TargetState), &target)
+					errors.CheckError(err)
+					key = kube.GetResourceKey(target)
+				}

 				if local, ok := localObjs[key]; ok || live != nil {
-					if local != nil {
+					if local != nil && !kube.IsCRD(local) {
 						err = kube.SetAppInstanceLabel(local, argoSettings.AppLabelKey, appName)
 						errors.CheckError(err)
 					}
@@ -737,6 +749,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 					}
 				}

+				foundDiffs := false
 				for i := range items {
 					item := items[i]
 					// TODO (amatyushentsev): use resource overrides exposed from API server
@@ -744,8 +757,8 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 					errors.CheckError(err)
 					// Diff is already available in ResourceDiff Diff field but we have to recalculate diff again due to https://github.com/yudai/gojsondiff/issues/31
 					diffRes := diff.Diff(item.target, item.live, normalizer)
-					fmt.Printf("===== %s/%s %s/%s ======\n", item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name)
 					if diffRes.Modified || item.target == nil || item.live == nil {
+						fmt.Printf("===== %s/%s %s/%s ======\n", item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name)
 						var live *unstructured.Unstructured
 						var target *unstructured.Unstructured
 						if item.target != nil && item.live != nil {
@@ -757,9 +770,13 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 							target = item.target
 						}

+						foundDiffs = true
 						printDiff(item.key.Name, target, live)
 					}
 				}
+				if foundDiffs {
+					os.Exit(1)
+				}

 			},
 		}
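With this change `argocd app diff` signals its result through the exit code: 0 for no diff, 1 when a diff is found, 2 on general errors. A hedged sketch of how a caller, written here in Go, might consume that contract; the binary name and app name are illustrative:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("argocd", "app", "diff", "guestbook")
	err := cmd.Run()
	code := 0
	if exitErr, ok := err.(*exec.ExitError); ok {
		code = exitErr.ExitCode() // non-zero exit from the diff command
	} else if err != nil {
		panic(err) // the command failed to start at all
	}
	switch code {
	case 0:
		fmt.Println("live state matches target state")
	case 1:
		fmt.Println("drift detected")
	default:
		fmt.Println("diff command failed")
	}
}
```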
@@ -1290,21 +1307,30 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
 // If the app is a ksonnet app, then parameters are expected to be in the form: component=param=value
 // Otherwise, the app is assumed to be a helm app and is expected to be in the form:
 // param=value
-func setParameterOverrides(app *argoappv1.Application, argocdClient argocdclient.Client, parameters []string) {
+func setParameterOverrides(app *argoappv1.Application, parameters []string) {
 	if len(parameters) == 0 {
 		return
 	}
-	conn, repoIf := argocdClient.NewRepoClientOrDie()
-	defer util.Close(conn)
+	var sourceType argoappv1.ApplicationSourceType
+	if st, _ := app.Spec.Source.ExplicitType(); st != nil {
+		sourceType = *st
+	} else if app.Status.SourceType != "" {
+		sourceType = app.Status.SourceType
+	} else {
+		// HACK: we don't know the source type, so make an educated guess based on the supplied
+		// parameter string. This code handles the corner case where app doesn't exist yet, and the
+		// command is something like: `argocd app create MYAPP -p foo=bar`
+		// This logic is not foolproof, but when ksonnet is deprecated, this will no longer matter
+		// since helm will remain as the only source type which has parameters.
+		if len(strings.SplitN(parameters[0], "=", 3)) == 3 {
+			sourceType = argoappv1.ApplicationSourceTypeKsonnet
+		} else if len(strings.SplitN(parameters[0], "=", 2)) == 2 {
+			sourceType = argoappv1.ApplicationSourceTypeHelm
+		}
+	}

-	appDetails, err := repoIf.GetAppDetails(context.Background(), &apirepository.RepoAppDetailsQuery{
-		Repo:     app.Spec.Source.RepoURL,
-		Revision: app.Spec.Source.TargetRevision,
-		Path:     app.Spec.Source.Path,
-	})
-	errors.CheckError(err)
-
-	if appDetails.Ksonnet != nil {
+	switch sourceType {
+	case argoappv1.ApplicationSourceTypeKsonnet:
 		if app.Spec.Source.Ksonnet == nil {
 			app.Spec.Source.Ksonnet = &argoappv1.ApplicationSourceKsonnet{}
 		}
@@ -1330,7 +1356,7 @@ func setParameterOverrides(app *argoappv1.Application, argocdClient argocdclient
 			app.Spec.Source.Ksonnet.Parameters = append(app.Spec.Source.Ksonnet.Parameters, newParam)
 		}
 	}
-	} else if appDetails.Helm != nil {
+	case argoappv1.ApplicationSourceTypeHelm:
 		if app.Spec.Source.Helm == nil {
 			app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{}
 		}
@@ -1355,7 +1381,7 @@ func setParameterOverrides(app *argoappv1.Application, argocdClient argocdclient
 			app.Spec.Source.Helm.Parameters = append(app.Spec.Source.Helm.Parameters, newParam)
 		}
 	}
-	} else {
+	default:
 		log.Fatalf("Parameters can only be set against Ksonnet or Helm applications")
 	}
 }
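The HACK comment above guesses the source type from the shape of the first parameter: `component=param=value` (three fields) implies ksonnet, `param=value` (two fields) implies helm. A standalone sketch of just that guess, with illustrative parameter strings:

```go
package main

import (
	"fmt"
	"strings"
)

func guessSourceType(param string) string {
	// Three "="-separated fields means a ksonnet-style component override.
	if len(strings.SplitN(param, "=", 3)) == 3 {
		return "Ksonnet"
	} else if len(strings.SplitN(param, "=", 2)) == 2 {
		return "Helm"
	}
	return "Unknown"
}

func main() {
	fmt.Println(guessSourceType("guestbook-ui=image=example/image:0.2")) // Ksonnet
	fmt.Println(guessSourceType("image.tag=v1.2.3"))                     // Helm
}
```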
@@ -15,7 +15,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/argoproj/argo-cd/errors"
 	argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
@@ -39,9 +39,10 @@ func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 // NewRepoAddCommand returns a new instance of an `argocd repo add` command
 func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 	var (
-		repo              appsv1.Repository
-		upsert            bool
-		sshPrivateKeyPath string
+		repo                  appsv1.Repository
+		upsert                bool
+		sshPrivateKeyPath     string
+		insecureIgnoreHostKey bool
 	)
 	var command = &cobra.Command{
 		Use: "add REPO",
@@ -59,12 +60,13 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 				}
 				repo.SSHPrivateKey = string(keyData)
 			}
+			repo.InsecureIgnoreHostKey = insecureIgnoreHostKey
 			// First test the repo *without* username/password. This gives us a hint on whether this
 			// is a private repo.
 			// NOTE: it is important not to run git commands to test git credentials on the user's
 			// system since it may mess with their git credential store (e.g. osx keychain).
 			// See issue #315
-			err := git.TestRepo(repo.Repo, "", "", repo.SSHPrivateKey)
+			err := git.TestRepo(repo.Repo, "", "", repo.SSHPrivateKey, repo.InsecureIgnoreHostKey)
 			if err != nil {
 				if git.IsSSHURL(repo.Repo) {
 					// If we failed using git SSH credentials, then the repo is automatically bad
@@ -88,6 +90,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 	command.Flags().StringVar(&repo.Username, "username", "", "username to the repository")
 	command.Flags().StringVar(&repo.Password, "password", "", "password to the repository")
 	command.Flags().StringVar(&sshPrivateKeyPath, "ssh-private-key-path", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
+	command.Flags().BoolVar(&insecureIgnoreHostKey, "insecure-ignore-host-key", false, "disables SSH strict host key checking")
	command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
 	return command
 }
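The new `--insecure-ignore-host-key` flag disables SSH strict host-key checking when the repo is tested and fetched. As an illustration of what that means at the SSH layer, using `golang.org/x/crypto/ssh`; this is not argo-cd's actual git transport code:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// sshConfig shows the host-key side of the flag only; key auth is omitted.
func sshConfig(user string, ignoreHostKey bool) *ssh.ClientConfig {
	cfg := &ssh.ClientConfig{User: user}
	if ignoreHostKey {
		// Accepts any server key: convenient for test setups, unsafe otherwise.
		cfg.HostKeyCallback = ssh.InsecureIgnoreHostKey()
	}
	// A production config would instead install a known_hosts-backed callback.
	return cfg
}

func main() {
	fmt.Println(sshConfig("git", true).User)
}
```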
@@ -22,6 +22,7 @@ const (
 	PortRepoServer             = 8081
 	PortArgoCDMetrics          = 8082
 	PortArgoCDAPIServerMetrics = 8083
+	PortRepoServerMetrics      = 8084
 )

 // Argo CD application related constants
@@ -112,8 +112,8 @@ func NewApplicationController(
 	}
 	appInformer, appLister := ctrl.newApplicationInformerAndLister()
 	projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, cache.Indexers{})
-	stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settings, kubectlCmd, func(appName string) {
-		ctrl.requestAppRefresh(appName)
+	stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settings, kubectlCmd, func(appName string, fullRefresh bool) {
+		ctrl.requestAppRefresh(appName, fullRefresh)
 		ctrl.appRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, appName))
 	})
 	appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd, ctrl.settings, stateCache, projInformer)
@@ -143,11 +143,11 @@ func (ctrl *ApplicationController) getApp(name string) (*appv1.Application, erro
 }

 func (ctrl *ApplicationController) setAppManagedResources(a *appv1.Application, comparisonResult *comparisonResult) error {
-	tree, err := ctrl.resourceTree(a, comparisonResult.managedResources)
+	managedResources, err := ctrl.managedResources(a, comparisonResult)
 	if err != nil {
 		return err
 	}
-	managedResources, err := ctrl.managedResources(a, comparisonResult)
+	tree, err := ctrl.resourceTree(a, managedResources)
 	if err != nil {
 		return err
 	}
@@ -158,20 +158,40 @@ func (ctrl *ApplicationController) setAppManagedResources(a *appv1.Application,
 	return ctrl.cache.SetAppManagedResources(a.Name, managedResources)
 }

-func (ctrl *ApplicationController) resourceTree(a *appv1.Application, resources []managedResource) ([]*appv1.ResourceNode, error) {
+func (ctrl *ApplicationController) resourceTree(a *appv1.Application, managedResources []*appv1.ResourceDiff) ([]*appv1.ResourceNode, error) {
 	items := make([]*appv1.ResourceNode, 0)
-	for i := range resources {
-		managedResource := resources[i]
-		node := appv1.ResourceNode{
-			Name:      managedResource.Name,
-			Version:   managedResource.Version,
-			Kind:      managedResource.Kind,
-			Group:     managedResource.Group,
-			Namespace: managedResource.Namespace,
+	for i := range managedResources {
+		managedResource := managedResources[i]
+		var live = &unstructured.Unstructured{}
+		err := json.Unmarshal([]byte(managedResource.LiveState), &live)
+		if err != nil {
+			return nil, err
 		}
-		if managedResource.Live != nil {
-			node.ResourceVersion = managedResource.Live.GetResourceVersion()
-			children, err := ctrl.stateCache.GetChildren(a.Spec.Destination.Server, managedResource.Live)
+		var target = &unstructured.Unstructured{}
+		err = json.Unmarshal([]byte(managedResource.TargetState), &target)
+		if err != nil {
+			return nil, err
+		}
+		version := ""
+		resourceVersion := ""
+		if live != nil {
+			resourceVersion = live.GetResourceVersion()
+			version = live.GroupVersionKind().Version
+		} else if target != nil {
+			version = target.GroupVersionKind().Version
+		}
+
+		node := appv1.ResourceNode{
+			Version:         version,
+			ResourceVersion: resourceVersion,
+			Name:            managedResource.Name,
+			Kind:            managedResource.Kind,
+			Group:           managedResource.Group,
+			Namespace:       managedResource.Namespace,
+		}
+
+		if live != nil {
+			children, err := ctrl.stateCache.GetChildren(a.Spec.Destination.Server, live)
 			if err != nil {
 				return nil, err
 			}
@@ -269,20 +289,20 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
 	<-ctx.Done()
 }

-func (ctrl *ApplicationController) requestAppRefresh(appName string) {
+func (ctrl *ApplicationController) requestAppRefresh(appName string, fullRefresh bool) {
 	ctrl.refreshRequestedAppsMutex.Lock()
 	defer ctrl.refreshRequestedAppsMutex.Unlock()
-	ctrl.refreshRequestedApps[appName] = true
+	ctrl.refreshRequestedApps[appName] = fullRefresh || ctrl.refreshRequestedApps[appName]
 }

-func (ctrl *ApplicationController) isRefreshRequested(appName string) bool {
+func (ctrl *ApplicationController) isRefreshRequested(appName string) (bool, bool) {
 	ctrl.refreshRequestedAppsMutex.Lock()
 	defer ctrl.refreshRequestedAppsMutex.Unlock()
-	_, ok := ctrl.refreshRequestedApps[appName]
+	fullRefresh, ok := ctrl.refreshRequestedApps[appName]
 	if ok {
 		delete(ctrl.refreshRequestedApps, appName)
 	}
-	return ok
+	return ok, fullRefresh
 }

 func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext bool) {
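The map value now records whether any pending request asked for a *full* refresh, and the OR on write keeps a later lightweight request from downgrading an earlier full one. A self-contained sketch of that coalescing behavior:

```go
package main

import (
	"fmt"
	"sync"
)

type refreshTracker struct {
	mu      sync.Mutex
	pending map[string]bool // appName -> full refresh requested
}

func (t *refreshTracker) request(app string, full bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	// A later lightweight request must not downgrade an earlier full one.
	t.pending[app] = full || t.pending[app]
}

func (t *refreshTracker) take(app string) (requested, full bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	full, requested = t.pending[app]
	if requested {
		delete(t.pending, app) // consume the request, as isRefreshRequested does
	}
	return requested, full
}

func main() {
	t := &refreshTracker{pending: map[string]bool{}}
	t.request("guestbook", true)     // full refresh requested
	t.request("guestbook", false)    // lightweight request arrives later
	fmt.Println(t.take("guestbook")) // true true: the full flag survived
}
```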
@@ -475,7 +495,7 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
 	if state.Phase.Completed() {
 		// if we just completed an operation, force a refresh so that UI will report up-to-date
 		// sync/health information
-		ctrl.requestAppRefresh(app.ObjectMeta.Name)
+		ctrl.requestAppRefresh(app.ObjectMeta.Name, true)
 	}
 }
@@ -566,19 +586,39 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
 		log.Warnf("Key '%s' in index is not an application", appKey)
 		return
 	}
-	needRefresh, refreshType := ctrl.needRefreshAppStatus(origApp, ctrl.statusRefreshTimeout)
+	needRefresh, refreshType, fullRefresh := ctrl.needRefreshAppStatus(origApp, ctrl.statusRefreshTimeout)

 	if !needRefresh {
 		return
 	}

 	startTime := time.Now()
 	defer func() {
 		reconcileDuration := time.Now().Sub(startTime)
 		ctrl.metricsServer.IncReconcile(origApp, reconcileDuration)
-		logCtx := log.WithFields(log.Fields{"application": origApp.Name, "time_ms": reconcileDuration.Seconds() * 1e3})
+		logCtx := log.WithFields(log.Fields{"application": origApp.Name, "time_ms": reconcileDuration.Seconds() * 1e3, "full": fullRefresh})
 		logCtx.Info("Reconciliation completed")
 	}()

 	app := origApp.DeepCopy()
+	logCtx := log.WithFields(log.Fields{"application": app.Name})
+	if !fullRefresh {
+		if managedResources, err := ctrl.cache.GetAppManagedResources(app.Name); err != nil {
+			logCtx.Warnf("Failed to get cached managed resources for tree reconciliation, fallback to full reconciliation")
+		} else {
+			if tree, err := ctrl.resourceTree(app, managedResources); err != nil {
+				app.Status.Conditions = []appv1.ApplicationCondition{{Type: appv1.ApplicationConditionComparisonError, Message: err.Error()}}
+			} else {
+				if err = ctrl.cache.SetAppResourcesTree(app.Name, tree); err != nil {
+					logCtx.Errorf("Failed to cache resources tree: %v", err)
+					return
+				}
+			}
+			app.Status.ObservedAt = metav1.Now()
+			ctrl.persistAppStatus(origApp, &app.Status)
+			return
+		}
+	}

 	conditions, hasErrors := ctrl.refreshAppConditions(app)
 	if hasErrors {
@@ -598,7 +638,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
 	}
 	err = ctrl.setAppManagedResources(app, compareResult)
 	if err != nil {
-		conditions = append(conditions, appv1.ApplicationCondition{Type: appv1.ApplicationConditionComparisonError, Message: err.Error()})
+		logCtx.Errorf("Failed to cache app resources: %v", err)
 	}

 	syncErrCond := ctrl.autoSync(app, compareResult.syncStatus)
@@ -606,26 +646,32 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
 		conditions = append(conditions, *syncErrCond)
 	}

-	app.Status.ObservedAt = compareResult.observedAt
+	app.Status.ObservedAt = compareResult.reconciledAt
+	app.Status.ReconciledAt = compareResult.reconciledAt
 	app.Status.Sync = *compareResult.syncStatus
 	app.Status.Health = *compareResult.healthStatus
 	app.Status.Resources = compareResult.resources
 	app.Status.Conditions = conditions
+	app.Status.SourceType = compareResult.appSourceType
 	ctrl.persistAppStatus(origApp, &app.Status)
 	return
 }

 // needRefreshAppStatus answers if application status needs to be refreshed.
 // Returns true if application never been compared, has changed or comparison result has expired.
-func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType) {
+// Additionally returns whether full refresh was requested or not.
+// If full refresh is requested then target and live state should be reconciled, else only live state tree should be updated.
+func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType, bool) {
 	logCtx := log.WithFields(log.Fields{"application": app.Name})
 	var reason string
+	fullRefresh := true
 	refreshType := appv1.RefreshTypeNormal
-	expired := app.Status.ObservedAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
+	expired := app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
 	if requestedType, ok := app.IsRefreshRequested(); ok {
 		refreshType = requestedType
 		reason = fmt.Sprintf("%s refresh requested", refreshType)
-	} else if ctrl.isRefreshRequested(app.Name) {
+	} else if requested, full := ctrl.isRefreshRequested(app.Name); requested {
+		fullRefresh = full
 		reason = fmt.Sprintf("controller refresh requested")
 	} else if app.Status.Sync.Status == appv1.SyncStatusCodeUnknown && expired {
 		reason = "comparison status unknown"
@@ -634,13 +680,13 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
 	} else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) {
 		reason = "spec.destination differs"
 	} else if expired {
-		reason = fmt.Sprintf("comparison expired. observedAt: %v, expiry: %v", app.Status.ObservedAt, statusRefreshTimeout)
+		reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
 	}
 	if reason != "" {
 		logCtx.Infof("Refreshing app status (%s)", reason)
-		return true, refreshType
+		return true, refreshType, fullRefresh
 	}
-	return false, refreshType
+	return false, refreshType, fullRefresh
 }

 func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application) ([]appv1.ApplicationCondition, bool) {
@@ -659,7 +705,7 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
 			})
 		}
 	} else {
-		specConditions, err := argo.GetSpecErrors(context.Background(), &app.Spec, proj, ctrl.repoClientset, ctrl.db)
+		specConditions, _, err := argo.GetSpecErrors(context.Background(), &app.Spec, proj, ctrl.repoClientset, ctrl.db)
 		if err != nil {
 			conditions = append(conditions, appv1.ApplicationCondition{
 				Type: appv1.ApplicationConditionUnknownError,
@@ -672,11 +718,12 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)

 	// List of condition types which have to be reevaluated by controller; all remaining conditions should stay as is.
 	reevaluateTypes := map[appv1.ApplicationConditionType]bool{
-		appv1.ApplicationConditionInvalidSpecError:      true,
-		appv1.ApplicationConditionUnknownError:          true,
-		appv1.ApplicationConditionComparisonError:       true,
-		appv1.ApplicationConditionSharedResourceWarning: true,
-		appv1.ApplicationConditionSyncError:             true,
+		appv1.ApplicationConditionInvalidSpecError:        true,
+		appv1.ApplicationConditionUnknownError:            true,
+		appv1.ApplicationConditionComparisonError:         true,
+		appv1.ApplicationConditionSharedResourceWarning:   true,
+		appv1.ApplicationConditionSyncError:               true,
+		appv1.ApplicationConditionRepeatedResourceWarning: true,
 	}
 	appConditions := make([]appv1.ApplicationCondition, 0)
 	for i := 0; i < len(app.Status.Conditions); i++ {
@@ -858,7 +905,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
 			if oldOK && newOK {
 				if toggledAutomatedSync(oldApp, newApp) {
 					log.WithField("application", newApp.Name).Info("Enabled automated sync")
-					ctrl.requestAppRefresh(newApp.Name)
+					ctrl.requestAppRefresh(newApp.Name, true)
 				}
 			}
 			ctrl.appRefreshQueue.Add(key)
154 controller/cache/cache.go
@@ -2,7 +2,6 @@ package cache

 import (
 	"context"
-	"fmt"
 	"sync"

 	log "github.com/sirupsen/logrus"
@@ -19,7 +18,7 @@ import (
 )

 type LiveStateCache interface {
-	IsNamespaced(server string, gvk schema.GroupVersionKind) (bool, error)
+	IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error)
 	// Returns child nodes for a given k8s resource
 	GetChildren(server string, obj *unstructured.Unstructured) ([]appv1.ResourceNode, error)
 	// Returns state of live nodes which correspond for target nodes of specified application.
@@ -43,7 +42,7 @@ func GetTargetObjKey(a *appv1.Application, un *unstructured.Unstructured, isName
 	return key
 }

-func NewLiveStateCache(db db.ArgoDB, appInformer cache.SharedIndexInformer, settings *settings.ArgoCDSettings, kubectl kube.Kubectl, onAppUpdated func(appName string)) LiveStateCache {
+func NewLiveStateCache(db db.ArgoDB, appInformer cache.SharedIndexInformer, settings *settings.ArgoCDSettings, kubectl kube.Kubectl, onAppUpdated func(appName string, fullRefresh bool)) LiveStateCache {
 	return &liveStateCache{
 		appInformer: appInformer,
 		db:          db,
@@ -60,7 +59,7 @@ type liveStateCache struct {
 	clusters    map[string]*clusterInfo
 	lock        *sync.Mutex
 	appInformer cache.SharedIndexInformer
-	onAppUpdated func(appName string)
+	onAppUpdated func(appName string, fullRefresh bool)
 	kubectl     kube.Kubectl
 	settings    *settings.ArgoCDSettings
 }
@@ -73,13 +72,6 @@ func (c *liveStateCache) processEvent(event watch.EventType, obj *unstructured.U
 	return info.processEvent(event, obj)
 }

-func (c *liveStateCache) removeCluster(server string) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	delete(c.clusters, server)
-	log.Infof("Dropped cluster %s cache", server)
-}
-
 func (c *liveStateCache) getCluster(server string) (*clusterInfo, error) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
@@ -90,7 +82,7 @@ func (c *liveStateCache) getCluster(server string) (*clusterInfo, error) {
 		return nil, err
 	}
 	info = &clusterInfo{
-		apis:     make(map[schema.GroupKind]*gkInfo),
+		apisMeta: make(map[schema.GroupKind]*apiMeta),
 		lock:     &sync.Mutex{},
 		nodes:    make(map[kube.ResourceKey]*node),
 		nsIndex:  make(map[string]map[kube.ResourceKey]*node),
@@ -140,12 +132,12 @@ func (c *liveStateCache) Delete(server string, obj *unstructured.Unstructured) e
 	return clusterInfo.delete(obj)
 }

-func (c *liveStateCache) IsNamespaced(server string, gvk schema.GroupVersionKind) (bool, error) {
+func (c *liveStateCache) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
 	clusterInfo, err := c.getSyncedCluster(server)
 	if err != nil {
 		return false, err
 	}
-	return clusterInfo.isNamespaced(gvk.GroupKind()), nil
+	return clusterInfo.isNamespaced(obj), nil
 }

 func (c *liveStateCache) GetChildren(server string, obj *unstructured.Unstructured) ([]appv1.ResourceNode, error) {
@@ -175,135 +167,29 @@ func isClusterHasApps(apps []interface{}, cluster *appv1.Cluster) bool {

 // Run watches for resource changes annotated with application label on all registered clusters and schedule corresponding app refresh.
 func (c *liveStateCache) Run(ctx context.Context) {
-	watchingClustersLock := sync.Mutex{}
-	watchingClusters := make(map[string]struct {
-		cancel  context.CancelFunc
-		cluster *appv1.Cluster
-	})
-
 	util.RetryUntilSucceed(func() error {
 		clusterEventCallback := func(event *db.ClusterEvent) {
-			info, ok := watchingClusters[event.Cluster.Server]
-			hasApps := isClusterHasApps(c.appInformer.GetStore().List(), event.Cluster)
-
-			// cluster resources must be watched only if cluster has at least one app
-			if (event.Type == watch.Deleted || !hasApps) && ok {
-				info.cancel()
-				watchingClustersLock.Lock()
-				delete(watchingClusters, event.Cluster.Server)
-				watchingClustersLock.Unlock()
-			} else if event.Type != watch.Deleted && !ok && hasApps {
-				ctx, cancel := context.WithCancel(ctx)
-				watchingClustersLock.Lock()
-				watchingClusters[event.Cluster.Server] = struct {
-					cancel  context.CancelFunc
-					cluster *appv1.Cluster
-				}{
-					cancel: func() {
-						c.removeCluster(event.Cluster.Server)
-						cancel()
-					},
-					cluster: event.Cluster,
+			c.lock.Lock()
+			defer c.lock.Unlock()
+			if cluster, ok := c.clusters[event.Cluster.Server]; ok {
+				if event.Type == watch.Deleted {
+					cluster.invalidate()
+					delete(c.clusters, event.Cluster.Server)
+				} else if event.Type == watch.Modified {
+					cluster.cluster = event.Cluster
+					cluster.invalidate()
 				}
-				watchingClustersLock.Unlock()
-				go c.watchClusterResources(ctx, *event.Cluster)
+			} else if event.Type == watch.Added && isClusterHasApps(c.appInformer.GetStore().List(), event.Cluster) {
+				go func() {
+					// warm up cache for cluster with apps
+					_, _ = c.getSyncedCluster(event.Cluster.Server)
+				}()
 			}
 		}
-
-		onAppModified := func(obj interface{}) {
-			if app, ok := obj.(*appv1.Application); ok {
-				var cluster *appv1.Cluster
-				info, infoOk := watchingClusters[app.Spec.Destination.Server]
-				if infoOk {
-					cluster = info.cluster
-				} else {
-					cluster, _ = c.db.GetCluster(ctx, app.Spec.Destination.Server)
-				}
-				if cluster != nil {
-					// trigger cluster event every time when app created/deleted to either start or stop watching resources
-					clusterEventCallback(&db.ClusterEvent{Cluster: cluster, Type: watch.Modified})
-				}
-			}
-		}
-
-		c.appInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
-			AddFunc: onAppModified,
-			UpdateFunc: func(oldObj, newObj interface{}) {
-				oldApp, oldOk := oldObj.(*appv1.Application)
-				newApp, newOk := newObj.(*appv1.Application)
-				if oldOk && newOk {
-					if oldApp.Spec.Destination.Server != newApp.Spec.Destination.Server {
-						onAppModified(oldObj)
-						onAppModified(newApp)
-					}
-				}
-			},
-			DeleteFunc: onAppModified,
-		})
-
 		return c.db.WatchClusters(ctx, clusterEventCallback)

 	}, "watch clusters", ctx, clusterRetryTimeout)

 	<-ctx.Done()
 }

-// watchClusterResources watches for resource changes annotated with application label on specified cluster and schedule corresponding app refresh.
-func (c *liveStateCache) watchClusterResources(ctx context.Context, item appv1.Cluster) {
-	util.RetryUntilSucceed(func() (err error) {
-		defer func() {
-			if r := recover(); r != nil {
-				err = fmt.Errorf("Recovered from panic: %v\n", r)
-			}
-		}()
-		config := item.RESTConfig()
-		ctx, cancel := context.WithCancel(ctx)
-		defer cancel()
-
-		ch, err := c.kubectl.WatchResources(ctx, config, c.settings, func(gk schema.GroupKind) (s string, e error) {
-			clusterInfo, err := c.getSyncedCluster(item.Server)
-			if err != nil {
-				return "", err
-			}
-			return clusterInfo.getResourceVersion(gk), nil
-		})
-		if err != nil {
-			return err
-		}
-		for event := range ch {
-			if event.WatchEvent != nil {
-				eventObj := event.WatchEvent.Object.(*unstructured.Unstructured)
-				if kube.IsCRD(eventObj) {
-					// restart if new CRD has been created after watch started
-					if event.WatchEvent.Type == watch.Added {
-						c.removeCluster(item.Server)
-						return fmt.Errorf("Restarting the watch because a new CRD %s was added", eventObj.GetName())
-					} else if event.WatchEvent.Type == watch.Deleted {
-						c.removeCluster(item.Server)
-						return fmt.Errorf("Restarting the watch because CRD %s was deleted", eventObj.GetName())
-					}
-				}
-				err = c.processEvent(event.WatchEvent.Type, eventObj, item.Server)
-				if err != nil {
-					log.Warnf("Failed to process event %s for obj %v: %v", event.WatchEvent.Type, event.WatchEvent.Object, err)
-				}
-			} else {
-				err = c.updateCache(item.Server, event.CacheRefresh.GVK.GroupKind(), event.CacheRefresh.ResourceVersion, event.CacheRefresh.Objects)
-				if err != nil {
-					log.Warnf("Failed to process event %s for obj %v: %v", event.WatchEvent.Type, event.WatchEvent.Object, err)
-				}
-			}
-
-		}
-		return fmt.Errorf("resource updates channel has closed")
-	}, fmt.Sprintf("watch app resources on %s", item.Server), ctx, clusterRetryTimeout)
-}
-
-func (c *liveStateCache) updateCache(server string, gk schema.GroupKind, resourceVersion string, objs []unstructured.Unstructured) error {
-	clusterInfo, err := c.getSyncedCluster(server)
-	if err != nil {
-		return err
-	}
-	clusterInfo.updateCache(gk, resourceVersion, objs)
-	return nil
-}
129 controller/cache/cache_test.go (deleted)
@@ -1,129 +0,0 @@
-package cache
-
-import (
-	"context"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apimachinery/pkg/watch"
-
-	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
-	"github.com/argoproj/argo-cd/util/kube"
-	"github.com/argoproj/argo-cd/util/kube/kubetest"
-)
-
-const (
-	pollInterval = 500 * time.Millisecond
-)
-
-func TestWatchClusterResourcesHandlesResourceEvents(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	events := make(chan kube.WatchEvent)
-	defer func() {
-		cancel()
-		close(events)
-	}()
-
-	pod := testPod.DeepCopy()
-
-	kubeMock := &kubetest.MockKubectlCmd{
-		Resources: []kube.ResourcesBatch{{
-			GVK:     pod.GroupVersionKind(),
-			Objects: make([]unstructured.Unstructured, 0),
-		}},
-		Events: events,
-	}
-
-	server := "https://test"
-	clusterCache := newClusterExt(kubeMock)
-
-	cache := &liveStateCache{
-		clusters: map[string]*clusterInfo{server: clusterCache},
-		lock:     &sync.Mutex{},
-		kubectl:  kubeMock,
-	}
-
-	go cache.watchClusterResources(ctx, v1alpha1.Cluster{Server: server})
-
-	assert.False(t, clusterCache.synced())
-
-	events <- kube.WatchEvent{WatchEvent: &watch.Event{Object: pod, Type: watch.Added}}
-
-	err := wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
-		_, hasPod := clusterCache.nodes[kube.GetResourceKey(pod)]
-		return hasPod, nil
-	})
-	assert.Nil(t, err)
-
-	pod.SetResourceVersion("updated-resource-version")
-	events <- kube.WatchEvent{WatchEvent: &watch.Event{Object: pod, Type: watch.Modified}}
-
-	err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
-		updatedPodInfo, hasPod := clusterCache.nodes[kube.GetResourceKey(pod)]
-		return hasPod && updatedPodInfo.resourceVersion == "updated-resource-version", nil
-	})
-	assert.Nil(t, err)
-
-	events <- kube.WatchEvent{WatchEvent: &watch.Event{Object: pod, Type: watch.Deleted}}
-
-	err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
-		_, hasPod := clusterCache.nodes[kube.GetResourceKey(pod)]
-		return !hasPod, nil
-	})
-	assert.Nil(t, err)
-}
-
-func TestClusterCacheDroppedOnCreatedDeletedCRD(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	events := make(chan kube.WatchEvent)
-	defer func() {
-		cancel()
-		close(events)
-	}()
-
-	kubeMock := &kubetest.MockKubectlCmd{
-		Resources: []kube.ResourcesBatch{{
-			GVK:     testCRD.GroupVersionKind(),
-			Objects: make([]unstructured.Unstructured, 0),
-		}},
-		Events: events,
-	}
-
-	server := "https://test"
-	clusterCache := newClusterExt(kubeMock)
-
-	cache := &liveStateCache{
-		clusters: map[string]*clusterInfo{server: clusterCache},
-		lock:     &sync.Mutex{},
-		kubectl:  kubeMock,
-	}
-
-	go cache.watchClusterResources(ctx, v1alpha1.Cluster{Server: server})
-
-	err := clusterCache.ensureSynced()
-	assert.Nil(t, err)
-
-	events <- kube.WatchEvent{WatchEvent: &watch.Event{Object: testCRD, Type: watch.Added}}
-	err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
-		cache.lock.Lock()
-		defer cache.lock.Unlock()
-		_, hasCache := cache.clusters[server]
-		return !hasCache, nil
-	})
-	assert.Nil(t, err)
-
-	cache.clusters[server] = clusterCache
-
-	events <- kube.WatchEvent{WatchEvent: &watch.Event{Object: testCRD, Type: watch.Deleted}}
-	err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
-		cache.lock.Lock()
-		defer cache.lock.Unlock()
-		_, hasCache := cache.clusters[server]
-		return !hasCache, nil
-	})
-	assert.Nil(t, err)
-}
244 controller/cache/cluster.go
@@ -1,6 +1,7 @@
 package cache

 import (
+	"context"
 	"fmt"
 	"runtime/debug"
 	"sync"
@@ -21,44 +22,38 @@ import (
 )

 const (
-	clusterSyncTimeout  = 24 * time.Hour
-	clusterRetryTimeout = 10 * time.Second
+	clusterSyncTimeout         = 24 * time.Hour
+	clusterRetryTimeout        = 10 * time.Second
+	watchResourcesRetryTimeout = 1 * time.Second
 )

-type gkInfo struct {
-	resource        metav1.APIResource
+type apiMeta struct {
+	namespaced      bool
 	resourceVersion string
+	watchCancel     context.CancelFunc
 }

 type clusterInfo struct {
-	apis         map[schema.GroupKind]*gkInfo
-	nodes        map[kube.ResourceKey]*node
-	nsIndex      map[string]map[kube.ResourceKey]*node
-	lock         *sync.Mutex
-	onAppUpdated func(appName string)
-	syncLock     *sync.Mutex
-	syncTime     *time.Time
-	syncError    error
+	apisMeta map[schema.GroupKind]*apiMeta
+
+	lock    *sync.Mutex
+	nodes   map[kube.ResourceKey]*node
+	nsIndex map[string]map[kube.ResourceKey]*node
+
+	onAppUpdated func(appName string, fullRefresh bool)
 	kubectl   kube.Kubectl
 	cluster   *appv1.Cluster
+	syncLock  *sync.Mutex
+	syncTime  *time.Time
+	syncError error
 	log       *log.Entry
 	settings  *settings.ArgoCDSettings
 }

-func (c *clusterInfo) getResourceVersion(gk schema.GroupKind) string {
+func (c *clusterInfo) replaceResourceCache(gk schema.GroupKind, resourceVersion string, objs []unstructured.Unstructured) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
-	info, ok := c.apis[gk]
-	if ok {
-		return info.resourceVersion
-	}
-	return ""
-}
-
-func (c *clusterInfo) updateCache(gk schema.GroupKind, resourceVersion string, objs []unstructured.Unstructured) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	info, ok := c.apis[gk]
+	info, ok := c.apisMeta[gk]
 	if ok {
 		objByKind := make(map[kube.ResourceKey]*unstructured.Unstructured)
 		for i := range objs {
@@ -136,7 +131,13 @@ func (c *clusterInfo) removeNode(key kube.ResourceKey) {
 }

 func (c *clusterInfo) invalidate() {
 	c.syncLock.Lock()
 	defer c.syncLock.Unlock()
 	c.syncTime = nil
+	for i := range c.apisMeta {
+		c.apisMeta[i].watchCancel()
+	}
+	c.apisMeta = nil
 }

 func (c *clusterInfo) synced() bool {
@@ -149,38 +150,163 @@ func (c *clusterInfo) synced() bool {
|
||||
return time.Now().Before(c.syncTime.Add(clusterSyncTimeout))
|
||||
}
|
||||
|
||||
func (c *clusterInfo) sync() (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = fmt.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
|
||||
}
|
||||
}()
|
||||
func (c *clusterInfo) stopWatching(gk schema.GroupKind) {
|
||||
c.syncLock.Lock()
|
||||
defer c.syncLock.Unlock()
|
||||
if info, ok := c.apisMeta[gk]; ok {
|
||||
info.watchCancel()
|
||||
delete(c.apisMeta, gk)
|
||||
c.replaceResourceCache(gk, "", []unstructured.Unstructured{})
|
||||
log.Warnf("Stop watching %s not found on %s.", gk, c.cluster.Server)
|
||||
}
|
||||
}
|
||||
|
||||
c.log.Info("Start syncing cluster")
|
||||
// startMissingWatches lists supported cluster resources and start watching for changes unless watch is already running
|
||||
func (c *clusterInfo) startMissingWatches() error {
|
||||
|
||||
c.apis = make(map[schema.GroupKind]*gkInfo)
|
||||
c.nodes = make(map[kube.ResourceKey]*node)
|
||||
|
||||
resources, err := c.kubectl.GetResources(c.cluster.RESTConfig(), c.settings, "")
|
||||
apis, err := c.kubectl.GetAPIResources(c.cluster.RESTConfig(), c.settings)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to sync cluster %s: %v", c.cluster.Server, err)
|
||||
return err
|
||||
}
|
||||
|
||||
appLabelKey := c.settings.GetAppInstanceLabelKey()
|
||||
for res := range resources {
|
||||
if res.Error != nil {
|
||||
return res.Error
|
||||
for i := range apis {
|
||||
api := apis[i]
|
||||
if _, ok := c.apisMeta[api.GroupKind]; !ok {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
info := &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel}
|
||||
c.apisMeta[api.GroupKind] = info
|
||||
go c.watchEvents(ctx, api, info)
|
||||
}
|
||||
if _, ok := c.apis[res.GVK.GroupKind()]; !ok {
|
||||
c.apis[res.GVK.GroupKind()] = &gkInfo{
|
||||
resourceVersion: res.ListResourceVersion,
|
||||
resource: res.ResourceInfo,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func runSynced(lock *sync.Mutex, action func() error) error {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
return action()
|
||||
}
|
||||
|
||||
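The runSynced helper introduced above is small but load-bearing: it scopes a mutex to a single closure so the lock is released on every return path, including panics. A minimal, self-contained sketch of the same pattern (package, variable, and map names here are illustrative, not the repo's):

    package main

    import (
        "fmt"
        "sync"
    )

    // runSynced runs action while holding lock; the deferred Unlock fires
    // on normal returns, early error returns, and panics alike.
    func runSynced(lock *sync.Mutex, action func() error) error {
        lock.Lock()
        defer lock.Unlock()
        return action()
    }

    func main() {
        var mu sync.Mutex
        versions := map[string]string{}

        err := runSynced(&mu, func() error {
            versions["Pod"] = "123" // the critical section is exactly this closure
            return nil
        })
        fmt.Println(err, versions["Pod"])
    }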
func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo, info *apiMeta) {
	util.RetryUntilSucceed(func() (err error) {
		defer func() {
			if r := recover(); r != nil {
				err = fmt.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
			}
		}()

		err = runSynced(c.syncLock, func() error {
			if info.resourceVersion == "" {
				list, err := api.Interface.List(metav1.ListOptions{})
				if err != nil {
					return err
				}
				c.replaceResourceCache(api.GroupKind, list.GetResourceVersion(), list.Items)
			}
			return nil
		})

		if err != nil {
			return err
		}

		w, err := api.Interface.Watch(metav1.ListOptions{ResourceVersion: info.resourceVersion})
		if errors.IsNotFound(err) {
			c.stopWatching(api.GroupKind)
			return nil
		}

		err = runSynced(c.syncLock, func() error {
			if errors.IsGone(err) {
				info.resourceVersion = ""
				log.Warnf("Resource version of %s on %s is too old.", api.GroupKind, c.cluster.Server)
			}
			return err
		})

		if err != nil {
			return err
		}
		defer w.Stop()
		for {
			select {
			case <-ctx.Done():
				return nil
			case event, ok := <-w.ResultChan():
				if ok {
					obj := event.Object.(*unstructured.Unstructured)
					info.resourceVersion = obj.GetResourceVersion()
					err = c.processEvent(event.Type, obj)
					if err != nil {
						log.Warnf("Failed to process event %s %s/%s/%s: %v", event.Type, obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err)
						continue
					}

					if kube.IsCRD(obj) {
						if event.Type == watch.Deleted {
							group, groupOk, groupErr := unstructured.NestedString(obj.Object, "spec", "group")
							kind, kindOk, kindErr := unstructured.NestedString(obj.Object, "spec", "names", "kind")

							if groupOk && groupErr == nil && kindOk && kindErr == nil {
								gk := schema.GroupKind{Group: group, Kind: kind}
								c.stopWatching(gk)
							}
						} else {
							err = runSynced(c.syncLock, func() error {
								return c.startMissingWatches()
							})

						}
					}
					if err != nil {
						log.Warnf("Failed to start missing watch: %v", err)
					}
				} else {
					return fmt.Errorf("Watch %s on %s has closed", api.GroupKind, c.cluster.Server)
				}
			}
		}
		for i := range res.Objects {
			c.setNode(createObjInfo(&res.Objects[i], appLabelKey))

	}, fmt.Sprintf("watch %s on %s", api.GroupKind, c.cluster.Server), ctx, watchResourcesRetryTimeout)
}
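watchEvents above follows the standard Kubernetes list-then-watch protocol: list once to prime the cache and record the list's resourceVersion, then watch from that version; a 410 Gone means the version is too old, so it is cleared and the next retry re-lists. A sketch of that handshake against the fake dynamic client which the tests later in this diff also use (note: dynamic-client methods gained a context.Context argument in later client-go releases; this follows the vintage used here):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/dynamic/fake"
    )

    func main() {
        pod := &unstructured.Unstructured{Object: map[string]interface{}{
            "apiVersion": "v1",
            "kind":       "Pod",
            "metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
        }}

        client := fake.NewSimpleDynamicClient(runtime.NewScheme(), pod)
        pods := client.Resource(schema.GroupVersionResource{Version: "v1", Resource: "pods"})

        // Step 1: list to prime the cache and capture the list's resourceVersion.
        list, err := pods.List(metav1.ListOptions{})
        if err != nil {
            panic(err)
        }

        // Step 2: watch from that resourceVersion so no events are missed
        // between the list and the watch. A "Gone" error on this call is the
        // signal to clear the version and re-list, as watchEvents does above.
        w, err := pods.Watch(metav1.ListOptions{ResourceVersion: list.GetResourceVersion()})
        if err != nil {
            panic(err)
        }
        defer w.Stop()
        fmt.Println("watching", len(list.Items), "pods from version", list.GetResourceVersion())
    }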
func (c *clusterInfo) sync() (err error) {

	c.log.Info("Start syncing cluster")

	for i := range c.apisMeta {
		c.apisMeta[i].watchCancel()
	}
	c.apisMeta = make(map[schema.GroupKind]*apiMeta)
	c.nodes = make(map[kube.ResourceKey]*node)

	apis, err := c.kubectl.GetAPIResources(c.cluster.RESTConfig(), c.settings)
	if err != nil {
		return err
	}
	lock := sync.Mutex{}
	err = util.RunAllAsync(len(apis), func(i int) error {
		api := apis[i]
		list, err := api.Interface.List(metav1.ListOptions{})
		if err != nil {
			return err
		}

		lock.Lock()
		for i := range list.Items {
			c.setNode(createObjInfo(&list.Items[i], c.settings.GetAppInstanceLabelKey()))
		}
		lock.Unlock()
		return nil
	})

	if err == nil {
		err = c.startMissingWatches()
	}

	if err != nil {
		log.Errorf("Failed to sync cluster %s: %v", c.cluster.Server, err)
		return err
	}

	c.log.Info("Cluster successfully synced")
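The new sync lists every API group in parallel and funnels results into the node cache under a shared mutex. util.RunAllAsync is the repo's fan-out helper; a minimal sketch of the behavior this code relies on (this version is illustrative, not the repo's implementation):

    package main

    import (
        "fmt"
        "sync"
    )

    // runAllAsync invokes action count times concurrently and returns the
    // first error observed, after all goroutines have finished.
    func runAllAsync(count int, action func(i int) error) error {
        var wg sync.WaitGroup
        var mu sync.Mutex
        var firstErr error
        for i := 0; i < count; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                if err := action(i); err != nil {
                    mu.Lock()
                    if firstErr == nil {
                        firstErr = err
                    }
                    mu.Unlock()
                }
            }(i)
        }
        wg.Wait()
        return firstErr
    }

    func main() {
        err := runAllAsync(3, func(i int) error {
            fmt.Println("listing resource group", i)
            return nil
        })
        fmt.Println("done:", err)
    }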
@@ -219,8 +345,8 @@ func (c *clusterInfo) getChildren(obj *unstructured.Unstructured) []appv1.Resour
	return children
}

func (c *clusterInfo) isNamespaced(gk schema.GroupKind) bool {
	if api, ok := c.apis[gk]; ok && !api.resource.Namespaced {
func (c *clusterInfo) isNamespaced(obj *unstructured.Unstructured) bool {
	if api, ok := c.apisMeta[kube.GetResourceKey(obj).GroupKind()]; ok && !api.namespaced {
		return false
	}
	return true
@@ -242,7 +368,7 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
	lock := &sync.Mutex{}
	err := util.RunAllAsync(len(targetObjs), func(i int) error {
		targetObj := targetObjs[i]
		key := GetTargetObjKey(a, targetObj, c.isNamespaced(targetObj.GroupVersionKind().GroupKind()))
		key := GetTargetObjKey(a, targetObj, c.isNamespaced(targetObj))
		lock.Lock()
		managedObj := managedObjs[key]
		lock.Unlock()
@@ -311,9 +437,6 @@ func (c *clusterInfo) processEvent(event watch.EventType, un *unstructured.Unstr
	c.lock.Lock()
	defer c.lock.Unlock()
	key := kube.GetResourceKey(un)
	if info, ok := c.apis[schema.GroupKind{Group: key.Group, Kind: key.Kind}]; ok {
		info.resourceVersion = un.GetResourceVersion()
	}
	existingNode, exists := c.nodes[key]
	if event == watch.Deleted {
		if exists {
@@ -342,18 +465,23 @@ func (c *clusterInfo) onNodeUpdated(exists bool, existingNode *node, un *unstruc
			if app == "" || skipAppRequeing(key) {
				continue
			}
			toNotify[app] = true
			toNotify[app] = n.isRootAppNode() || toNotify[app]
		}
	}
	for name := range toNotify {
		c.onAppUpdated(name)
	for name, full := range toNotify {
		c.onAppUpdated(name, full)
	}
}

func (c *clusterInfo) onNodeRemoved(key kube.ResourceKey, existingNode *node) {
func (c *clusterInfo) onNodeRemoved(key kube.ResourceKey, n *node) {
	appName := n.appName
	if ns, ok := c.nsIndex[key.Namespace]; ok {
		appName = n.getApp(ns)
	}

	c.removeNode(key)
	if existingNode.appName != "" {
		c.onAppUpdated(existingNode.appName)
	if appName != "" {
		c.onAppUpdated(appName, n.isRootAppNode())
	}
}

69
controller/cache/cluster_test.go
vendored
@@ -1,11 +1,15 @@
package cache

import (
	"fmt"
	"sort"
	"strings"
	"sync"
	"testing"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/dynamic/fake"

	"github.com/ghodss/yaml"
	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
@@ -49,8 +53,7 @@ var (
  resourceVersion: "123"`)

	testRS = strToUnstructured(`
apiVersion: v1
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: helm-guestbook-rs
@@ -62,52 +65,52 @@ var (
  resourceVersion: "123"`)

	testDeploy = strToUnstructured(`
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/instance: helm-guestbook
  name: helm-guestbook
  namespace: default
  resourceVersion: "123"`)

	testCRD = strToUnstructured(`
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: my-custom-resource-definition
  resourceVersion: "123"`)
)

func newCluster(objs ...*unstructured.Unstructured) *clusterInfo {
	resByGVK := make(map[schema.GroupVersionKind][]unstructured.Unstructured)
	runtimeObjs := make([]runtime.Object, len(objs))
	for i := range objs {
		resByGVK[objs[i].GroupVersionKind()] = append(resByGVK[objs[i].GroupVersionKind()], *objs[i])
		runtimeObjs[i] = objs[i]
	}
	resources := make([]kube.ResourcesBatch, 0)
	for gvk, objects := range resByGVK {
		resources = append(resources, kube.ResourcesBatch{
			ListResourceVersion: "1",
			GVK:                 gvk,
			Objects:             objects,
		})
	}
	return newClusterExt(kubetest.MockKubectlCmd{
		Resources: resources,
	})
	scheme := runtime.NewScheme()
	client := fake.NewSimpleDynamicClient(scheme, runtimeObjs...)

	apiResources := []kube.APIResourceInfo{{
		GroupKind: schema.GroupKind{Group: "", Kind: "Pod"},
		Interface: client.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}),
		Meta:      metav1.APIResource{Namespaced: true},
	}, {
		GroupKind: schema.GroupKind{Group: "apps", Kind: "ReplicaSet"},
		Interface: client.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}),
		Meta:      metav1.APIResource{Namespaced: true},
	}, {
		GroupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"},
		Interface: client.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}),
		Meta:      metav1.APIResource{Namespaced: true},
	}}

	return newClusterExt(kubetest.MockKubectlCmd{APIResources: apiResources})
}

func newClusterExt(kubectl kube.Kubectl) *clusterInfo {
	return &clusterInfo{
		lock:         &sync.Mutex{},
		nodes:        make(map[kube.ResourceKey]*node),
		onAppUpdated: func(appName string) {},
		onAppUpdated: func(appName string, fullRefresh bool) {},
		kubectl:      kubectl,
		nsIndex:      make(map[string]map[kube.ResourceKey]*node),
		cluster:      &appv1.Cluster{},
		syncTime:     nil,
		syncLock:     &sync.Mutex{},
		apis:         make(map[schema.GroupKind]*gkInfo),
		apisMeta:     make(map[schema.GroupKind]*apiMeta),
		log:          log.WithField("cluster", "test"),
		settings:     &settings.ArgoCDSettings{},
	}
@@ -135,8 +138,8 @@ func TestGetChildren(t *testing.T) {
		Kind:      "ReplicaSet",
		Namespace: "default",
		Name:      "helm-guestbook-rs",
		Group:     "extensions",
		Version:   "v1beta1",
		Group:     "apps",
		Version:   "v1",
		ResourceVersion: "123",
		Children:        rsChildren,
		Info:            []appv1.InfoItem{},
@@ -149,7 +152,7 @@ func TestGetManagedLiveObjs(t *testing.T) {
	assert.Nil(t, err)

	targetDeploy := strToUnstructured(`
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: helm-guestbook
@@ -269,8 +272,8 @@ func TestUpdateResourceTags(t *testing.T) {
func TestUpdateAppResource(t *testing.T) {
	updatesReceived := make([]string, 0)
	cluster := newCluster(testPod, testRS, testDeploy)
	cluster.onAppUpdated = func(appName string) {
		updatesReceived = append(updatesReceived, appName)
	cluster.onAppUpdated = func(appName string, fullRefresh bool) {
		updatesReceived = append(updatesReceived, fmt.Sprintf("%s: %v", appName, fullRefresh))
	}

	err := cluster.ensureSynced()
@@ -279,7 +282,7 @@ func TestUpdateAppResource(t *testing.T) {
	err = cluster.processEvent(watch.Modified, mustToUnstructured(testPod))
	assert.Nil(t, err)

	assert.Equal(t, []string{"helm-guestbook"}, updatesReceived)
	assert.Contains(t, updatesReceived, "helm-guestbook: false")
}

func TestCircularReference(t *testing.T) {
@@ -316,7 +319,7 @@ func TestWatchCacheUpdated(t *testing.T) {

	podGroupKind := testPod.GroupVersionKind().GroupKind()

	cluster.updateCache(podGroupKind, "updated-list-version", []unstructured.Unstructured{*updated, *added})
	cluster.replaceResourceCache(podGroupKind, "updated-list-version", []unstructured.Unstructured{*updated, *added})

	_, ok := cluster.nodes[kube.GetResourceKey(removed)]
	assert.False(t, ok)
@@ -327,6 +330,4 @@ func TestWatchCacheUpdated(t *testing.T) {

	_, ok = cluster.nodes[kube.GetResourceKey(added)]
	assert.True(t, ok)

	assert.Equal(t, cluster.getResourceVersion(podGroupKind), "updated-list-version")
}

2
controller/cache/info.go
vendored
@@ -3,7 +3,7 @@ package cache
import (
	"fmt"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	k8snode "k8s.io/kubernetes/pkg/util/node"

27
controller/cache/mocks/LiveStateCache.go
vendored
@@ -2,14 +2,11 @@

package mocks

import (
	"context"
)
import "github.com/argoproj/argo-cd/util/kube"
import "github.com/stretchr/testify/mock"
import "k8s.io/apimachinery/pkg/runtime/schema"
import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
import "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import context "context"
import kube "github.com/argoproj/argo-cd/util/kube"
import mock "github.com/stretchr/testify/mock"
import unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
import v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"

// LiveStateCache is an autogenerated mock type for the LiveStateCache type
type LiveStateCache struct {
@@ -81,20 +78,20 @@ func (_m *LiveStateCache) Invalidate() {
	_m.Called()
}

// IsNamespaced provides a mock function with given fields: server, gvk
func (_m *LiveStateCache) IsNamespaced(server string, gvk schema.GroupVersionKind) (bool, error) {
	ret := _m.Called(server, gvk)
// IsNamespaced provides a mock function with given fields: server, obj
func (_m *LiveStateCache) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
	ret := _m.Called(server, obj)

	var r0 bool
	if rf, ok := ret.Get(0).(func(string, schema.GroupVersionKind) bool); ok {
		r0 = rf(server, gvk)
	if rf, ok := ret.Get(0).(func(string, *unstructured.Unstructured) bool); ok {
		r0 = rf(server, obj)
	} else {
		r0 = ret.Get(0).(bool)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string, schema.GroupVersionKind) error); ok {
		r1 = rf(server, gvk)
	if rf, ok := ret.Get(1).(func(string, *unstructured.Unstructured) error); ok {
		r1 = rf(server, obj)
	} else {
		r1 = ret.Error(1)
	}

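Generated mocks like the one above are driven from tests with testify's .On/.Return API. A small illustrative example of programming such a mock, using a simplified stand-in interface rather than the real LiveStateCache:

    package main

    import (
        "fmt"

        "github.com/stretchr/testify/mock"
    )

    // Namespacer is a simplified stand-in for the mocked interface above;
    // names here are illustrative.
    type Namespacer struct {
        mock.Mock
    }

    func (m *Namespacer) IsNamespaced(server string, kind string) (bool, error) {
        ret := m.Called(server, kind)
        return ret.Bool(0), ret.Error(1)
    }

    func main() {
        m := &Namespacer{}
        // Program the mock: a call with these arguments returns (true, nil).
        m.On("IsNamespaced", "https://example.cluster", "Deployment").Return(true, nil)

        namespaced, err := m.IsNamespaced("https://example.cluster", "Deployment")
        fmt.Println(namespaced, err)
    }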
8
controller/cache/node.go
vendored
@@ -6,7 +6,7 @@ import (
	appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/util/kube"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -21,13 +21,17 @@ type node struct {
	resource *unstructured.Unstructured
}

func (n *node) isRootAppNode() bool {
	return n.appName != "" && len(n.ownerRefs) == 0
}

func (n *node) resourceKey() kube.ResourceKey {
	return kube.NewResourceKey(n.ref.GroupVersionKind().Group, n.ref.Kind, n.ref.Namespace, n.ref.Name)
}

func (n *node) isParentOf(child *node) bool {
	ownerGvk := n.ref.GroupVersionKind()
	for _, ownerRef := range child.ownerRefs {
		ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind)
		if kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name) == n.resourceKey() {
			return true
		}

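The reworked isParentOf derives the owner's API group from each ownerRef's apiVersion before comparing resource keys, so two kinds that share a name but live in different groups can no longer be mistaken for parent and child (the case covered by TestIsParentOfSameKindDifferentGroup in the next file). The comparison hinges on this apimachinery call:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    func main() {
        // An ownerRef records apiVersion and kind; the group is derived from
        // the apiVersion, so "apps/v1 ReplicaSet" and "somecrd.io/v1 ReplicaSet"
        // resolve to different groups and cannot be confused as parents.
        builtin := schema.FromAPIVersionAndKind("apps/v1", "ReplicaSet")
        custom := schema.FromAPIVersionAndKind("somecrd.io/v1", "ReplicaSet")

        fmt.Println(builtin.Group)                              // "apps"
        fmt.Println(custom.Group)                               // "somecrd.io"
        fmt.Println(builtin.GroupKind() == custom.GroupKind())  // false
    }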
25
controller/cache/node_test.go
vendored
Normal file
@@ -0,0 +1,25 @@
package cache

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestIsParentOf(t *testing.T) {
	child := createObjInfo(testPod, "")
	parent := createObjInfo(testRS, "")
	grandParent := createObjInfo(testDeploy, "")

	assert.True(t, parent.isParentOf(child))
	assert.False(t, grandParent.isParentOf(child))
}

func TestIsParentOfSameKindDifferentGroup(t *testing.T) {
	rs := testRS.DeepCopy()
	rs.SetAPIVersion("somecrd.io/v1")
	child := createObjInfo(testPod, "")
	invalidParent := createObjInfo(rs, "")

	assert.False(t, invalidParent.isParentOf(child))
}
@@ -60,6 +60,8 @@ var (
func NewMetricsServer(addr string, appLister applister.ApplicationLister) *MetricsServer {
	mux := http.NewServeMux()
	appRegistry := NewAppRegistry(appLister)
	appRegistry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
	appRegistry.MustRegister(prometheus.NewGoCollector())
	mux.Handle(MetricsPath, promhttp.HandlerFor(appRegistry, promhttp.HandlerOpts{}))

	syncCounter := prometheus.NewCounterVec(

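NewMetricsServer registers the process and Go runtime collectors on an app-specific registry and serves it via promhttp, rather than relying on the global default registry. A runnable sketch of that wiring with client_golang (metric and port names here are illustrative):

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // A dedicated registry keeps app metrics separate from the default one.
        registry := prometheus.NewRegistry()
        registry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
        registry.MustRegister(prometheus.NewGoCollector())

        syncCounter := prometheus.NewCounterVec(
            prometheus.CounterOpts{
                Name: "demo_app_sync_total",
                Help: "Number of application syncs.",
            },
            []string{"name", "phase"},
        )
        registry.MustRegister(syncCounter)
        syncCounter.WithLabelValues("helm-guestbook", "Succeeded").Inc()

        http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
        _ = http.ListenAndServe("localhost:8082", nil)
    }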
@@ -220,7 +220,7 @@ func TestReconcileMetrics(t *testing.T) {
	metricsServ := NewMetricsServer("localhost:8082", appLister)

	fakeApp := newFakeApp()
	metricsServ.IncReconcile(fakeApp, time.Duration(5*time.Second))
	metricsServ.IncReconcile(fakeApp, 5*time.Second)

	req, err := http.NewRequest("GET", "/metrics", nil)
	assert.NoError(t, err)

@@ -49,6 +49,10 @@ func GetLiveObjs(res []managedResource) []*unstructured.Unstructured {
	return objs
}

type ResourceInfoProvider interface {
	IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error)
}

// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
	CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool) (*comparisonResult, error)
@@ -56,7 +60,7 @@ type AppStateManager interface {
}

type comparisonResult struct {
	observedAt   metav1.Time
	reconciledAt metav1.Time
	syncStatus   *v1alpha1.SyncStatus
	healthStatus *v1alpha1.HealthStatus
	resources    []v1alpha1.ResourceStatus
@@ -131,6 +135,43 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
	return targetObjs, hooks, manifestInfo, nil
}

func DeduplicateTargetObjects(
	server string,
	namespace string,
	objs []*unstructured.Unstructured,
	infoProvider ResourceInfoProvider,
) ([]*unstructured.Unstructured, []v1alpha1.ApplicationCondition, error) {

	targetByKey := make(map[kubeutil.ResourceKey][]*unstructured.Unstructured)
	for i := range objs {
		obj := objs[i]
		isNamespaced, err := infoProvider.IsNamespaced(server, obj)
		if err != nil {
			return objs, nil, err
		}
		if !isNamespaced {
			obj.SetNamespace("")
		} else if obj.GetNamespace() == "" {
			obj.SetNamespace(namespace)
		}
		key := kubeutil.GetResourceKey(obj)
		targetByKey[key] = append(targetByKey[key], obj)
	}
	conditions := make([]v1alpha1.ApplicationCondition, 0)
	result := make([]*unstructured.Unstructured, 0)
	for key, targets := range targetByKey {
		if len(targets) > 1 {
			conditions = append(conditions, appv1.ApplicationCondition{
				Type:    appv1.ApplicationConditionRepeatedResourceWarning,
				Message: fmt.Sprintf("Resource %s appeared %d times among application resources.", key.String(), len(targets)),
			})
		}
		result = append(result, targets[len(targets)-1])
	}

	return result, conditions, nil
}

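DeduplicateTargetObjects keys manifests by (group, kind, namespace, name) after normalizing namespaces, keeps the last occurrence of each key, and surfaces repeats as application conditions. A dependency-free sketch of the same keep-one-and-warn policy (types are deliberately simplified, not the repo's):

    package main

    import "fmt"

    type resourceKey struct {
        Kind, Namespace, Name string
    }

    // deduplicate keeps one entry per key and reports which keys repeated,
    // mirroring the policy of DeduplicateTargetObjects above.
    func deduplicate(objs []resourceKey) ([]resourceKey, []string) {
        counts := make(map[resourceKey]int)
        order := make([]resourceKey, 0)
        warnings := make([]string, 0)
        for _, obj := range objs {
            if _, seen := counts[obj]; !seen {
                order = append(order, obj)
            }
            counts[obj]++
        }
        for _, key := range order {
            if n := counts[key]; n > 1 {
                warnings = append(warnings, fmt.Sprintf("Resource %v appeared %d times among application resources.", key, n))
            }
        }
        return order, warnings
    }

    func main() {
        objs := []resourceKey{
            {"Pod", "fake-dest-ns", "my-pod"},
            {"Pod", "fake-dest-ns", "my-pod"},
            {"Pod", "kube-system", "my-pod"},
        }
        result, warnings := deduplicate(objs)
        fmt.Println(len(result), warnings) // 2 unique keys, one warning
    }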
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied source. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
@@ -151,6 +192,12 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
		failedToLoadObjs = true
	}
	targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Server, app.Spec.Destination.Namespace, targetObjs, m.liveStateCache)
	if err != nil {
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
	}
	conditions = append(conditions, dedupConditions...)

	logCtx.Debugf("Generated config manifests")
	liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs)
	if err != nil {
@@ -175,7 +222,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
	for i, obj := range targetObjs {
		gvk := obj.GroupVersionKind()
		ns := util.FirstNonEmpty(obj.GetNamespace(), app.Spec.Destination.Namespace)
		if namespaced, err := m.liveStateCache.IsNamespaced(app.Spec.Destination.Server, obj.GroupVersionKind()); err == nil && !namespaced {
		if namespaced, err := m.liveStateCache.IsNamespaced(app.Spec.Destination.Server, obj); err == nil && !namespaced {
			ns = ""
		}
		key := kubeutil.NewResourceKey(gvk.Group, gvk.Kind, ns, obj.GetName())
@@ -270,7 +317,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
	}

	compRes := comparisonResult{
		observedAt:   observedAt,
		reconciledAt: observedAt,
		syncStatus:   &syncStatus,
		healthStatus: healthStatus,
		resources:    resourceSummaries,
@@ -278,7 +325,9 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
		conditions:     conditions,
		hooks:          hooks,
		diffNormalizer: diffNormalizer,
		appSourceType: v1alpha1.ApplicationSourceType(manifestInfo.SourceType),
	}
	if manifestInfo != nil {
		compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
	}
	return &compRes, nil
}

@@ -141,3 +141,40 @@ func TestCompareAppStateExtraHook(t *testing.T) {
	assert.Equal(t, 1, len(compRes.managedResources))
	assert.Equal(t, 0, len(compRes.conditions))
}

func toJSON(t *testing.T, obj *unstructured.Unstructured) string {
	data, err := json.Marshal(obj)
	assert.NoError(t, err)
	return string(data)
}

func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
	obj1 := test.NewPod()
	obj1.SetNamespace(test.FakeDestNamespace)
	obj2 := test.NewPod()
	obj3 := test.NewPod()
	obj3.SetNamespace("kube-system")

	app := newFakeApp()
	data := fakeData{
		manifestResponse: &repository.ManifestResponse{
			Manifests: []string{toJSON(t, obj1), toJSON(t, obj2), toJSON(t, obj3)},
			Namespace: test.FakeDestNamespace,
			Server:    test.FakeClusterURL,
			Revision:  "abc123",
		},
		managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
			kube.GetResourceKey(obj1): obj1,
			kube.GetResourceKey(obj3): obj3,
		},
	}
	ctrl := newFakeController(&data)
	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
	assert.NoError(t, err)
	assert.NotNil(t, compRes)
	assert.Contains(t, compRes.conditions, argoappv1.ApplicationCondition{
		Message: "Resource /Pod/fake-dest-ns/my-pod appeared 2 times among application resources.",
		Type:    argoappv1.ApplicationConditionRepeatedResourceWarning,
	})
	assert.Equal(t, 2, len(compRes.resources))
}

@@ -4,7 +4,7 @@ import (
	"testing"

	"github.com/stretchr/testify/assert"
	"k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"

@@ -103,4 +103,10 @@ data:
    issuer: https://dev-123456.oktapreview.com
    clientID: aaaabbbbccccddddeee
    clientSecret: $oidc.okta.clientSecret

    # Some OIDC providers require a separate clientID for different callback URLs.
    # For example, if configuring Argo CD with self-hosted Dex, you will need a separate client ID
    # for the 'localhost' (CLI) client to Dex. This field is optional. If omitted, the CLI will
    # use the same clientID as the Argo CD server
    cliClientID: vvvvwwwwxxxxyyyyzzzz
```
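The client-side consumer of cliClientID appears in the apiclient changes at the end of this comparison. Extracted as a standalone sketch, the fallback rule works as follows (types are simplified; only the field names follow the diff):

    package main

    import "fmt"

    // oidcConfig mirrors the fields the diff reads; it is a simplified
    // stand-in for the settings type, not the repo's definition.
    type oidcConfig struct {
        Issuer      string
        ClientID    string
        CLIClientID string
    }

    // cliClientID applies the fallback documented above: prefer the
    // dedicated CLI client ID, otherwise reuse the server's client ID.
    func cliClientID(cfg *oidcConfig) string {
        if cfg != nil && cfg.Issuer != "" {
            if cfg.CLIClientID != "" {
                return cfg.CLIClientID
            }
            return cfg.ClientID
        }
        return ""
    }

    func main() {
        fmt.Println(cliClientID(&oidcConfig{Issuer: "https://dev-123456.oktapreview.com", ClientID: "aaaabbbbccccddddeee"}))
        fmt.Println(cliClientID(&oidcConfig{Issuer: "https://dev-123456.oktapreview.com", ClientID: "aaaabbbbccccddddeee", CLIClientID: "vvvvwwwwxxxxyyyyzzzz"}))
    }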
@@ -1,31 +0,0 @@
{
  "Vendor": true,
  "DisableAll": true,
  "Deadline": "8m",
  "Enable": [
    "vet",
    "gofmt",
    "goimports",
    "deadcode",
    "errcheck",
    "varcheck",
    "structcheck",
    "ineffassign",
    "unconvert",
    "misspell"
  ],
  "Linters": {
    "goimports": {"Command": "goimports -l --local github.com/argoproj/argo-cd"}
  },
  "Skip": [
    "pkg/client",
    "vendor/",
    ".pb.go"
  ],
  "Exclude": [
    "pkg/client",
    "vendor/",
    ".pb.go",
    ".*warning.*fmt.Fprint"
  ]
}
7
hack/git-ask-pass.sh
Executable file
@@ -0,0 +1,7 @@
#!/bin/sh
# This script is used as the command supplied to GIT_ASKPASS as a way to supply username/password
# credentials to git, without having to use git credential helpers, or having on-disk config.
case "$1" in
	Username*) echo "${GIT_USERNAME}" ;;
	Password*) echo "${GIT_PASSWORD}" ;;
esac
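A caller wires this helper into git by exporting GIT_ASKPASS plus the credential variables before invoking the command; git then consults the helper whenever it needs a username or password. A hedged Go sketch of such an invocation (repo URL and credential values are placeholders, not the repo's code):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    func main() {
        cmd := exec.Command("git", "ls-remote", "https://github.com/argoproj/argo-cd.git")
        // git consults GIT_ASKPASS for credentials; the helper answers the
        // "Username"/"Password" prompts from the environment.
        cmd.Env = append(os.Environ(),
            "GIT_ASKPASS=hack/git-ask-pass.sh",
            "GIT_USERNAME=example-user",
            "GIT_PASSWORD=example-token",
        )
        out, err := cmd.CombinedOutput()
        fmt.Println(string(out), err)
    }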
8
hack/ssh_known_hosts
Normal file
@@ -0,0 +1,8 @@
# This file was automatically generated. DO NOT EDIT
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
24
hack/update-ssh-known-hosts.sh
Executable file
@@ -0,0 +1,24 @@
#!/bin/bash

set -e

KNOWN_HOSTS_FILE=$(dirname "$0")/ssh_known_hosts
HEADER="# This file was automatically generated. DO NOT EDIT"
echo "$HEADER" > $KNOWN_HOSTS_FILE
ssh-keyscan github.com gitlab.com bitbucket.org ssh.dev.azure.com vs-ssh.visualstudio.com | sort -u >> $KNOWN_HOSTS_FILE
chmod 0644 $KNOWN_HOSTS_FILE

# Public SSH keys can be verified at the following URLs:
# - github.com: https://help.github.com/articles/github-s-ssh-key-fingerprints/
# - gitlab.com: https://docs.gitlab.com/ee/user/gitlab_com/#ssh-host-keys-fingerprints
# - bitbucket.org: https://confluence.atlassian.com/bitbucket/ssh-keys-935365775.html
# - ssh.dev.azure.com, vs-ssh.visualstudio.com: https://docs.microsoft.com/en-us/azure/devops/repos/git/use-ssh-keys-to-authenticate?view=azure-devops
diff - <(ssh-keygen -l -f $KNOWN_HOSTS_FILE | sort -k 3) <<EOF
2048 SHA256:zzXQOXSRBEiUtuE8AikJYKwbHaxvSc0ojez9YXaGp1A bitbucket.org (RSA)
2048 SHA256:nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8 github.com (RSA)
256 SHA256:HbW3g8zUjNSksFbqTiUWPWg2Bq1x8xdGUrliXFzSnUw gitlab.com (ECDSA)
256 SHA256:eUXGGm1YGsMAS7vkcx6JOJdOGHPem5gQp4taiCfCLB8 gitlab.com (ED25519)
2048 SHA256:ROQFvPThGrW4RuWLoL9tq9I9zJ42fK4XywyRtbOz/EQ gitlab.com (RSA)
2048 SHA256:ohD8VZEXGWo6Ez8GSEJQ9WpafgLFsOfLOtGGQCQo6Og ssh.dev.azure.com (RSA)
2048 SHA256:ohD8VZEXGWo6Ez8GSEJQ9WpafgLFsOfLOtGGQCQo6Og vs-ssh.visualstudio.com (RSA)
EOF
@@ -7,6 +7,8 @@ metadata:
    app.kubernetes.io/component: application-controller
  name: argocd-application-controller
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: argocd-application-controller

@@ -12,7 +12,7 @@ bases:
images:
- name: argoproj/argocd
  newName: argoproj/argocd
  newTag: latest
  newTag: v0.12.0
- name: argoproj/argocd-ui
  newName: argoproj/argocd-ui
  newTag: latest
  newTag: v0.12.0

@@ -26,6 +26,7 @@ spec:
        - argocd-redis:6379
        ports:
        - containerPort: 8081
        - containerPort: 8084
        readinessProbe:
          tcpSocket:
            port: 8081

@@ -8,7 +8,13 @@ metadata:
  name: argocd-repo-server
spec:
  ports:
  - port: 8081
  - name: server
    protocol: TCP
    port: 8081
    targetPort: 8081
  - name: metrics
    protocol: TCP
    port: 8084
    targetPort: 8084
  selector:
    app.kubernetes.io/name: argocd-repo-server

@@ -7,26 +7,24 @@ metadata:
    app.kubernetes.io/component: server
  name: argocd-server
rules:
# support viewing and deleting a live object view in UI
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - delete
  - get
# support listing events of
  - delete # supports deletion of a live object in UI
  - get # supports viewing live object manifest in UI
  - patch # supports `argocd app patch`
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - list
# support viewing pod logs from UI
  - list # supports listing events in UI
- apiGroups:
  - ""
  resources:
  - pods
  - pods/log
  verbs:
  - get
  - get # supports viewing pod logs from UI

@@ -17,7 +17,7 @@ patchesStrategicMerge:
images:
- name: argoproj/argocd
  newName: argoproj/argocd
  newTag: latest
  newTag: v0.12.0
- name: argoproj/argocd-ui
  newName: argoproj/argocd-ui
  newTag: latest
  newTag: v0.12.0

@@ -1,6 +1,6 @@
dependencies:
- name: redis-ha
  repository: https://kubernetes-charts.storage.googleapis.com
  version: 3.1.6
digest: sha256:f804c4ab7502492dcd2e9865984df6f9df8273cc8ce4b729de2a1a80c31db459
generated: 2019-02-23T03:25:05.793489-08:00
  version: 3.3.1
digest: sha256:3e273208c389589d3d8935ddc39bf245f72c3ea0b6d3e61c4dc0862b7c3839eb
generated: 2019-03-19T21:34:14.183861-07:00

@@ -1,4 +1,4 @@
dependencies:
- name: redis-ha
  version: 3.1.6
  version: 3.3.1
  repository: https://kubernetes-charts.storage.googleapis.com

@@ -8,7 +8,7 @@ metadata:
  labels:
    heritage: Tiller
    release: argocd
    chart: redis-ha-3.1.6
    chart: redis-ha-3.3.1
    app: argocd-redis-ha
data:
  redis.conf: |
@@ -20,7 +20,7 @@ data:
    rdbchecksum yes
    rdbcompression yes
    repl-diskless-sync yes
    save 900 1
    save ""

  sentinel.conf: |
    dir "/data"
@@ -66,8 +66,9 @@ data:
      echo "Setting up defaults"
      if [ "$INDEX" = "0" ]; then
        echo "Setting this pod as the default master"
        sed -i "s/^.*slaveof.*//" "$REDIS_CONF"
        redis_update "$ANNOUNCE_IP"
        sentinel_update "$ANNOUNCE_IP"
        sed -i "s/^.*slaveof.*//" "$REDIS_CONF"
      else
        DEFAULT_MASTER="$(getent hosts "$SERVICE-announce-0" | awk '{ print $1 }')"
        if [ -z "$DEFAULT_MASTER" ]; then
@@ -135,7 +136,7 @@ metadata:
  labels:
    heritage: Tiller
    release: argocd
    chart: redis-ha-3.1.6
    chart: redis-ha-3.3.1
    app: argocd-redis-ha
data:
  check-quorum.sh: |
@@ -170,6 +171,59 @@ data:
        exit 1
      fi

---
# Source: redis-ha/charts/redis-ha/templates/redis-ha-serviceaccount.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: argocd-redis-ha
  labels:
    heritage: Tiller
    release: argocd
    chart: redis-ha-3.3.1
    app: argocd-redis-ha

---
# Source: redis-ha/charts/redis-ha/templates/redis-ha-role.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argocd-redis-ha
  labels:
    heritage: Tiller
    release: argocd
    chart: redis-ha-3.3.1
    app: argocd-redis-ha
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - get

---
# Source: redis-ha/charts/redis-ha/templates/redis-ha-rolebinding.yaml

kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: argocd-redis-ha
  labels:
    heritage: Tiller
    release: argocd
    chart: redis-ha-3.3.1
    app: argocd-redis-ha
subjects:
- kind: ServiceAccount
  name: argocd-redis-ha
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argocd-redis-ha

---
# Source: redis-ha/charts/redis-ha/templates/redis-ha-announce-service.yaml

@@ -182,7 +236,7 @@ metadata:
    app: redis-ha
    heritage: "Tiller"
    release: "argocd"
    chart: redis-ha-3.1.6
    chart: redis-ha-3.3.1
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
@@ -210,7 +264,7 @@ metadata:
    app: redis-ha
    heritage: "Tiller"
    release: "argocd"
    chart: redis-ha-3.1.6
    chart: redis-ha-3.3.1
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
@@ -238,7 +292,7 @@ metadata:
    app: redis-ha
    heritage: "Tiller"
    release: "argocd"
    chart: redis-ha-3.1.6
    chart: redis-ha-3.3.1
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
@@ -268,7 +322,7 @@ metadata:
    app: redis-ha
    heritage: "Tiller"
    release: "argocd"
    chart: redis-ha-3.1.6
    chart: redis-ha-3.3.1
  annotations:
spec:
  type: ClusterIP
@@ -296,7 +350,7 @@ metadata:
    app: redis-ha
    heritage: "Tiller"
    release: "argocd"
    chart: redis-ha-3.1.6
    chart: redis-ha-3.3.1
spec:
  selector:
    matchLabels:
@@ -310,8 +364,8 @@ spec:
  template:
    metadata:
      annotations:
        checksum/init-config: fd61dcd77a6f4a93ac6609398c79958da8eeae692c1203f77809abf3301e09e6
        checksum/probe-config: 6a71f32ef8f1e6e6046352b15c3ee4e201a0288442ae2abc3a64461df55d7bc4
        checksum/init-config: 06440ee4a409be2aa01dfa08c14dd964fe3bad2ada57da1a538ad5cd771a045f
        checksum/probe-config: 4b9888f173366e436f167856ee3469e8c1fd5221e29caa2129373db23578ec10
      labels:
        release: argocd
        app: redis-ha
@@ -338,6 +392,7 @@ spec:
        runAsNonRoot: true
        runAsUser: 1000

      serviceAccountName: argocd-redis-ha
      initContainers:
      - name: config-init
        image: redis:5.0.3-alpine
@@ -351,13 +406,13 @@ spec:
        - /readonly-config/init.sh
        env:
        - name: SENTINEL_ID_0
          value: fcf9c936f413aee4a7faceeef0817f2894e477ff
          value: e791a161cb06f0d3eb0cc392117d34cf0eae9d71

        - name: SENTINEL_ID_1
          value: cdf48d26a604810dc199f2d532723d2087f68e1e
          value: d9b3204a90597a7500530efd881942d8145996ac

        - name: SENTINEL_ID_2
          value: 4e85671bbbc383ff723c51ffb0ac2bc2e7a60cea
          value: d9deb539c0402841c2492e9959c8086602fa4284

        volumeMounts:
        - name: config

@@ -3,3 +3,5 @@ redis-ha:
  enabled: false
redis:
  masterGroupName: argocd
  config:
    save: "\"\""

@@ -42,6 +42,23 @@ patchesJson6902:
    kind: StatefulSet
    name: argocd-redis-ha-server
  path: overlays/modify-labels.yaml
- target:
    version: v1
    kind: ServiceAccount
    name: argocd-redis-ha
  path: overlays/modify-labels.yaml
- target:
    group: rbac.authorization.k8s.io
    version: v1
    kind: Role
    name: argocd-redis-ha
  path: overlays/modify-labels.yaml
- target:
    group: rbac.authorization.k8s.io
    version: v1
    kind: RoleBinding
    name: argocd-redis-ha
  path: overlays/modify-labels.yaml

# add pod template labels
- target:

@@ -55,6 +55,15 @@ metadata:
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: redis
    app.kubernetes.io/name: argocd-redis-ha
    app.kubernetes.io/part-of: argocd
  name: argocd-redis-ha
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -122,6 +131,22 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: redis
    app.kubernetes.io/name: argocd-redis-ha
    app.kubernetes.io/part-of: argocd
  name: argocd-redis-ha
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -199,6 +224,7 @@ rules:
  verbs:
  - delete
  - get
  - patch
- apiGroups:
  - ""
  resources:
@@ -247,6 +273,22 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: redis
    app.kubernetes.io/name: argocd-redis-ha
    app.kubernetes.io/part-of: argocd
  name: argocd-redis-ha
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argocd-redis-ha
subjects:
- kind: ServiceAccount
  name: argocd-redis-ha
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -326,18 +368,19 @@ data:
    $REDIS_PORT\" >> $REDIS_CONF\n}\n\ncopy_config() {\n cp /readonly-config/redis.conf
    \"$REDIS_CONF\"\n cp /readonly-config/sentinel.conf \"$SENTINEL_CONF\"\n}\n\nsetup_defaults()
    {\n echo \"Setting up defaults\"\n if [ \"$INDEX\" = \"0\" ]; then\n echo
    \"Setting this pod as the default master\"\n sed -i \"s/^.*slaveof.*//\"
    \"$REDIS_CONF\"\n sentinel_update \"$ANNOUNCE_IP\"\n else\n DEFAULT_MASTER=\"$(getent
    hosts \"$SERVICE-announce-0\" | awk '{ print $1 }')\"\n if [ -z \"$DEFAULT_MASTER\"
    ]; then\n echo \"Unable to resolve host\"\n exit 1\n fi\n
    \ echo \"Setting default slave config..\"\n redis_update \"$DEFAULT_MASTER\"\n
    \ sentinel_update \"$DEFAULT_MASTER\"\n fi\n}\n\nfind_master() {\n echo
    \"Attempting to find master\"\n if [ \"$(redis-cli -h \"$MASTER\" ping)\" !=
    \"PONG\" ]; then\n echo \"Can't ping master, attempting to force failover\"\n
    \ if redis-cli -h \"$SERVICE\" -p \"$SENTINEL_PORT\" sentinel failover \"$MASTER_GROUP\"
    | grep -q 'NOGOODSLAVE' ; then \n setup_defaults\n return
    0\n fi\n sleep 10\n MASTER=\"$(redis-cli -h $SERVICE -p $SENTINEL_PORT
    sentinel get-master-addr-by-name $MASTER_GROUP | grep -E '[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}')\"\n
    \"Setting this pod as the default master\"\n redis_update \"$ANNOUNCE_IP\"\n
    \ sentinel_update \"$ANNOUNCE_IP\"\n sed -i \"s/^.*slaveof.*//\"
    \"$REDIS_CONF\"\n else\n DEFAULT_MASTER=\"$(getent hosts \"$SERVICE-announce-0\"
    | awk '{ print $1 }')\"\n if [ -z \"$DEFAULT_MASTER\" ]; then\n echo
    \"Unable to resolve host\"\n exit 1\n fi\n echo \"Setting
    default slave config..\"\n redis_update \"$DEFAULT_MASTER\"\n sentinel_update
    \"$DEFAULT_MASTER\"\n fi\n}\n\nfind_master() {\n echo \"Attempting to find
    master\"\n if [ \"$(redis-cli -h \"$MASTER\" ping)\" != \"PONG\" ]; then\n
    \ echo \"Can't ping master, attempting to force failover\"\n if redis-cli
    -h \"$SERVICE\" -p \"$SENTINEL_PORT\" sentinel failover \"$MASTER_GROUP\" | grep
    -q 'NOGOODSLAVE' ; then \n setup_defaults\n return 0\n fi\n
    \ sleep 10\n MASTER=\"$(redis-cli -h $SERVICE -p $SENTINEL_PORT sentinel
    get-master-addr-by-name $MASTER_GROUP | grep -E '[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}')\"\n
    \ if [ \"$MASTER\" ]; then\n sentinel_update \"$MASTER\"\n redis_update
    \"$MASTER\"\n else\n echo \"Could not failover, exiting...\"\n
    \ exit 1\n fi\n else\n echo \"Found reachable master,
@@ -357,7 +400,7 @@ data:
    rdbchecksum yes
    rdbcompression yes
    repl-diskless-sync yes
    save 900 1
    save ""
  sentinel.conf: |
    dir "/data"
    sentinel down-after-milliseconds argocd 10000
@@ -571,8 +614,14 @@ metadata:
  name: argocd-repo-server
spec:
  ports:
  - port: 8081
  - name: server
    port: 8081
    protocol: TCP
    targetPort: 8081
  - name: metrics
    port: 8084
    protocol: TCP
    targetPort: 8084
  selector:
    app.kubernetes.io/name: argocd-repo-server
---
@@ -626,6 +675,8 @@ spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: argocd-application-controller
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
@@ -646,7 +697,7 @@ spec:
        - argocd-redis-ha-announce-2:26379
        - --sentinelmaster
        - argocd
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:
@@ -693,7 +744,7 @@ spec:
        - cp
        - /usr/local/bin/argocd-util
        - /shared
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: copyutil
        volumeMounts:
@@ -748,11 +799,12 @@ spec:
        - argocd-redis-ha-announce-2:26379
        - --sentinelmaster
        - argocd
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-repo-server
        ports:
        - containerPort: 8081
        - containerPort: 8084
        readinessProbe:
          initialDelaySeconds: 5
          periodSeconds: 10
@@ -804,7 +856,7 @@ spec:
        - argocd-redis-ha-announce-2:26379
        - --sentinelmaster
        - argocd
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-server
        ports:
@@ -825,7 +877,7 @@ spec:
        - -r
        - /app
        - /shared
        image: argoproj/argocd-ui:latest
        image: argoproj/argocd-ui:v0.12.0
        imagePullPolicy: Always
        name: ui
        volumeMounts:
@@ -854,8 +906,8 @@ spec:
  template:
    metadata:
      annotations:
        checksum/init-config: fd61dcd77a6f4a93ac6609398c79958da8eeae692c1203f77809abf3301e09e6
        checksum/probe-config: 6a71f32ef8f1e6e6046352b15c3ee4e201a0288442ae2abc3a64461df55d7bc4
        checksum/init-config: 06440ee4a409be2aa01dfa08c14dd964fe3bad2ada57da1a538ad5cd771a045f
        checksum/probe-config: 4b9888f173366e436f167856ee3469e8c1fd5221e29caa2129373db23578ec10
      labels:
        app.kubernetes.io/name: argocd-redis-ha
    spec:
@@ -945,11 +997,11 @@ spec:
        - sh
        env:
        - name: SENTINEL_ID_0
          value: fcf9c936f413aee4a7faceeef0817f2894e477ff
          value: e791a161cb06f0d3eb0cc392117d34cf0eae9d71
        - name: SENTINEL_ID_1
          value: cdf48d26a604810dc199f2d532723d2087f68e1e
          value: d9b3204a90597a7500530efd881942d8145996ac
        - name: SENTINEL_ID_2
          value: 4e85671bbbc383ff723c51ffb0ac2bc2e7a60cea
          value: d9deb539c0402841c2492e9959c8086602fa4284
        image: redis:5.0.3-alpine
        imagePullPolicy: IfNotPresent
        name: config-init
@@ -964,6 +1016,7 @@ spec:
        fsGroup: 1000
        runAsNonRoot: true
        runAsUser: 1000
      serviceAccountName: argocd-redis-ha
      volumes:
      - configMap:
          name: argocd-redis-ha-configmap

@@ -55,6 +55,15 @@ metadata:
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: redis
    app.kubernetes.io/name: argocd-redis-ha
    app.kubernetes.io/part-of: argocd
  name: argocd-redis-ha
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -122,6 +131,22 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: redis
    app.kubernetes.io/name: argocd-redis-ha
    app.kubernetes.io/part-of: argocd
  name: argocd-redis-ha
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -197,6 +222,22 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: redis
    app.kubernetes.io/name: argocd-redis-ha
    app.kubernetes.io/part-of: argocd
  name: argocd-redis-ha
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argocd-redis-ha
subjects:
- kind: ServiceAccount
  name: argocd-redis-ha
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -242,18 +283,19 @@ data:
    $REDIS_PORT\" >> $REDIS_CONF\n}\n\ncopy_config() {\n cp /readonly-config/redis.conf
    \"$REDIS_CONF\"\n cp /readonly-config/sentinel.conf \"$SENTINEL_CONF\"\n}\n\nsetup_defaults()
    {\n echo \"Setting up defaults\"\n if [ \"$INDEX\" = \"0\" ]; then\n echo
    \"Setting this pod as the default master\"\n sed -i \"s/^.*slaveof.*//\"
    \"$REDIS_CONF\"\n sentinel_update \"$ANNOUNCE_IP\"\n else\n DEFAULT_MASTER=\"$(getent
    hosts \"$SERVICE-announce-0\" | awk '{ print $1 }')\"\n if [ -z \"$DEFAULT_MASTER\"
    ]; then\n echo \"Unable to resolve host\"\n exit 1\n fi\n
    \ echo \"Setting default slave config..\"\n redis_update \"$DEFAULT_MASTER\"\n
    \ sentinel_update \"$DEFAULT_MASTER\"\n fi\n}\n\nfind_master() {\n echo
    \"Attempting to find master\"\n if [ \"$(redis-cli -h \"$MASTER\" ping)\" !=
    \"PONG\" ]; then\n echo \"Can't ping master, attempting to force failover\"\n
    \ if redis-cli -h \"$SERVICE\" -p \"$SENTINEL_PORT\" sentinel failover \"$MASTER_GROUP\"
    | grep -q 'NOGOODSLAVE' ; then \n setup_defaults\n return
    0\n fi\n sleep 10\n MASTER=\"$(redis-cli -h $SERVICE -p $SENTINEL_PORT
    sentinel get-master-addr-by-name $MASTER_GROUP | grep -E '[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}')\"\n
    \"Setting this pod as the default master\"\n redis_update \"$ANNOUNCE_IP\"\n
    \ sentinel_update \"$ANNOUNCE_IP\"\n sed -i \"s/^.*slaveof.*//\"
    \"$REDIS_CONF\"\n else\n DEFAULT_MASTER=\"$(getent hosts \"$SERVICE-announce-0\"
    | awk '{ print $1 }')\"\n if [ -z \"$DEFAULT_MASTER\" ]; then\n echo
    \"Unable to resolve host\"\n exit 1\n fi\n echo \"Setting
    default slave config..\"\n redis_update \"$DEFAULT_MASTER\"\n sentinel_update
    \"$DEFAULT_MASTER\"\n fi\n}\n\nfind_master() {\n echo \"Attempting to find
    master\"\n if [ \"$(redis-cli -h \"$MASTER\" ping)\" != \"PONG\" ]; then\n
    \ echo \"Can't ping master, attempting to force failover\"\n if redis-cli
    -h \"$SERVICE\" -p \"$SENTINEL_PORT\" sentinel failover \"$MASTER_GROUP\" | grep
    -q 'NOGOODSLAVE' ; then \n setup_defaults\n return 0\n fi\n
    \ sleep 10\n MASTER=\"$(redis-cli -h $SERVICE -p $SENTINEL_PORT sentinel
    get-master-addr-by-name $MASTER_GROUP | grep -E '[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}')\"\n
    \ if [ \"$MASTER\" ]; then\n sentinel_update \"$MASTER\"\n redis_update
    \"$MASTER\"\n else\n echo \"Could not failover, exiting...\"\n
    \ exit 1\n fi\n else\n echo \"Found reachable master,
@@ -273,7 +315,7 @@ data:
    rdbchecksum yes
    rdbcompression yes
    repl-diskless-sync yes
    save 900 1
    save ""
  sentinel.conf: |
    dir "/data"
    sentinel down-after-milliseconds argocd 10000
@@ -487,8 +529,14 @@ metadata:
  name: argocd-repo-server
spec:
  ports:
  - port: 8081
  - name: server
    port: 8081
    protocol: TCP
    targetPort: 8081
  - name: metrics
    port: 8084
    protocol: TCP
    targetPort: 8084
  selector:
    app.kubernetes.io/name: argocd-repo-server
---
@@ -542,6 +590,8 @@ spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: argocd-application-controller
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
@@ -562,7 +612,7 @@ spec:
        - argocd-redis-ha-announce-2:26379
        - --sentinelmaster
        - argocd
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:
@@ -609,7 +659,7 @@ spec:
        - cp
        - /usr/local/bin/argocd-util
        - /shared
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: copyutil
        volumeMounts:
@@ -664,11 +714,12 @@ spec:
        - argocd-redis-ha-announce-2:26379
        - --sentinelmaster
        - argocd
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-repo-server
        ports:
        - containerPort: 8081
        - containerPort: 8084
        readinessProbe:
          initialDelaySeconds: 5
          periodSeconds: 10
@@ -720,7 +771,7 @@ spec:
        - argocd-redis-ha-announce-2:26379
        - --sentinelmaster
        - argocd
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-server
        ports:
@@ -741,7 +792,7 @@ spec:
        - -r
        - /app
        - /shared
        image: argoproj/argocd-ui:latest
        image: argoproj/argocd-ui:v0.12.0
        imagePullPolicy: Always
        name: ui
        volumeMounts:
@@ -770,8 +821,8 @@ spec:
  template:
    metadata:
      annotations:
        checksum/init-config: fd61dcd77a6f4a93ac6609398c79958da8eeae692c1203f77809abf3301e09e6
        checksum/probe-config: 6a71f32ef8f1e6e6046352b15c3ee4e201a0288442ae2abc3a64461df55d7bc4
        checksum/init-config: 06440ee4a409be2aa01dfa08c14dd964fe3bad2ada57da1a538ad5cd771a045f
        checksum/probe-config: 4b9888f173366e436f167856ee3469e8c1fd5221e29caa2129373db23578ec10
      labels:
        app.kubernetes.io/name: argocd-redis-ha
    spec:
@@ -861,11 +912,11 @@ spec:
        - sh
        env:
        - name: SENTINEL_ID_0
          value: fcf9c936f413aee4a7faceeef0817f2894e477ff
          value: e791a161cb06f0d3eb0cc392117d34cf0eae9d71
        - name: SENTINEL_ID_1
          value: cdf48d26a604810dc199f2d532723d2087f68e1e
          value: d9b3204a90597a7500530efd881942d8145996ac
        - name: SENTINEL_ID_2
          value: 4e85671bbbc383ff723c51ffb0ac2bc2e7a60cea
          value: d9deb539c0402841c2492e9959c8086602fa4284
        image: redis:5.0.3-alpine
        imagePullPolicy: IfNotPresent
        name: config-init
@@ -880,6 +931,7 @@ spec:
        fsGroup: 1000
        runAsNonRoot: true
        runAsUser: 1000
      serviceAccountName: argocd-redis-ha
      volumes:
      - configMap:
          name: argocd-redis-ha-configmap

@@ -199,6 +199,7 @@ rules:
  verbs:
  - delete
  - get
  - patch
- apiGroups:
  - ""
  resources:
@@ -384,8 +385,14 @@ metadata:
  name: argocd-repo-server
spec:
  ports:
  - port: 8081
  - name: server
    port: 8081
    protocol: TCP
    targetPort: 8081
  - name: metrics
    port: 8084
    protocol: TCP
    targetPort: 8084
  selector:
    app.kubernetes.io/name: argocd-repo-server
---
@@ -439,6 +446,8 @@ spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: argocd-application-controller
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
@@ -451,7 +460,7 @@ spec:
        - "20"
        - --operation-processors
        - "10"
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:
@@ -498,7 +507,7 @@ spec:
        - cp
        - /usr/local/bin/argocd-util
        - /shared
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: copyutil
        volumeMounts:
@@ -561,11 +570,12 @@ spec:
        - argocd-repo-server
        - --redis
        - argocd-redis:6379
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-repo-server
        ports:
        - containerPort: 8081
        - containerPort: 8084
        readinessProbe:
          initialDelaySeconds: 5
          periodSeconds: 10
@@ -594,7 +604,7 @@ spec:
        - argocd-server
        - --staticassets
        - /shared/app
        image: argoproj/argocd:latest
        image: argoproj/argocd:v0.12.0
        imagePullPolicy: Always
        name: argocd-server
        ports:
@@ -615,7 +625,7 @@ spec:
        - -r
        - /app
        - /shared
        image: argoproj/argocd-ui:latest
        image: argoproj/argocd-ui:v0.12.0
        imagePullPolicy: Always
        name: ui
        volumeMounts:

@@ -300,8 +300,14 @@ metadata:
name: argocd-repo-server
spec:
ports:
- port: 8081
- name: server
port: 8081
protocol: TCP
targetPort: 8081
- name: metrics
port: 8084
protocol: TCP
targetPort: 8084
selector:
app.kubernetes.io/name: argocd-repo-server
---
@@ -355,6 +361,8 @@ spec:
selector:
matchLabels:
app.kubernetes.io/name: argocd-application-controller
strategy:
type: Recreate
template:
metadata:
labels:
@@ -367,7 +375,7 @@ spec:
- "20"
- --operation-processors
- "10"
image: argoproj/argocd:latest
image: argoproj/argocd:v0.12.0
imagePullPolicy: Always
name: argocd-application-controller
ports:
@@ -414,7 +422,7 @@ spec:
- cp
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:latest
image: argoproj/argocd:v0.12.0
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -477,11 +485,12 @@ spec:
- argocd-repo-server
- --redis
- argocd-redis:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v0.12.0
imagePullPolicy: Always
name: argocd-repo-server
ports:
- containerPort: 8081
- containerPort: 8084
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 10
@@ -510,7 +519,7 @@ spec:
- argocd-server
- --staticassets
- /shared/app
image: argoproj/argocd:latest
image: argoproj/argocd:v0.12.0
imagePullPolicy: Always
name: argocd-server
ports:
@@ -531,7 +540,7 @@ spec:
- -r
- /app
- /shared
image: argoproj/argocd-ui:latest
image: argoproj/argocd-ui:v0.12.0
imagePullPolicy: Always
name: ui
volumeMounts:

@@ -3,7 +3,6 @@ package apiclient
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
@@ -25,7 +24,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/status"

"github.com/argoproj/argo-cd"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
@@ -39,6 +38,7 @@ import (
"github.com/argoproj/argo-cd/server/version"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/localconfig"
tls_util "github.com/argoproj/argo-cd/util/tls"
)

const (
@@ -201,7 +201,11 @@ func (c *client) OIDCConfig(ctx context.Context, set *settings.Settings) (*oauth
clientID = common.ArgoCDCLIClientAppID
issuerURL = fmt.Sprintf("%s%s", set.URL, common.DexAPIEndpoint)
} else if set.OIDCConfig != nil && set.OIDCConfig.Issuer != "" {
clientID = set.OIDCConfig.ClientID
if set.OIDCConfig.CLIClientID != "" {
clientID = set.OIDCConfig.CLIClientID
} else {
clientID = set.OIDCConfig.ClientID
}
issuerURL = set.OIDCConfig.Issuer
} else {
return nil, nil, fmt.Errorf("%s is not configured with SSO", c.ServerAddr)
@@ -389,7 +393,7 @@ func (c *client) newConn() (*grpc.ClientConn, io.Closer, error) {
func (c *client) tlsConfig() (*tls.Config, error) {
var tlsConfig tls.Config
if len(c.CertPEMData) > 0 {
cp := x509.NewCertPool()
cp := tls_util.BestEffortSystemCertPool()
if !cp.AppendCertsFromPEM(c.CertPEMData) {
return nil, fmt.Errorf("credentials: failed to append certificates")
}

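Note on the tlsConfig change above: seeding the pool from the system roots means a custom CA supplements, rather than replaces, the host's trust store. A minimal sketch of what a best-effort helper along the lines of tls_util.BestEffortSystemCertPool can look like (the actual util/tls implementation may differ):

```go
package tlsutil

import "crypto/x509"

// BestEffortSystemCertPool returns the system certificate pool when it is
// available, falling back to an empty pool (e.g. on platforms where the
// system store cannot be loaded) instead of returning an error.
func BestEffortSystemCertPool() *x509.CertPool {
	pool, err := x509.SystemCertPool()
	if err != nil || pool == nil {
		return x509.NewCertPool()
	}
	return pool
}
```
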
File diff suppressed because it is too large

@@ -215,9 +215,13 @@ message ApplicationStatus {

repeated ApplicationCondition conditions = 5;

optional k8s.io.apimachinery.pkg.apis.meta.v1.Time observedAt = 6;
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time reconciledAt = 6;

optional OperationState operationState = 7;

optional k8s.io.apimachinery.pkg.apis.meta.v1.Time observedAt = 8;

optional string sourceType = 9;
}

// ApplicationWatchEvent contains information about application change.
@@ -444,6 +448,8 @@ message Repository {
optional string sshPrivateKey = 4;

optional ConnectionState connectionState = 5;

optional bool insecureIgnoreHostKey = 6;
}

// RepositoryList is a collection of Repositories.

@@ -220,8 +220,10 @@ type ApplicationStatus struct {
Health HealthStatus `json:"health,omitempty" protobuf:"bytes,3,opt,name=health"`
History []RevisionHistory `json:"history,omitempty" protobuf:"bytes,4,opt,name=history"`
Conditions []ApplicationCondition `json:"conditions,omitempty" protobuf:"bytes,5,opt,name=conditions"`
ObservedAt metav1.Time `json:"observedAt,omitempty" protobuf:"bytes,6,opt,name=observedAt"`
ReconciledAt metav1.Time `json:"reconciledAt,omitempty" protobuf:"bytes,6,opt,name=reconciledAt"`
OperationState *OperationState `json:"operationState,omitempty" protobuf:"bytes,7,opt,name=operationState"`
ObservedAt metav1.Time `json:"observedAt,omitempty" protobuf:"bytes,8,opt,name=observedAt"`
SourceType ApplicationSourceType `json:"sourceType,omitempty" protobuf:"bytes,9,opt,name=sourceType"`
}

// Operation contains requested operation parameters.
@@ -468,6 +470,8 @@ const (
ApplicationConditionUnknownError = "UnknownError"
// ApplicationConditionSharedResourceWarning indicates that controller detected resources which belongs to more than one application
ApplicationConditionSharedResourceWarning = "SharedResourceWarning"
// ApplicationConditionRepeatedResourceWarning indicates that application source has resource with same Group, Kind, Name, Namespace multiple times
ApplicationConditionRepeatedResourceWarning = "RepeatedResourceWarning"
)

// ApplicationCondition contains details about current application condition
@@ -667,11 +671,12 @@ type HelmRepository struct {

// Repository is a Git repository holding application configurations
type Repository struct {
Repo string `json:"repo" protobuf:"bytes,1,opt,name=repo"`
Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"`
Password string `json:"password,omitempty" protobuf:"bytes,3,opt,name=password"`
SSHPrivateKey string `json:"sshPrivateKey,omitempty" protobuf:"bytes,4,opt,name=sshPrivateKey"`
ConnectionState ConnectionState `json:"connectionState,omitempty" protobuf:"bytes,5,opt,name=connectionState"`
Repo string `json:"repo" protobuf:"bytes,1,opt,name=repo"`
Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"`
Password string `json:"password,omitempty" protobuf:"bytes,3,opt,name=password"`
SSHPrivateKey string `json:"sshPrivateKey,omitempty" protobuf:"bytes,4,opt,name=sshPrivateKey"`
ConnectionState ConnectionState `json:"connectionState,omitempty" protobuf:"bytes,5,opt,name=connectionState"`
InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty" protobuf:"bytes,6,opt,name=insecureIgnoreHostKey"`
}

// RepositoryList is a collection of Repositories.

@@ -479,7 +479,7 @@ func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) {
*out = make([]ApplicationCondition, len(*in))
copy(*out, *in)
}
in.ObservedAt.DeepCopyInto(&out.ObservedAt)
in.ReconciledAt.DeepCopyInto(&out.ReconciledAt)
if in.OperationState != nil {
in, out := &in.OperationState, &out.OperationState
if *in == nil {
@@ -489,6 +489,7 @@ func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) {
(*in).DeepCopyInto(*out)
}
}
in.ObservedAt.DeepCopyInto(&out.ObservedAt)
return
}

@@ -215,7 +215,7 @@ func GenerateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, e
}
}
case v1alpha1.ApplicationSourceTypeKustomize:
k := kustomize.NewKustomizeApp(appPath)
k := kustomize.NewKustomizeApp(appPath, kustomizeCredentials(q.Repo))
targetObjs, _, err = k.Build(q.ApplicationSource.Kustomize)
case v1alpha1.ApplicationSourceTypePlugin:
targetObjs, err = runConfigManagementPlugin(appPath, q, q.Plugins)
@@ -489,7 +489,7 @@ func pathExists(ss ...string) bool {
func (s *Service) newClientResolveRevision(repo *v1alpha1.Repository, revision string) (git.Client, string, error) {
repoURL := git.NormalizeGitURL(repo.Repo)
appRepoPath := tempRepoPath(repoURL)
gitClient, err := s.gitFactory.NewClient(repoURL, appRepoPath, repo.Username, repo.Password, repo.SSHPrivateKey)
gitClient, err := s.gitFactory.NewClient(repoURL, appRepoPath, repo.Username, repo.Password, repo.SSHPrivateKey, repo.InsecureIgnoreHostKey)
if err != nil {
return nil, "", err
}
@@ -635,7 +635,7 @@ func (s *Service) GetAppDetails(ctx context.Context, q *RepoServerAppDetailsQuer
case v1alpha1.ApplicationSourceTypeKustomize:
res.Kustomize = &KustomizeAppSpec{}
res.Kustomize.Path = q.Path
k := kustomize.NewKustomizeApp(appPath)
k := kustomize.NewKustomizeApp(appPath, kustomizeCredentials(q.Repo))
_, params, err := k.Build(nil)
if err != nil {
return nil, err
@@ -651,3 +651,13 @@ func (q *RepoServerAppDetailsQuery) valueFiles() []string {
}
return q.Helm.ValueFiles
}

func kustomizeCredentials(repo *v1alpha1.Repository) *kustomize.GitCredentials {
if repo == nil || repo.Password == "" {
return nil
}
return &kustomize.GitCredentials{
Username: repo.Username,
Password: repo.Password,
}
}

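The new kustomizeCredentials helper above deliberately returns nil for anonymous repositories, so unauthenticated kustomize builds keep working unchanged. A small self-contained sketch of that guard in isolation (Repository here is a simplified stand-in for v1alpha1.Repository):

```go
package main

import "fmt"

// GitCredentials mirrors the kustomize.GitCredentials struct introduced above.
type GitCredentials struct {
	Username string
	Password string
}

// Repository is a simplified stand-in for v1alpha1.Repository.
type Repository struct {
	Username string
	Password string
}

// kustomizeCredentials returns nil unless a password is configured, so
// public repositories continue to build without credentials.
func kustomizeCredentials(repo *Repository) *GitCredentials {
	if repo == nil || repo.Password == "" {
		return nil
	}
	return &GitCredentials{Username: repo.Username, Password: repo.Password}
}

func main() {
	fmt.Println(kustomizeCredentials(nil))                                       // <nil>: anonymous build
	fmt.Println(kustomizeCredentials(&Repository{Username: "u", Password: "p"})) // credentialed build
}
```
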
@@ -36,7 +36,7 @@ type fakeGitClientFactory struct {
root string
}

func (f *fakeGitClientFactory) NewClient(repoURL, path, username, password, sshPrivateKey string) (git.Client, error) {
func (f *fakeGitClientFactory) NewClient(repoURL, path, username, password, sshPrivateKey string, insecureIgnoreHostKey bool) (git.Client, error) {
mockClient := gitmocks.Client{}
root := "./testdata"
if f.root != "" {
@@ -251,7 +251,7 @@ func TestGetAppDetailsKustomize(t *testing.T) {
Path: "kustomization_yaml",
})
assert.NoError(t, err)
assert.Equal(t, "nginx", res.Kustomize.ImageTags[0].Name)
assert.Equal(t, "1.15.4", res.Kustomize.ImageTags[0].Value)
assert.Equal(t, "k8s.gcr.io/nginx-slim", res.Kustomize.ImageTags[0].Name)
assert.Equal(t, "0.8", res.Kustomize.ImageTags[0].Value)
assert.Equal(t, 2, len(res.Kustomize.ImageTags))
}

@@ -433,6 +433,11 @@ func (s *Server) Watch(q *ApplicationQuery, ws ApplicationService_WatchServer) e
if err != nil {
return err
}
defer w.Stop()
logCtx := log.NewEntry(log.New())
if q.Name != nil {
logCtx = logCtx.WithField("application", *q.Name)
}
claims := ws.Context().Value("claims")
done := make(chan bool)
go func() {
@@ -448,17 +453,17 @@ func (s *Server) Watch(q *ApplicationQuery, ws ApplicationService_WatchServer) e
Application: a,
})
if err != nil {
log.Warnf("Unable to send stream message: %v", err)
logCtx.Warnf("Unable to send stream message: %v", err)
}
}
}
done <- true
logCtx.Info("k8s application watch event channel closed")
close(done)
}()
select {
case <-ws.Context().Done():
w.Stop()
logCtx.Info("client watch grpc context closed")
case <-done:
w.Stop()
}
return nil
}
@@ -492,18 +497,13 @@ func (s *Server) validateAndNormalizeApp(ctx context.Context, app *appv1.Applica
}
}

conditions, err := argo.GetSpecErrors(ctx, &app.Spec, proj, s.repoClientset, s.db)
conditions, appSourceType, err := argo.GetSpecErrors(ctx, &app.Spec, proj, s.repoClientset, s.db)
if err != nil {
return err
}
if len(conditions) > 0 {
return status.Errorf(codes.InvalidArgument, "application spec is invalid: %s", argo.FormatAppConditions(conditions))
}

appSourceType, err := argo.QueryAppSourceType(ctx, app, s.repoClientset, s.db)
if err != nil {
return err
}
app.Spec = *argo.NormalizeApplicationSpec(&app.Spec, appSourceType)
return nil
}
@@ -728,7 +728,10 @@ func (s *Server) PodLogs(q *ApplicationPodLogsQuery, ws ApplicationService_PodLo
if err != nil {
return err
}
logCtx := log.WithField("application", q.Name)
defer util.Close(stream)
done := make(chan bool)
gracefulExit := false
go func() {
scanner := bufio.NewScanner(stream)
for scanner.Scan() {
@@ -745,18 +748,25 @@ func (s *Server) PodLogs(q *ApplicationPodLogsQuery, ws ApplicationService_PodLo
TimeStamp: metaLogTime,
})
if err != nil {
log.Warnf("Unable to send stream message: %v", err)
logCtx.Warnf("Unable to send stream message: %v", err)
}
}
}
}
}

done <- true
if gracefulExit {
logCtx.Info("k8s pod logs scanner completed due to closed grpc context")
} else if err := scanner.Err(); err != nil {
logCtx.Warnf("k8s pod logs scanner failed with error: %v", err)
} else {
logCtx.Info("k8s pod logs scanner completed with EOF")
}
close(done)
}()
select {
case <-ws.Context().Done():
util.Close(stream)
logCtx.Info("client pod logs grpc context closed")
gracefulExit = true
case <-done:
}
return nil
@@ -889,7 +899,7 @@ func (s *Server) resolveRevision(ctx context.Context, app *appv1.Application, sy
// If we couldn't retrieve from the repo service, assume public repositories
repo = &appv1.Repository{Repo: app.Spec.Source.RepoURL}
}
gitClient, err := s.gitFactory.NewClient(repo.Repo, "", repo.Username, repo.Password, repo.SSHPrivateKey)
gitClient, err := s.gitFactory.NewClient(repo.Repo, "", repo.Username, repo.Password, repo.SSHPrivateKey, repo.InsecureIgnoreHostKey)
if err != nil {
return "", "", err
}

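The Watch and PodLogs changes above share one concurrency pattern: a goroutine pumps events into the gRPC stream and signals completion by closing a channel (the fix from `done <- true` to `close(done)` avoids blocking when nobody is receiving), while the main goroutine selects between that signal and the client context. A stripped-down, runnable sketch of the pattern; the event source and send function are stand-ins, not the actual server types:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// bridge forwards events to send() until the source closes or the client
// context is done, mirroring the done-channel pattern used by Watch above.
func bridge(ctx context.Context, events <-chan string, send func(string) error) {
	done := make(chan struct{})
	go func() {
		for ev := range events {
			if err := send(ev); err != nil {
				fmt.Println("unable to send stream message:", err)
			}
		}
		close(done) // signal completion instead of blocking on an unbuffered send
	}()
	select {
	case <-ctx.Done():
		fmt.Println("client context closed")
	case <-done:
		fmt.Println("event channel closed")
	}
}

func main() {
	events := make(chan string, 2)
	events <- "MODIFIED"
	events <- "DELETED"
	close(events)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	bridge(ctx, events, func(ev string) error { fmt.Println("sent", ev); return nil })
}
```
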
@@ -59,7 +59,7 @@ func (s *Server) getConnectionState(ctx context.Context, url string) appsv1.Conn
}
repo, err := s.db.GetRepository(ctx, url)
if err == nil {
err = git.TestRepo(repo.Repo, repo.Username, repo.Password, repo.SSHPrivateKey)
err = git.TestRepo(repo.Repo, repo.Username, repo.Password, repo.SSHPrivateKey, repo.InsecureIgnoreHostKey)
}
if err != nil {
connectionState.Status = appsv1.ConnectionStatusFailed
@@ -235,7 +235,7 @@ func (s *Server) Create(ctx context.Context, q *RepoCreateRequest) (*appsv1.Repo
return nil, err
}
r := q.Repo
err := git.TestRepo(r.Repo, r.Username, r.Password, r.SSHPrivateKey)
err := git.TestRepo(r.Repo, r.Username, r.Password, r.SSHPrivateKey, r.InsecureIgnoreHostKey)
if err != nil {
return nil, err
}

@@ -38,9 +38,10 @@ func (s *Server) Get(ctx context.Context, q *SettingsQuery) (*Settings, error) {
}
if oidcConfig := argoCDSettings.OIDCConfig(); oidcConfig != nil {
set.OIDCConfig = &OIDCConfig{
Name: oidcConfig.Name,
Issuer: oidcConfig.Issuer,
ClientID: oidcConfig.ClientID,
Name: oidcConfig.Name,
Issuer: oidcConfig.Issuer,
ClientID: oidcConfig.ClientID,
CLIClientID: oidcConfig.CLIClientID,
}
}
return &set, nil

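The new CLIClientID field exposed here pairs with the CLI-side change earlier in this compare: the CLI prefers a dedicated OIDC client ID when one is configured and falls back to the web client ID otherwise. A minimal, runnable sketch of that selection rule (function name is illustrative, not from the source):

```go
package main

import "fmt"

// effectiveCLIClientID picks the dedicated CLI client ID when configured,
// falling back to the regular OIDC client ID otherwise.
func effectiveCLIClientID(clientID, cliClientID string) string {
	if cliClientID != "" {
		return cliClientID
	}
	return clientID
}

func main() {
	fmt.Println(effectiveCLIClientID("web-app", ""))        // web-app
	fmt.Println(effectiveCLIClientID("web-app", "cli-app")) // cli-app
}
```
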
@@ -42,7 +42,7 @@ func (m *SettingsQuery) Reset() { *m = SettingsQuery{} }
func (m *SettingsQuery) String() string { return proto.CompactTextString(m) }
func (*SettingsQuery) ProtoMessage() {}
func (*SettingsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_ec03f87e34619c39, []int{0}
return fileDescriptor_settings_f217579c211395f6, []int{0}
}
func (m *SettingsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -85,7 +85,7 @@ func (m *Settings) Reset() { *m = Settings{} }
func (m *Settings) String() string { return proto.CompactTextString(m) }
func (*Settings) ProtoMessage() {}
func (*Settings) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_ec03f87e34619c39, []int{1}
return fileDescriptor_settings_f217579c211395f6, []int{1}
}
func (m *Settings) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -153,7 +153,7 @@ func (m *DexConfig) Reset() { *m = DexConfig{} }
func (m *DexConfig) String() string { return proto.CompactTextString(m) }
func (*DexConfig) ProtoMessage() {}
func (*DexConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_ec03f87e34619c39, []int{2}
return fileDescriptor_settings_f217579c211395f6, []int{2}
}
func (m *DexConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -201,7 +201,7 @@ func (m *Connector) Reset() { *m = Connector{} }
func (m *Connector) String() string { return proto.CompactTextString(m) }
func (*Connector) ProtoMessage() {}
func (*Connector) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_ec03f87e34619c39, []int{3}
return fileDescriptor_settings_f217579c211395f6, []int{3}
}
func (m *Connector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -248,6 +248,7 @@ type OIDCConfig struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Issuer string `protobuf:"bytes,2,opt,name=issuer,proto3" json:"issuer,omitempty"`
ClientID string `protobuf:"bytes,3,opt,name=clientID,proto3" json:"clientID,omitempty"`
CLIClientID string `protobuf:"bytes,4,opt,name=cliClientID,proto3" json:"cliClientID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -257,7 +258,7 @@ func (m *OIDCConfig) Reset() { *m = OIDCConfig{} }
func (m *OIDCConfig) String() string { return proto.CompactTextString(m) }
func (*OIDCConfig) ProtoMessage() {}
func (*OIDCConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_ec03f87e34619c39, []int{4}
return fileDescriptor_settings_f217579c211395f6, []int{4}
}
func (m *OIDCConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -307,6 +308,13 @@ func (m *OIDCConfig) GetClientID() string {
return ""
}

func (m *OIDCConfig) GetCLIClientID() string {
if m != nil {
return m.CLIClientID
}
return ""
}

func init() {
proto.RegisterType((*SettingsQuery)(nil), "cluster.SettingsQuery")
proto.RegisterType((*Settings)(nil), "cluster.Settings")
@@ -562,6 +570,12 @@ func (m *OIDCConfig) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintSettings(dAtA, i, uint64(len(m.ClientID)))
i += copy(dAtA[i:], m.ClientID)
}
if len(m.CLIClientID) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintSettings(dAtA, i, uint64(len(m.CLIClientID)))
i += copy(dAtA[i:], m.CLIClientID)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
@@ -658,6 +672,10 @@ func (m *OIDCConfig) Size() (n int) {
if l > 0 {
n += 1 + l + sovSettings(uint64(l))
}
l = len(m.CLIClientID)
if l > 0 {
n += 1 + l + sovSettings(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -1210,6 +1228,35 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error {
}
m.ClientID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CLIClientID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSettings
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.CLIClientID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSettings(dAtA[iNdEx:])
@@ -1338,35 +1385,37 @@ var (
)

func init() {
proto.RegisterFile("server/settings/settings.proto", fileDescriptor_settings_ec03f87e34619c39)
proto.RegisterFile("server/settings/settings.proto", fileDescriptor_settings_f217579c211395f6)
}

var fileDescriptor_settings_ec03f87e34619c39 = []byte{
// 415 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x41, 0x6b, 0xd4, 0x40,
0x18, 0x65, 0x9a, 0xd2, 0x6e, 0xbe, 0xaa, 0xd5, 0x51, 0x4a, 0x5c, 0x64, 0x77, 0xc9, 0x69, 0x41,
0x4c, 0x74, 0x7b, 0xf2, 0x24, 0x24, 0x0b, 0x52, 0x2d, 0x88, 0x53, 0xbc, 0x08, 0x1e, 0x92, 0xd9,
0xcf, 0x38, 0x92, 0xce, 0x84, 0xc9, 0x64, 0x71, 0xaf, 0xfe, 0x05, 0xff, 0x8f, 0x67, 0x8f, 0x82,
0xf7, 0x45, 0x82, 0x3f, 0x44, 0x32, 0x9b, 0xa4, 0xa9, 0xf6, 0xf6, 0xe6, 0xbd, 0x79, 0xc9, 0x9b,
0xef, 0x7b, 0x30, 0x29, 0x51, 0xaf, 0x51, 0x87, 0x25, 0x1a, 0x23, 0x64, 0x56, 0xf6, 0x20, 0x28,
0xb4, 0x32, 0x8a, 0x1e, 0xf2, 0xbc, 0x2a, 0x0d, 0xea, 0xf1, 0x83, 0x4c, 0x65, 0xca, 0x72, 0x61,
0x83, 0x76, 0xf2, 0xf8, 0x51, 0xa6, 0x54, 0x96, 0x63, 0x98, 0x14, 0x22, 0x4c, 0xa4, 0x54, 0x26,
0x31, 0x42, 0xc9, 0xd6, 0xec, 0x1f, 0xc3, 0xed, 0x8b, 0xf6, 0x73, 0x6f, 0x2b, 0xd4, 0x1b, 0xff,
0x3b, 0x81, 0x51, 0xc7, 0xd0, 0x87, 0xe0, 0x54, 0x3a, 0xf7, 0xc8, 0x8c, 0xcc, 0xdd, 0xe8, 0xb0,
0xde, 0x4e, 0x9d, 0x77, 0xec, 0x9c, 0x35, 0x1c, 0x7d, 0x0a, 0xee, 0x0a, 0xbf, 0xc4, 0x4a, 0x7e,
0x14, 0x99, 0xb7, 0x37, 0x23, 0xf3, 0xa3, 0x05, 0x0d, 0xda, 0x24, 0xc1, 0xb2, 0x53, 0xd8, 0xd5,
0x25, 0x1a, 0x03, 0x28, 0xb1, 0xe2, 0xad, 0xc5, 0xb1, 0x96, 0xfb, 0xbd, 0xe5, 0xcd, 0xd9, 0x32,
0xde, 0x49, 0xd1, 0x9d, 0x7a, 0x3b, 0x85, 0xab, 0x33, 0x1b, 0xd8, 0xe8, 0x0c, 0x8e, 0x92, 0xa2,
0x38, 0x4f, 0x52, 0xcc, 0x5f, 0xe3, 0xc6, 0xdb, 0x6f, 0x92, 0xb1, 0x21, 0xe5, 0xbf, 0x00, 0xb7,
0xff, 0x3d, 0x5d, 0x00, 0x70, 0x25, 0x25, 0x72, 0xa3, 0x74, 0xe9, 0x91, 0x99, 0x73, 0x2d, 0x66,
0xdc, 0x49, 0x6c, 0x70, 0xcb, 0x3f, 0x05, 0xb7, 0x17, 0x28, 0x85, 0x7d, 0x99, 0x5c, 0xe2, 0x6e,
0x04, 0xcc, 0xe2, 0x86, 0x33, 0x9b, 0x02, 0xed, 0xab, 0x5d, 0x66, 0xb1, 0x9f, 0xc2, 0x20, 0xf1,
0x8d, 0xae, 0x13, 0x38, 0x10, 0x65, 0x59, 0xa1, 0x6e, 0x7d, 0xed, 0x89, 0xce, 0x61, 0xc4, 0x73,
0x81, 0xd2, 0x9c, 0x2d, 0xed, 0x50, 0xdc, 0xe8, 0x56, 0xbd, 0x9d, 0x8e, 0xe2, 0x96, 0x63, 0xbd,
0xba, 0xf8, 0x00, 0xc7, 0xdd, 0x66, 0x2e, 0x50, 0xaf, 0x05, 0x47, 0xfa, 0x0a, 0x9c, 0x97, 0x68,
0xe8, 0x49, 0xff, 0xa4, 0x6b, 0xcb, 0x1c, 0xdf, 0xfb, 0x8f, 0xf7, 0xbd, 0xaf, 0xbf, 0xfe, 0x7c,
0xdb, 0xa3, 0xf4, 0xae, 0x2d, 0xc4, 0xfa, 0x59, 0xdf, 0xa6, 0xe8, 0xf9, 0x8f, 0x7a, 0x42, 0x7e,
0xd6, 0x13, 0xf2, 0xbb, 0x9e, 0x90, 0xf7, 0x8f, 0x33, 0x61, 0x3e, 0x55, 0x69, 0xc0, 0xd5, 0x65,
0x98, 0x68, 0xdb, 0xab, 0xcf, 0x16, 0x3c, 0xe1, 0xab, 0xf0, 0x9f, 0x46, 0xa6, 0x07, 0xb6, 0x4c,
0xa7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x0f, 0xf6, 0xb0, 0xab, 0x02, 0x00, 0x00,
var fileDescriptor_settings_f217579c211395f6 = []byte{
// 436 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x41, 0x8b, 0xd3, 0x40,
0x18, 0x65, 0x36, 0xcb, 0x6e, 0xf3, 0x55, 0xad, 0x8e, 0xb2, 0xc4, 0x22, 0x6d, 0xc9, 0xa9, 0x20,
0x36, 0x6e, 0xf7, 0xe4, 0x49, 0x48, 0x0a, 0x52, 0x2d, 0x88, 0xb3, 0x78, 0x11, 0x3c, 0xa4, 0xd3,
0xcf, 0x38, 0x92, 0x9d, 0x09, 0x93, 0x49, 0xb1, 0x57, 0xff, 0x82, 0xf8, 0x77, 0x3c, 0x7b, 0x14,
0xbc, 0x17, 0x09, 0xfe, 0x10, 0xe9, 0x34, 0xc9, 0xa6, 0xba, 0xb7, 0x37, 0xef, 0x7d, 0x2f, 0x79,
0xf3, 0xcd, 0x83, 0x41, 0x8e, 0x7a, 0x8d, 0x3a, 0xc8, 0xd1, 0x18, 0x21, 0x93, 0xbc, 0x01, 0x93,
0x4c, 0x2b, 0xa3, 0xe8, 0x29, 0x4f, 0x8b, 0xdc, 0xa0, 0xee, 0x3f, 0x48, 0x54, 0xa2, 0x2c, 0x17,
0xec, 0xd0, 0x5e, 0xee, 0x3f, 0x4a, 0x94, 0x4a, 0x52, 0x0c, 0xe2, 0x4c, 0x04, 0xb1, 0x94, 0xca,
0xc4, 0x46, 0x28, 0x59, 0x99, 0xfd, 0x1e, 0xdc, 0xbe, 0xac, 0x3e, 0xf7, 0xa6, 0x40, 0xbd, 0xf1,
0xbf, 0x13, 0xe8, 0xd4, 0x0c, 0x7d, 0x08, 0x4e, 0xa1, 0x53, 0x8f, 0x8c, 0xc8, 0xd8, 0x0d, 0x4f,
0xcb, 0xed, 0xd0, 0x79, 0xcb, 0x16, 0x6c, 0xc7, 0xd1, 0xa7, 0xe0, 0xae, 0xf0, 0x73, 0xa4, 0xe4,
0x07, 0x91, 0x78, 0x47, 0x23, 0x32, 0xee, 0x4e, 0xe9, 0xa4, 0x4a, 0x32, 0x99, 0xd5, 0x0a, 0xbb,
0x1e, 0xa2, 0x11, 0x80, 0x12, 0x2b, 0x5e, 0x59, 0x1c, 0x6b, 0xb9, 0xdf, 0x58, 0x5e, 0xcf, 0x67,
0xd1, 0x5e, 0x0a, 0xef, 0x94, 0xdb, 0x21, 0x5c, 0x9f, 0x59, 0xcb, 0x46, 0x47, 0xd0, 0x8d, 0xb3,
0x6c, 0x11, 0x2f, 0x31, 0x7d, 0x85, 0x1b, 0xef, 0x78, 0x97, 0x8c, 0xb5, 0x29, 0xff, 0x39, 0xb8,
0xcd, 0xef, 0xe9, 0x14, 0x80, 0x2b, 0x29, 0x91, 0x1b, 0xa5, 0x73, 0x8f, 0x8c, 0x9c, 0x83, 0x98,
0x51, 0x2d, 0xb1, 0xd6, 0x94, 0x7f, 0x01, 0x6e, 0x23, 0x50, 0x0a, 0xc7, 0x32, 0xbe, 0xc2, 0xfd,
0x0a, 0x98, 0xc5, 0x3b, 0xce, 0x6c, 0x32, 0xb4, 0xb7, 0x76, 0x99, 0xc5, 0xfe, 0x37, 0x02, 0xad,
0xc8, 0x37, 0xda, 0xce, 0xe0, 0x44, 0xe4, 0x79, 0x81, 0xba, 0x32, 0x56, 0x27, 0x3a, 0x86, 0x0e,
0x4f, 0x05, 0x4a, 0x33, 0x9f, 0xd9, 0xad, 0xb8, 0xe1, 0xad, 0x72, 0x3b, 0xec, 0x44, 0x15, 0xc7,
0x1a, 0x95, 0x9e, 0x43, 0x97, 0xa7, 0xa2, 0x16, 0xf6, 0x97, 0x0f, 0x7b, 0xe5, 0x76, 0xd8, 0x8d,
0x16, 0xf3, 0x66, 0xbe, 0x3d, 0x33, 0x7d, 0x0f, 0xbd, 0xfa, 0x35, 0x2f, 0x51, 0xaf, 0x05, 0x47,
0xfa, 0x12, 0x9c, 0x17, 0x68, 0xe8, 0x59, 0xb3, 0x86, 0x83, 0x02, 0xf4, 0xef, 0xfd, 0xc7, 0xfb,
0xde, 0x97, 0x5f, 0x7f, 0xbe, 0x1e, 0x51, 0x7a, 0xd7, 0x96, 0x68, 0x7d, 0xde, 0x34, 0x30, 0x7c,
0xf6, 0xa3, 0x1c, 0x90, 0x9f, 0xe5, 0x80, 0xfc, 0x2e, 0x07, 0xe4, 0xdd, 0xe3, 0x44, 0x98, 0x8f,
0xc5, 0x72, 0xc2, 0xd5, 0x55, 0x10, 0x6b, 0xdb, 0xc5, 0x4f, 0x16, 0x3c, 0xe1, 0xab, 0xe0, 0x9f,
0x16, 0x2f, 0x4f, 0x6c, 0x01, 0x2f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x5e, 0x8a, 0x67,
0xdf, 0x02, 0x00, 0x00,
}

@@ -33,6 +33,7 @@ message OIDCConfig {
string name = 1;
string issuer = 2;
string clientID = 3 [(gogoproto.customname) = "ClientID"];
string cliClientID = 4 [(gogoproto.customname) = "CLIClientID"];
}

// SettingsService

@@ -380,7 +380,7 @@ func WaitUntil(t *testing.T, condition wait.ConditionFunc) {

type FakeGitClientFactory struct{}

func (f *FakeGitClientFactory) NewClient(repoURL, path, username, password, sshPrivateKey string) (git.Client, error) {
func (f *FakeGitClientFactory) NewClient(repoURL, path, username, password, sshPrivateKey string, insecureIgnoreHostKey bool) (git.Client, error) {
return &FakeGitClient{
root: path,
}, nil

@@ -0,0 +1,33 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterdummies.argoproj.io
spec:
group: argoproj.io
version: v1alpha1
scope: Cluster
names:
kind: ClusterDummy
plural: clusterdummies

---
apiVersion: argoproj.io/v1alpha1
kind: ClusterDummy
metadata:
name: cluster-dummy-crd-instance

---
apiVersion: argoproj.io/v1alpha1
kind: ClusterDummy
metadata:
name: cluster-dummy-crd-instance
namespace: default

---
apiVersion: argoproj.io/v1alpha1
kind: ClusterDummy
metadata:
name: cluster-dummy-crd-instance
namespace: kube-system

@@ -0,0 +1,32 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: dummies.argoproj.io
spec:
group: argoproj.io
version: v1alpha1
scope: Namespaced
names:
kind: Dummy
plural: dummies

---
apiVersion: argoproj.io/v1alpha1
kind: Dummy
metadata:
name: dummy-crd-instance
namespace: default

---
apiVersion: argoproj.io/v1alpha1
kind: Dummy
metadata:
name: dummy-crd-instance
namespace: kube-system

---
apiVersion: argoproj.io/v1alpha1
kind: Dummy
metadata:
name: dummy-crd-instance

@@ -140,20 +140,20 @@ func GetSpecErrors(
proj *argoappv1.AppProject,
repoClientset reposerver.Clientset,
db db.ArgoDB,
) ([]argoappv1.ApplicationCondition, error) {
) ([]argoappv1.ApplicationCondition, argoappv1.ApplicationSourceType, error) {
conditions := make([]argoappv1.ApplicationCondition, 0)
if spec.Source.RepoURL == "" || spec.Source.Path == "" {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: "spec.source.repoURL and spec.source.path are required",
})
return conditions, nil
return conditions, "", nil
}

// Test the repo
conn, repoClient, err := repoClientset.NewRepoServerClient()
if err != nil {
return nil, err
return nil, "", err
}
defer util.Close(conn)
repoAccessable := false
@@ -164,7 +164,7 @@ func GetSpecErrors(
// The repo has not been added to Argo CD so we do not have credentials to access it.
// We support the mode where apps can be created from public repositories. Test the
// repo to make sure it is publicly accessible
err = git.TestRepo(spec.Source.RepoURL, "", "", "")
err = git.TestRepo(spec.Source.RepoURL, "", "", "", false)
if err != nil {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
@@ -174,12 +174,13 @@ func GetSpecErrors(
repoAccessable = true
}
} else {
return nil, err
return nil, "", err
}
} else {
repoAccessable = true
}

var appSourceType argoappv1.ApplicationSourceType
// Verify only one source type is defined
explicitSourceType, err := spec.Source.ExplicitType()
if err != nil {
@@ -190,7 +191,6 @@ func GetSpecErrors(
}

if repoAccessable {
var appSourceType argoappv1.ApplicationSourceType
if explicitSourceType != nil {
appSourceType = *explicitSourceType
} else {
@@ -249,11 +249,11 @@ func GetSpecErrors(
Message: fmt.Sprintf("cluster '%s' has not been configured", spec.Destination.Server),
})
} else {
return nil, err
return nil, "", err
}
}
}
return conditions, nil
return conditions, appSourceType, nil
}

// GetAppProject returns a project from an application
@@ -261,27 +261,6 @@ func GetAppProject(spec *argoappv1.ApplicationSpec, projLister applicationsv1.Ap
return projLister.AppProjects(ns).Get(spec.GetProject())
}

// QueryAppSourceType queries repo server for yaml files in a directory, and determines its
// application source type based on the files in the directory.
// This code is redundant to the logic in argo.GetSpecErrors, but since it's is hard to
// extract out of there. We will be throwing away this code when we remove
// componentParameterOverrides.
func QueryAppSourceType(ctx context.Context, app *argoappv1.Application, repoClientset reposerver.Clientset, db db.ArgoDB) (argoappv1.ApplicationSourceType, error) {
if t, _ := app.Spec.Source.ExplicitType(); t != nil {
return *t, nil
}
repoRes, err := db.GetRepository(ctx, app.Spec.Source.RepoURL)
if err != nil {
return "", err
}
conn, repoClient, err := repoClientset.NewRepoServerClient()
if err != nil {
return "", err
}
defer util.Close(conn)
return queryAppSourceType(ctx, &app.Spec, repoRes, repoClient)
}

func queryAppSourceType(ctx context.Context, spec *argoappv1.ApplicationSpec, repoRes *argoappv1.Repository, repoClient repository.RepoServerServiceClient) (argoappv1.ApplicationSourceType, error) {
req := repository.ListDirRequest{
Repo: &argoappv1.Repository{

|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
util/cache/client_test.go (vendored)
@@ -28,7 +28,7 @@ func TestCache(t *testing.T) {
cacheObj.Foo = "baz"
err = c.Get("key", &obj)
assert.Nil(t, err)
assert.EqualValues(t, string(obj.Foo), "foo")
assert.EqualValues(t, obj.Foo, "foo")
assert.EqualValues(t, string(obj.Bar), "bar")

err = c.Delete("key")

@@ -21,7 +21,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/kubectl/util/term"

"github.com/argoproj/argo-cd"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
)

@@ -3,7 +3,7 @@ package db

import (
"context"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"

appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"

@@ -61,7 +61,10 @@ func (db *db) CreateRepository(ctx context.Context, r *appsv1.Repository) (*apps
data[sshPrivateKey] = []byte(r.SSHPrivateKey)
}

repoInfo := settings.RepoCredentials{URL: r.Repo}
repoInfo := settings.RepoCredentials{
URL: r.Repo,
InsecureIgnoreHostKey: r.InsecureIgnoreHostKey,
}
err = db.updateSecrets(&repoInfo, r)
if err != nil {
return nil, err
@@ -87,12 +90,15 @@ func (db *db) GetRepository(ctx context.Context, repoURL string) (*appsv1.Reposi
return nil, status.Errorf(codes.NotFound, "repo '%s' not found", repoURL)
}
repoInfo := s.Repositories[index]
repo := &appsv1.Repository{Repo: repoInfo.URL}
repo := &appsv1.Repository{
Repo: repoInfo.URL,
InsecureIgnoreHostKey: repoInfo.InsecureIgnoreHostKey,
}

err = db.unmarshalFromSecretsStr(map[*string]*apiv1.SecretKeySelector{
&repo.Username: repoInfo.UsernameSecret,
&repo.Password: repoInfo.PasswordSecret,
&repo.SSHPrivateKey: repoInfo.SshPrivateKeySecret,
&repo.SSHPrivateKey: repoInfo.SSHPrivateKeySecret,
}, make(map[string]*apiv1.Secret))
if err != nil {
return nil, err
@@ -179,7 +185,7 @@ func (db *db) updateSecrets(repoInfo *settings.RepoCredentials, r *appsv1.Reposi

repoInfo.UsernameSecret = setSecretData(repoInfo.UsernameSecret, r.Username, username)
repoInfo.PasswordSecret = setSecretData(repoInfo.PasswordSecret, r.Password, password)
repoInfo.SshPrivateKeySecret = setSecretData(repoInfo.SshPrivateKeySecret, r.SSHPrivateKey, sshPrivateKey)
repoInfo.SSHPrivateKeySecret = setSecretData(repoInfo.SSHPrivateKeySecret, r.SSHPrivateKey, sshPrivateKey)
for k, v := range secretsData {
err := db.upsertSecret(k, v)
if err != nil {

@@ -31,7 +31,7 @@ type Client interface {
// ClientFactory is a factory of Git Clients
// Primarily used to support creation of mock git clients during unit testing
type ClientFactory interface {
NewClient(repoURL, path, username, password, sshPrivateKey string) (Client, error)
NewClient(repoURL, path, username, password, sshPrivateKey string, insecureIgnoreHostKey bool) (Client, error)
}

// nativeGitClient implements Client interface using git CLI
@@ -47,7 +47,7 @@ func NewFactory() ClientFactory {
return &factory{}
}

func (f *factory) NewClient(repoURL, path, username, password, sshPrivateKey string) (Client, error) {
func (f *factory) NewClient(repoURL, path, username, password, sshPrivateKey string, insecureIgnoreHostKey bool) (Client, error) {
clnt := nativeGitClient{
repoURL: repoURL,
root: path,
@@ -58,7 +58,9 @@ func (f *factory) NewClient(repoURL, path, username, password, sshPrivateKey str
return nil, err
}
auth := &ssh2.PublicKeys{User: "git", Signer: signer}
auth.HostKeyCallback = ssh.InsecureIgnoreHostKey()
if insecureIgnoreHostKey {
auth.HostKeyCallback = ssh.InsecureIgnoreHostKey()
}
clnt.auth = auth
} else if username != "" || password != "" {
auth := &http.BasicAuth{Username: username, Password: password}
@@ -103,6 +105,18 @@ func (m *nativeGitClient) Init() error {
// Fetch fetches latest updates from origin
func (m *nativeGitClient) Fetch() error {
log.Debugf("Fetching repo %s at %s", m.repoURL, m.root)
// Two techniques are used for fetching the remote depending if the remote is SSH vs. HTTPS
// If http, we fork/exec the git CLI since the go-git client does not properly support git
// providers such as AWS CodeCommit and Azure DevOps.
if _, ok := m.auth.(*ssh2.PublicKeys); ok {
return m.goGitFetch()
}
_, err := m.runCredentialedCmd("git", "fetch", "origin", "--tags", "--force")
return err
}

// goGitFetch fetches the remote using go-git
func (m *nativeGitClient) goGitFetch() error {
repo, err := git.PlainOpen(m.root)
if err != nil {
return err
@@ -119,17 +133,6 @@ func (m *nativeGitClient) Fetch() error {
return nil
}
return err

// git fetch does not update the HEAD reference. The following command will update the local
// knowledge of what remote considers the “default branch”
// See: https://stackoverflow.com/questions/8839958/how-does-origin-head-get-set
// NOTE(jessesuen): disabling the following code because:
// 1. we no longer perform a `git checkout HEAD`, instead relying on `ls-remote` and checking
// out a specific SHA1.
// 2. This command is the only other command that we use (excluding fetch/ls-remote) which
// requires remote access, and there appears to be no go-git equivalent to this command.
// _, err = m.runCmd("git", "remote", "set-head", "origin", "-a")
// return err
}

// LsFiles lists the local working tree, including only files that are under source control
@@ -241,13 +244,27 @@ func (m *nativeGitClient) CommitSHA() (string, error) {
// runCmd is a convenience function to run a command in a given directory and return its output
func (m *nativeGitClient) runCmd(command string, args ...string) (string, error) {
cmd := exec.Command(command, args...)
return m.runCmdOutput(cmd)
}

// runCredentialedCmd is a convenience function to run a git command with username/password credentials
func (m *nativeGitClient) runCredentialedCmd(command string, args ...string) (string, error) {
cmd := exec.Command(command, args...)
if auth, ok := m.auth.(*http.BasicAuth); ok {
cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_ASKPASS=git-ask-pass.sh"))
cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_USERNAME=%s", auth.Username))
cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PASSWORD=%s", auth.Password))
}
return m.runCmdOutput(cmd)
}

func (m *nativeGitClient) runCmdOutput(cmd *exec.Cmd) (string, error) {
log.Debug(strings.Join(cmd.Args, " "))
cmd.Dir = m.root
env := os.Environ()
env = append(env, "HOME=/dev/null")
env = append(env, "GIT_CONFIG_NOSYSTEM=true")
env = append(env, "GIT_ASKPASS=")
cmd.Env = env
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "HOME=/dev/null")
cmd.Env = append(cmd.Env, "GIT_CONFIG_NOSYSTEM=true")
cmd.Env = append(cmd.Env, "GIT_CONFIG_NOGLOBAL=true")
out, err := cmd.Output()
if len(out) > 0 {
log.Debug(string(out))

@@ -65,8 +65,8 @@ func IsSSHURL(url string) bool {
}

// TestRepo tests if a repo exists and is accessible with the given credentials
func TestRepo(repo, username, password string, sshPrivateKey string) error {
clnt, err := NewFactory().NewClient(repo, "", username, password, sshPrivateKey)
func TestRepo(repo, username, password string, sshPrivateKey string, insecureIgnoreHostKey bool) error {
clnt, err := NewFactory().NewClient(repo, "", username, password, sshPrivateKey, insecureIgnoreHostKey)
if err != nil {
return err
}

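Both NewClient and TestRepo now thread insecureIgnoreHostKey down to the SSH auth setup shown earlier: host-key verification is skipped only when the caller explicitly opts in, instead of unconditionally as before. A hedged sketch of that gate in isolation (in the real client, a nil callback leaves go-git's default, verifying behavior in place):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// hostKeyCallback returns ssh.InsecureIgnoreHostKey only on explicit opt-in;
// a nil return means the client keeps its default host-key verification.
func hostKeyCallback(insecureIgnoreHostKey bool) ssh.HostKeyCallback {
	if insecureIgnoreHostKey {
		return ssh.InsecureIgnoreHostKey()
	}
	return nil
}

func main() {
	fmt.Println(hostKeyCallback(false) == nil) // true: default verification kept
	fmt.Println(hostKeyCallback(true) == nil)  // false: verification skipped
}
```
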
@@ -1,13 +1,24 @@
package git

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"

"github.com/stretchr/testify/assert"
)

// TODO: move this into shared test package after resolving import cycle
const (
// This is a throwaway gitlab test account/repo with a read-only personal access token for the
// purposes of testing private git repos
PrivateGitRepo = "https://gitlab.com/argo-cd-test/test-apps.git"
PrivateGitUsername = "blah"
PrivateGitPassword = "B5sBDeoqAVUouoHkrovy"
)

func TestIsCommitSHA(t *testing.T) {
assert.True(t, IsCommitSHA("9d921f65f3c5373b682e2eb4b37afba6592e8f8b"))
assert.True(t, IsCommitSHA("9D921F65F3C5373B682E2EB4B37AFBA6592E8F8B"))
@@ -92,7 +103,7 @@ func TestSameURL(t *testing.T) {
}

func TestLsRemote(t *testing.T) {
clnt, err := NewFactory().NewClient("https://github.com/argoproj/argo-cd.git", "/tmp", "", "", "")
clnt, err := NewFactory().NewClient("https://github.com/argoproj/argo-cd.git", "/tmp", "", "", "", false)
assert.NoError(t, err)
xpass := []string{
"HEAD",
@@ -127,43 +138,35 @@ func TestLsRemote(t *testing.T) {
func TestGitClient(t *testing.T) {
testRepos := []string{
"https://github.com/argoproj/argocd-example-apps",
// TODO: add this back when azure repos are supported
//"https://jsuen0437@dev.azure.com/jsuen0437/jsuen/_git/jsuen",
"https://jsuen0437@dev.azure.com/jsuen0437/jsuen/_git/jsuen",
}
for _, repo := range testRepos {
dirName, err := ioutil.TempDir("", "git-client-test-")
assert.NoError(t, err)
defer func() { _ = os.RemoveAll(dirName) }()

clnt, err := NewFactory().NewClient(repo, dirName, "", "", "")
clnt, err := NewFactory().NewClient(repo, dirName, "", "", "", false)
assert.NoError(t, err)

testGitClient(t, clnt)
}
}

// TestPrivateGitRepo tests the ability to operate on a private git repo. This test needs to be run
// manually since we do not have a private git repo for testing
//
// export TEST_REPO=https://github.com/jessesuen/private-argocd-example-apps
// export GITHUB_TOKEN=<YOURGITHUBTOKEN>
// go test -v -run ^(TestPrivateGitRepo)$ ./util/git/...
// TestPrivateGitRepo tests the ability to operate on a private git repo.
func TestPrivateGitRepo(t *testing.T) {
repo := os.Getenv("TEST_REPO")
username := os.Getenv("TEST_USERNAME")
password := os.Getenv("GITHUB_TOKEN")
if username == "" {
username = "git" // username does not matter for tokens
}
if repo == "" || password == "" {
t.Skip("skipping private git repo test since no repo or password supplied")
}
// add the hack path which has the git-ask-pass.sh shell script
osPath := os.Getenv("PATH")
hackPath, err := filepath.Abs("../../hack")
assert.NoError(t, err)
err = os.Setenv("PATH", fmt.Sprintf("%s:%s", osPath, hackPath))
assert.NoError(t, err)
defer func() { _ = os.Setenv("PATH", osPath) }()

dirName, err := ioutil.TempDir("", "git-client-test-")
assert.NoError(t, err)
defer func() { _ = os.RemoveAll(dirName) }()

clnt, err := NewFactory().NewClient(repo, dirName, username, password, "")
clnt, err := NewFactory().NewClient(PrivateGitRepo, dirName, PrivateGitUsername, PrivateGitPassword, "", false)
assert.NoError(t, err)

testGitClient(t, clnt)

@@ -5,14 +5,13 @@ import (
"encoding/json"
"fmt"

"github.com/gogo/protobuf/proto"
grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/logging"
grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
ctx_logrus "github.com/grpc-ecosystem/go-grpc-middleware/tags/logrus"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc"

"github.com/gogo/protobuf/proto"
"github.com/grpc-ecosystem/go-grpc-middleware/logging"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
"github.com/grpc-ecosystem/go-grpc-middleware/tags/logrus"
"github.com/sirupsen/logrus"
)

func logRequest(entry *logrus.Entry, info string, pbMsg interface{}, ctx context.Context, logClaims bool) {

@@ -3,8 +3,8 @@ package health
import (
"fmt"

"k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
coreV1 "k8s.io/api/core/v1"
extv1beta1 "k8s.io/api/extensions/v1beta1"

@@ -191,6 +191,7 @@ func (h *helm) helmCmd(args ...string) (string, error) {

func (h *helm) helmCmdExt(args []string, logFormat func(string) string) (string, error) {
cmd := exec.Command("helm", args...)
cmd.Env = os.Environ()
cmd.Dir = h.path
if h.home != "" {
cmd.Env = append(cmd.Env, fmt.Sprintf("HELM_HOME=%s", h.home))

util/kube/ctl.go
@@ -2,24 +2,18 @@ package kube
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
@@ -29,170 +23,91 @@ import (
|
||||
"github.com/argoproj/argo-cd/util/diff"
|
||||
)
|
||||
|
||||
type ResourcesBatch struct {
|
||||
GVK schema.GroupVersionKind
|
||||
ResourceInfo metav1.APIResource
|
||||
ListResourceVersion string
|
||||
Objects []unstructured.Unstructured
|
||||
Error error
|
||||
}
|
||||
|
||||
type CacheRefreshEvent struct {
|
||||
GVK schema.GroupVersionKind
|
||||
ResourceVersion string
|
||||
Objects []unstructured.Unstructured
|
||||
}
|
||||
|
||||
type WatchEvent struct {
|
||||
WatchEvent *watch.Event
|
||||
CacheRefresh *CacheRefreshEvent
|
||||
}
|
||||
|
||||
type CachedVersionSource func(gk schema.GroupKind) (string, error)
|
||||
|
||||
type Kubectl interface {
|
||||
ApplyResource(config *rest.Config, obj *unstructured.Unstructured, namespace string, dryRun, force bool) (string, error)
|
||||
ConvertToVersion(obj *unstructured.Unstructured, group, version string) (*unstructured.Unstructured, error)
|
||||
DeleteResource(config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, forceDelete bool) error
|
||||
GetResource(config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string) (*unstructured.Unstructured, error)
|
||||
PatchResource(config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, patchType types.PatchType, patchBytes []byte) (*unstructured.Unstructured, error)
|
||||
WatchResources(ctx context.Context, config *rest.Config, resourceFilter ResourceFilter, versionSource CachedVersionSource) (chan WatchEvent, error)
|
||||
GetResources(config *rest.Config, resourceFilter ResourceFilter, namespace string) (chan ResourcesBatch, error)
|
||||
GetAPIResources(config *rest.Config) ([]*metav1.APIResourceList, error)
|
||||
GetAPIResources(config *rest.Config, resourceFilter ResourceFilter) ([]APIResourceInfo, error)
|
||||
}
|
||||
|
||||
type KubectlCmd struct{}
|
||||
|
||||
func (k KubectlCmd) GetAPIResources(config *rest.Config) ([]*metav1.APIResourceList, error) {
|
||||
type APIResourceInfo struct {
|
||||
GroupKind schema.GroupKind
|
||||
Meta metav1.APIResource
|
||||
Interface dynamic.ResourceInterface
|
||||
}
|
||||
|
||||
type filterFunc func(apiResource *metav1.APIResource) bool
|
||||
|
||||
func filterAPIResources(config *rest.Config, resourceFilter ResourceFilter, filter filterFunc, namespace string) ([]APIResourceInfo, error) {
|
||||
dynamicIf, err := dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
disco, err := discovery.NewDiscoveryClientForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return disco.ServerResources()
|
||||
}
|
||||
|
||||
// GetResources returns all kubernetes resources
|
||||
func (k KubectlCmd) GetResources(config *rest.Config, resourceFilter ResourceFilter, namespace string) (chan ResourcesBatch, error) {
|
||||
res := make(chan ResourcesBatch)
|
||||
|
||||
listSupported := func(apiResource *metav1.APIResource) bool {
|
||||
return isSupportedVerb(apiResource, listVerb)
|
||||
}
|
||||
apiResIfs, err := filterAPIResources(config, resourceFilter, listSupported, namespace)
|
||||
serverResources, err := disco.ServerPreferredResources()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if len(serverResources) == 0 {
|
||||
return nil, err
|
||||
}
|
||||
log.Warnf("Partial success when performing preferred resource discovery: %v", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(res)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(apiResIfs))
|
||||
for _, apiResIf := range apiResIfs {
|
||||
go func(apiResIf apiResourceInterface) {
|
||||
defer wg.Done()
|
||||
batch := ResourcesBatch{
|
||||
GVK: apiResIf.groupVersion.WithKind(apiResIf.apiResource.Kind),
|
||||
ResourceInfo: apiResIf.apiResource,
|
||||
}
|
||||
list, err := apiResIf.resourceIf.List(metav1.ListOptions{})
|
||||
apiResIfs := make([]APIResourceInfo, 0)
|
||||
for _, apiResourcesList := range serverResources {
|
||||
gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion)
|
||||
if err != nil {
|
||||
gv = schema.GroupVersion{}
|
||||
}
|
||||
for _, apiResource := range apiResourcesList.APIResources {
|
||||
if resourceFilter.IsExcludedResource(gv.Group, apiResource.Kind, config.Host) {
|
||||
continue
|
||||
}
|
||||
if _, ok := isObsoleteExtensionsGroupKind(gv.Group, apiResource.Kind); ok {
|
||||
continue
|
||||
}
|
||||
if filter(&apiResource) {
|
||||
resource := ToGroupVersionResource(apiResourcesList.GroupVersion, &apiResource)
|
||||
resourceIf := ToResourceInterface(dynamicIf, &apiResource, resource, namespace)
|
||||
gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion)
|
||||
if err != nil {
|
||||
if !apierr.IsNotFound(err) {
|
||||
batch.Error = err
|
||||
res <- batch
|
||||
}
|
||||
return
|
||||
return nil, err
|
||||
}
|
||||
batch.ListResourceVersion = list.GetResourceVersion()
|
||||
batch.Objects = list.Items
|
||||
res <- batch
|
||||
}(apiResIf)
|
||||
apiResIf := APIResourceInfo{
|
||||
GroupKind: schema.GroupKind{Group: gv.Group, Kind: apiResource.Kind},
|
||||
Meta: apiResource,
|
||||
Interface: resourceIf,
|
||||
}
|
||||
apiResIfs = append(apiResIfs, apiResIf)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}()
|
||||
|
||||
return res, nil
|
||||
}
|
||||
return apiResIfs, nil
|
||||
}
|
||||
|
||||
const watchResourcesRetryTimeout = 1 * time.Second
|
||||
|
||||
// isSupportedVerb returns whether or not an APIResource supports a specific verb
func isSupportedVerb(apiResource *metav1.APIResource, verb string) bool {
	for _, v := range apiResource.Verbs {
		if v == verb {
			return true
		}
	}
	return false
}

func (k KubectlCmd) GetAPIResources(config *rest.Config, resourceFilter ResourceFilter) ([]APIResourceInfo, error) {
	apiResIfs, err := filterAPIResources(config, resourceFilter, func(apiResource *metav1.APIResource) bool {
		return isSupportedVerb(apiResource, listVerb) && isSupportedVerb(apiResource, watchVerb)
	}, "")
	if err != nil {
		return nil, err
	}
	return apiResIfs, err
}

// WatchResources watches all the existing resources in the cluster provided by the config. The method retries the watch with the most recent resource version stored in the cache.
// WatchResources returns a channel which contains either a watch event with updated resource info, or a new list of resources if the cached resource version has expired.
func (k KubectlCmd) WatchResources(
	ctx context.Context,
	config *rest.Config,
	resourceFilter ResourceFilter,
	versionSource CachedVersionSource,
) (chan WatchEvent, error) {
	watchSupported := func(apiResource *metav1.APIResource) bool {
		return isSupportedVerb(apiResource, watchVerb)
	}
	log.Infof("Start watching for resources changes in cluster %s", config.Host)
	apiResIfs, err := filterAPIResources(config, resourceFilter, watchSupported, "")
	if err != nil {
		return nil, err
	}
	ch := make(chan WatchEvent)
	go func() {
		var wg sync.WaitGroup
		wg.Add(len(apiResIfs))
		for _, a := range apiResIfs {
			go func(apiResIf apiResourceInterface) {
				defer wg.Done()

				util.RetryUntilSucceed(func() (err error) {
					defer func() {
						if r := recover(); r != nil {
							message := fmt.Sprintf("Recovered from panic: %+v\n%s", r, debug.Stack())
							log.Error(message)
							err = errors.New(message)
						}
					}()
					watchCh := WatchWithRetry(ctx, func() (i watch.Interface, e error) {
						gvk := apiResIf.groupVersion.WithKind(apiResIf.apiResource.Kind)
						resourceVersion, err := versionSource(gvk.GroupKind())
						if err != nil {
							return nil, err
						}
						w, err := apiResIf.resourceIf.Watch(metav1.ListOptions{
							ResourceVersion: resourceVersion,
						})
						if apierr.IsGone(err) {
							log.Infof("Resource version of %s has expired at cluster %s, reloading list", gvk, config.Host)
							list, err := apiResIf.resourceIf.List(metav1.ListOptions{})
							if err != nil {
								return nil, err
							}
							ch <- WatchEvent{
								CacheRefresh: &CacheRefreshEvent{
									GVK:             gvk,
									ResourceVersion: list.GetResourceVersion(),
									Objects:         list.Items,
								},
							}
							return apiResIf.resourceIf.Watch(metav1.ListOptions{ResourceVersion: list.GetResourceVersion()})
						}
						return w, err
					})
					for next := range watchCh {
						if next.Error != nil {
							return next.Error
						}
						ch <- WatchEvent{
							WatchEvent: &watch.Event{
								Object: next.Object,
								Type:   next.Type,
							},
						}
					}
					return nil
				}, fmt.Sprintf("watch resources %s %s/%s", config.Host, apiResIf.groupVersion, apiResIf.apiResource.Kind), ctx, watchResourcesRetryTimeout)
			}(a)
		}
		wg.Wait()
		close(ch)
		log.Infof("Stop watching for resources changes in cluster %s", config.Host)
	}()
	return ch, nil
}
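
A minimal consumer sketch for the channel returned by WatchResources. It assumes CachedVersionSource has the func(schema.GroupKind) (string, error) shape implied by the versionSource call above; everything else uses only names from this diff.

	k := KubectlCmd{}
	events, err := k.WatchResources(ctx, config, resourceFilter, func(gk schema.GroupKind) (string, error) {
		return "", nil // no cached version: start each watch from current cluster state
	})
	if err != nil {
		return err
	}
	for ev := range events {
		switch {
		case ev.CacheRefresh != nil:
			// cached resource version expired: replace the cached objects wholesale
		case ev.WatchEvent != nil:
			// incremental add/update/delete for a single object
		}
	}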

// GetResource returns resource

@@ -75,6 +75,10 @@ func (k *ResourceKey) String() string {
	return fmt.Sprintf("%s/%s/%s/%s", k.Group, k.Kind, k.Namespace, k.Name)
}

func (k ResourceKey) GroupKind() schema.GroupKind {
	return schema.GroupKind{Group: k.Group, Kind: k.Kind}
}

func isObsoleteExtensionsGroupKind(group string, kind string) (string, bool) {
	if group == "extensions" {
		newGroup, ok := obsoleteExtensionsKinds[kind]
@@ -246,72 +250,6 @@ func IsCRD(obj *unstructured.Unstructured) bool {
	return IsCRDGroupVersionKind(obj.GroupVersionKind())
}

type apiResourceInterface struct {
	groupVersion schema.GroupVersion
	apiResource  metav1.APIResource
	resourceIf   dynamic.ResourceInterface
}

type filterFunc func(apiResource *metav1.APIResource) bool

func filterAPIResources(config *rest.Config, resourceFilter ResourceFilter, filter filterFunc, namespace string) ([]apiResourceInterface, error) {
	dynamicIf, err := dynamic.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	disco, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		return nil, err
	}
	serverResources, err := disco.ServerPreferredResources()
	if err != nil {
		if len(serverResources) == 0 {
			return nil, err
		}
		log.Warnf("Partial success when performing preferred resource discovery: %v", err)
	}
	apiResIfs := make([]apiResourceInterface, 0)
	for _, apiResourcesList := range serverResources {
		gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion)
		if err != nil {
			gv = schema.GroupVersion{}
		}
		if resourceFilter.IsExcludedResource(gv.Group, apiResourcesList.Kind, config.Host) {
			continue
		}
		for _, apiResource := range apiResourcesList.APIResources {
			if _, ok := isObsoleteExtensionsGroupKind(gv.Group, apiResource.Kind); ok || gv.Group == "" && apiResource.Kind == "Event" {
				continue
			}
			if filter(&apiResource) {
				resource := ToGroupVersionResource(apiResourcesList.GroupVersion, &apiResource)
				resourceIf := ToResourceInterface(dynamicIf, &apiResource, resource, namespace)
				gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion)
				if err != nil {
					return nil, err
				}
				apiResIf := apiResourceInterface{
					groupVersion: gv,
					apiResource:  apiResource,
					resourceIf:   resourceIf,
				}
				apiResIfs = append(apiResIfs, apiResIf)
			}
		}
	}
	return apiResIfs, nil
}
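
A short illustration of the filterFunc hook that filterAPIResources accepts: any predicate over the discovered APIResource metadata works. This sketch reuses listVerb from this package plus a literal "delete" verb; metav1.NamespaceAll is the empty string, so the resulting interfaces are cluster-wide.

	listAndDeletable := func(apiResource *metav1.APIResource) bool {
		return isSupportedVerb(apiResource, listVerb) && isSupportedVerb(apiResource, "delete")
	}
	apiResIfs, err := filterAPIResources(config, resourceFilter, listAndDeletable, metav1.NamespaceAll)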

// isSupportedVerb returns whether or not an APIResource supports a specific verb
func isSupportedVerb(apiResource *metav1.APIResource, verb string) bool {
	for _, v := range apiResource.Verbs {
		if v == verb {
			return true
		}
	}
	return false
}

// See: https://github.com/ksonnet/ksonnet/blob/master/utils/client.go
func ServerResourceForGroupVersionKind(disco discovery.DiscoveryInterface, gvk schema.GroupVersionKind) (*metav1.APIResource, error) {
	resources, err := disco.ServerResourcesForGroupVersion(gvk.GroupVersion().String())

@@ -1,12 +1,10 @@
package kubetest

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/rest"

	"github.com/argoproj/argo-cd/util/kube"
@@ -18,27 +16,15 @@ type KubectlOutput struct {
}

type MockKubectlCmd struct {
	APIResources []*v1.APIResourceList
	Resources    []kube.ResourcesBatch
	APIResources []kube.APIResourceInfo
	Commands     map[string]KubectlOutput
	Events       chan kube.WatchEvent
	Events       chan watch.Event
}

func (k MockKubectlCmd) GetAPIResources(config *rest.Config) ([]*v1.APIResourceList, error) {
func (k MockKubectlCmd) GetAPIResources(config *rest.Config, resourceFilter kube.ResourceFilter) ([]kube.APIResourceInfo, error) {
	return k.APIResources, nil
}

func (k MockKubectlCmd) GetResources(config *rest.Config, resourceFilter kube.ResourceFilter, namespace string) (chan kube.ResourcesBatch, error) {
	res := make(chan kube.ResourcesBatch)
	go func() {
		defer close(res)
		for i := range k.Resources {
			res <- k.Resources[i]
		}
	}()
	return res, nil
}
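
A hypothetical test wiring for this mock, built only from names in the diff: pre-canned batches replay through GetResources, so callers can be exercised without a live cluster. The zero-valued resourceFilter is ignored by the mock.

	mock := MockKubectlCmd{
		Resources: []kube.ResourcesBatch{{Objects: []unstructured.Unstructured{}}},
	}
	var filter kube.ResourceFilter // ignored by the mock
	batches, _ := mock.GetResources(&rest.Config{}, filter, "default")
	for batch := range batches {
		_ = batch.Objects // assert on the replayed objects here
	}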

func (k MockKubectlCmd) GetResource(config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string) (*unstructured.Unstructured, error) {
	return nil, nil
}
@@ -47,12 +33,6 @@ func (k MockKubectlCmd) PatchResource(config *rest.Config, gvk schema.GroupVersi
	return nil, nil
}

func (k MockKubectlCmd) WatchResources(
	ctx context.Context, config *rest.Config, resourceFilter kube.ResourceFilter, versionSource kube.CachedVersionSource) (chan kube.WatchEvent, error) {

	return k.Events, nil
}

func (k MockKubectlCmd) DeleteResource(config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, forceDelete bool) error {
	command, ok := k.Commands[name]
	if !ok {

@@ -6,6 +6,7 @@ import (
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"strings"

	argoexec "github.com/argoproj/pkg/exec"
@@ -24,13 +25,22 @@ type Kustomize interface {
	Build(opts *v1alpha1.ApplicationSourceKustomize) ([]*unstructured.Unstructured, []*v1alpha1.KustomizeImageTag, error)
}

type GitCredentials struct {
	Username string
	Password string
}

// NewKustomizeApp creates a new wrapper to run commands on the `kustomize` command-line tool.
func NewKustomizeApp(path string) Kustomize {
	return &kustomize{path: path}
func NewKustomizeApp(path string, creds *GitCredentials) Kustomize {
	return &kustomize{
		path:  path,
		creds: creds,
	}
}

type kustomize struct {
	path string
	path  string
	creds *GitCredentials
}

func (k *kustomize) Build(opts *v1alpha1.ApplicationSourceKustomize) ([]*unstructured.Unstructured, []*v1alpha1.KustomizeImageTag, error) {
@@ -66,7 +76,15 @@ func (k *kustomize) Build(opts *v1alpha1.ApplicationSourceKustomize) ([]*unstruc
	}
}

	out, err := argoexec.RunCommand(commandName, "build", k.path)
	cmd := exec.Command(commandName, "build", k.path)
	cmd.Env = os.Environ()
	if k.creds != nil {
		cmd.Env = append(cmd.Env, "GIT_ASKPASS=git-ask-pass.sh")
		cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_USERNAME=%s", k.creds.Username))
		cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PASSWORD=%s", k.creds.Password))
	}

	out, err := argoexec.RunCommandExt(cmd)
	if err != nil {
		return nil, nil, err
	}
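
A usage sketch using only this diff's API: credentials flow to kustomize's remote-base fetches via the GIT_ASKPASS helper, so the token never appears on the command line. The path, account name, and environment variable below are illustrative placeholders.

	kust := NewKustomizeApp("./apps/guestbook", &GitCredentials{
		Username: "deploy-bot",           // assumed read-only account
		Password: os.Getenv("GIT_TOKEN"), // assumed token source
	})
	objs, imageTags, err := kust.Build(nil)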
@@ -103,7 +121,6 @@ var KustomizationNames = []string{"kustomization.yaml", "kustomization.yml", "Ku
func (k *kustomize) findKustomization() (string, error) {
	for _, file := range KustomizationNames {
		kustomization := filepath.Join(k.path, file)
		log.Infof("path=%s, file=%s", k.path, file)
		if _, err := os.Stat(kustomization); err == nil {
			return kustomization, nil
		}
@@ -131,8 +148,6 @@ func (k *kustomize) getKustomizationVersion() (int, error) {
		return 0, err
	}

	log.Infof("using kustomization=%s", kustomizationFile)

	dat, err := ioutil.ReadFile(kustomizationFile)
	if err != nil {
		return 0, err
@@ -170,6 +185,9 @@ func getImageParameters(objs []*unstructured.Unstructured) []*v1alpha1.Kustomize
			Value: version,
		})
	}
	sort.Slice(params, func(i, j int) bool {
		return params[i].Name < params[j].Name
	})
	return params
}

@@ -1,18 +1,28 @@
package kustomize

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"testing"

	"github.com/argoproj/pkg/exec"
	"github.com/stretchr/testify/assert"

	"github.com/argoproj/pkg/exec"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)

// TODO: move this into shared test package after resolving import cycle
const (
	// This is a throwaway gitlab test account/repo with a read-only personal access token for the
	// purposes of testing private git repos
	PrivateGitRepo     = "https://gitlab.com/argo-cd-test/test-apps.git"
	PrivateGitUsername = "blah"
	PrivateGitPassword = "B5sBDeoqAVUouoHkrovy"
)

const kustomization1 = "kustomization_yaml"
const kustomization2a = "kustomization_yml"
const kustomization2b = "Kustomization"
@@ -33,7 +43,7 @@ func TestKustomizeBuild(t *testing.T) {
	appPath, err := testDataDir()
	assert.Nil(t, err)
	namePrefix := "namePrefix-"
	kustomize := NewKustomizeApp(appPath)
	kustomize := NewKustomizeApp(appPath, nil)
	kustomizeSource := v1alpha1.ApplicationSourceKustomize{
		NamePrefix: namePrefix,
		ImageTags: []v1alpha1.KustomizeImageTag{
@@ -98,9 +108,28 @@ func TestGetCommandName(t *testing.T) {
}

func TestIsKustomization(t *testing.T) {
	assert.True(t, IsKustomization("kustomization.yaml"))
	assert.True(t, IsKustomization("kustomization.yml"))
	assert.True(t, IsKustomization("Kustomization"))
	assert.False(t, IsKustomization("rubbish.yml"))
}

// TestPrivateRemoteBase verifies we can supply git credentials to a private remote base
func TestPrivateRemoteBase(t *testing.T) {
	os.Setenv("GIT_CONFIG_NOSYSTEM", "true")
	defer os.Unsetenv("GIT_CONFIG_NOSYSTEM")

	// add the hack path which has the git-ask-pass.sh shell script
	osPath := os.Getenv("PATH")
	hackPath, err := filepath.Abs("../../hack")
	assert.NoError(t, err)
	err = os.Setenv("PATH", fmt.Sprintf("%s:%s", osPath, hackPath))
	assert.NoError(t, err)
	defer func() { _ = os.Setenv("PATH", osPath) }()

	kust := NewKustomizeApp("./testdata/private-remote-base", &GitCredentials{Username: PrivateGitUsername, Password: PrivateGitPassword})

	objs, _, err := kust.Build(nil)
	assert.NoError(t, err)
	assert.Len(t, objs, 2)
}

7
util/kustomize/testdata/private-remote-base/kustomization.yaml
vendored
Normal file
@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

bases:
- https://gitlab.com/argo-cd-test/test-apps//remote-base

namePrefix: child-

@@ -62,11 +62,6 @@ type ClientApp struct {
	cache *cache.Cache
}

type appState struct {
	// ReturnURL is the URL in which to redirect a user back to after completing an OAuth2 login
	ReturnURL string `json:"returnURL"`
}

// NewClientApp will register the Argo CD client app (either via Dex or external OIDC) and return an
// object which has HTTP handlers for handling the HTTP responses for login and callback
func NewClientApp(settings *settings.ArgoCDSettings, cache *cache.Cache, dexServerAddr string) (*ClientApp, error) {

@@ -83,13 +83,15 @@ type OIDCConfig struct {
	Issuer       string `json:"issuer,omitempty"`
	ClientID     string `json:"clientID,omitempty"`
	ClientSecret string `json:"clientSecret,omitempty"`
	CLIClientID  string `json:"cliClientID,omitempty"`
}

type RepoCredentials struct {
	URL                 string                   `json:"url,omitempty"`
	UsernameSecret      *apiv1.SecretKeySelector `json:"usernameSecret,omitempty"`
	PasswordSecret      *apiv1.SecretKeySelector `json:"passwordSecret,omitempty"`
	SshPrivateKeySecret *apiv1.SecretKeySelector `json:"sshPrivateKeySecret,omitempty"`
	URL                   string                   `json:"url,omitempty"`
	UsernameSecret        *apiv1.SecretKeySelector `json:"usernameSecret,omitempty"`
	PasswordSecret        *apiv1.SecretKeySelector `json:"passwordSecret,omitempty"`
	SSHPrivateKeySecret   *apiv1.SecretKeySelector `json:"sshPrivateKeySecret,omitempty"`
	InsecureIgnoreHostKey bool                     `json:"insecureIgnoreHostKey,omitempty"`
}

type HelmRepoCredentials struct {
@@ -234,7 +236,7 @@ func (mgr *SettingsManager) MigrateLegacyRepoSettings(settings *ArgoCDSettings)
	}
}
if sshPrivateKey, ok := s.Data["sshPrivateKey"]; ok && string(sshPrivateKey) != "" {
	cred.SshPrivateKeySecret = &apiv1.SecretKeySelector{
	cred.SSHPrivateKeySecret = &apiv1.SecretKeySelector{
		LocalObjectReference: apiv1.LocalObjectReference{Name: s.Name},
		Key: "sshPrivateKey",
	}
@@ -818,6 +820,7 @@ func (a *ArgoCDSettings) GetAppInstanceLabelKey() string {
func (a *ArgoCDSettings) getExcludedResources() []ExcludedResource {
	coreExcludedResources := []ExcludedResource{
		{APIGroups: []string{"events.k8s.io", "metrics.k8s.io"}},
		{APIGroups: []string{""}, Kinds: []string{"Event"}},
	}
	return append(coreExcludedResources, a.ResourceExclusions...)
}
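
Illustrative arithmetic for getExcludedResources, assuming one user-configured exclusion: the two core entries always come first, so the result has three entries.

	a := &ArgoCDSettings{ResourceExclusions: []ExcludedResource{
		{APIGroups: []string{"example.com"}, Kinds: []string{"Noisy"}}, // hypothetical exclusion
	}}
	excluded := a.getExcludedResources() // core entries + the custom one: len(excluded) == 3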

@@ -50,6 +50,15 @@ type CertOptions struct {

type ConfigCustomizer = func(*tls.Config)

// BestEffortSystemCertPool returns system cert pool as best effort, otherwise an empty cert pool
func BestEffortSystemCertPool() *x509.CertPool {
	rootCAs, _ := x509.SystemCertPool()
	if rootCAs == nil {
		return x509.NewCertPool()
	}
	return rootCAs
}
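
A small usage sketch: seed a tls.Config with the best-effort pool before appending any custom CA, so a failed system-pool lookup degrades to an empty pool instead of an error. caPEM is a placeholder for caller-supplied certificate bytes.

	pool := BestEffortSystemCertPool()
	// pool.AppendCertsFromPEM(caPEM) // optionally trust an extra CA
	tlsConfig := &tls.Config{RootCAs: pool}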

func getTLSVersionByString(version string) (uint16, error) {
	if version == "" {
		return 0, nil

12
util/util.go
@@ -33,7 +33,9 @@ type Closer interface {
// Close is a convenience function to close an object that has a Close() method, ignoring any errors
// Used to satisfy errcheck lint
func Close(c Closer) {
	_ = c.Close()
	if err := c.Close(); err != nil {
		log.Warnf("failed to close %v: %v", c, err)
	}
}
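
A typical call site for Close, unchanged by this diff: deferring cleanup of any io.Closer without tripping errcheck (conn is a placeholder).

	defer util.Close(conn)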

// DeleteFile is best effort deletion of a file
@@ -83,10 +85,13 @@ func MakeSignature(size int) ([]byte, error) {
// RetryUntilSucceed keeps retrying the given action with the specified timeout until the action succeeds or the specified context is done.
func RetryUntilSucceed(action func() error, desc string, ctx context.Context, timeout time.Duration) {
	ctxCompleted := false
	stop := make(chan bool)
	defer close(stop)
	go func() {
		select {
		case <-ctx.Done():
			ctxCompleted = true
		case <-stop:
		}
	}()
	for {
@@ -130,7 +135,10 @@ func RunAllAsync(count int, action func(i int) error) (err error) {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			err = action(index)
			actionErr := action(index)
			if actionErr != nil {
				err = actionErr
			}
		}(i)
		if err != nil {
			break