Compare commits

...

17 Commits

Author SHA1 Message Date
Alexander Matyushentsev
1aaf76f230 Update manifests to v1.2.2 2019-09-24 07:53:42 -07:00
Gregor Krmelj
71ad60e89f Add cache-control HTTP header to badge response (#2328)
Since we serve the badge as an image using HTTP GET, cache systems
(incl. GitHub's CDN - Fastly) like to cache the image thus the
badge becomes stale rendering it useless. Adding the appropriate
Cache-Control HTTP header we direct cache systems and web browsers
not to cache the contents of the response.
2019-09-24 07:45:06 -07:00
Gustav Paul
9c46f77bb1 util/localconfig: prefer HOME env var over os/user (#2326)
* util/localconfig: prefer HOME env var over os/user

The os/user package requires that the current user be in /etc/passwd.
That complicates executing the argocd command in a docker container
when the UID:GID of the executing user is overridden.

This is often done in order to have files generated inside a docker
container have their ownership set to match the uid/gid of the host
user.

For example,

```sh
docker run -ti -u "$(id -u "${USER}"):$(id -g "${USER}")" argocd:latest ...
```

* Makefile: use pinned dev image dependencies to run make lint
2019-09-24 07:21:52 -07:00
Alexander Matyushentsev
b6256aae9f Codegen (#2343) 2019-09-23 14:22:49 -07:00
Alexander Matyushentsev
f802190a24 Issue #2339 - Make sure controller uses latest git version if app reconciliation result expired (#2346) 2019-09-23 14:22:36 -07:00
Alexander Matyushentsev
e07c1edece Don't fix imports in auto-generated files (#2342) 2019-09-23 14:22:31 -07:00
Alex Collins
ed15da5271 Adds support for Github Enterprise URLs (#2344) 2019-09-23 13:24:52 -07:00
Jesse Suen
d34dbeac0d Add restart action to Deployment/StatefulSet/DaemonSet (#2300) 2019-09-13 02:57:25 -07:00
Alexander Matyushentsev
c4d3a54126 Merge branch 'release-1.2' of github.com:argoproj/argo-cd into release-1.2 2019-09-12 10:26:23 -07:00
Alex Collins
622671ece4 removed e2e tests that do not work and I should not have merged 2019-09-12 10:08:49 -07:00
Alex Collins
cf6a7abd30 ported FailOnErr from master 2019-09-12 09:56:43 -07:00
Alexander Matyushentsev
a6a394ba93 Update manifests to v1.2.1 2019-09-12 09:56:15 -07:00
Alex Collins
d315814020 Fixes issue diffing secrets (#2271)
# Conflicts:
#	test/e2e/app_management_test.go
2019-09-12 09:55:15 -07:00
Seiya Muramatsu
d46872d7e8 Add --self-heal flag to argocd cli (#2296) 2019-09-12 09:45:14 -07:00
Alexander Matyushentsev
ba7f0fcb47 Issue #2290 - Fix nil pointer dereference in application controller (#2291) 2019-09-12 09:44:59 -07:00
Alexander Matyushentsev
5fcfc22298 Issue #2022 - Support limiting number of concurrent kubectl fork/execs (#2264) 2019-09-12 09:43:35 -07:00
Mitz Amano
9e486dfad4 Fix degraded proxy support for http(s) git repository (#2243) (#2249) 2019-09-05 15:38:45 -07:00
59 changed files with 1257 additions and 268 deletions

View File

@@ -60,10 +60,8 @@ commands:
name: Install Go deps
command: |
set -x
go get github.com/golangci/golangci-lint/cmd/golangci-lint
go get github.com/jstemmer/go-junit-report
go get github.com/mattn/goreman
go get golang.org/x/tools/cmd/goimports
install_tools:
steps:
- run:
@@ -71,7 +69,14 @@ commands:
command: mkdir -p /tmp/dl
- restore_cache:
keys:
- dl-v6
- dl-v7
- run:
name: Install Kubectl v1.14.0
command: |
set -x
[ -e /tmp/dl/kubectl ] || curl -sLf -C - -o /tmp/dl/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
sudo cp /tmp/dl/kubectl /usr/local/bin/kubectl
sudo chmod +x /usr/local/bin/kubectl
- run:
name: Install Kubectx v0.6.3
command: |
@@ -119,7 +124,7 @@ commands:
sudo chmod +x /usr/local/go/bin/kustomize
kustomize version
- save_cache:
key: dl-v6
key: dl-v7
paths:
- /tmp/dl
save_go_cache:
@@ -208,14 +213,13 @@ jobs:
name: Create namespace
command: |
set -x
cat /etc/rancher/k3s/k3s.yaml | sed "s/localhost/`hostname`/" | tee ~/.kube/config
echo "127.0.0.1 `hostname`" | sudo tee -a /etc/hosts
kubectl create ns argocd-e2e
kubens argocd-e2e
# install the certificates (not 100% sure we need this)
sudo cp /var/lib/rancher/k3s/server/tls/token-ca.crt /usr/local/share/ca-certificates/k3s.crt
sudo update-ca-certificates
# create the kubecfg, again - not sure we need this
cat /etc/rancher/k3s/k3s.yaml | sed "s/localhost/`hostname`/" | tee ~/.kube/config
echo "127.0.0.1 `hostname`" | sudo tee -a /etc/hosts
- run:
name: Apply manifests
command: kustomize build test/manifests/base | kubectl apply -f -

9
Gopkg.lock generated
View File

@@ -89,6 +89,14 @@
pruneopts = ""
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:a6ee710e45210bafe11f2f28963571be2ac8809f9a7b675a6d2c02302a1ce1a9"
name = "github.com/bouk/monkey"
packages = ["."]
pruneopts = ""
revision = "5df1f207ff77e025801505ae4d903133a0b4353f"
version = "v1.0.0"
[[projects]]
digest = "1:e04162bd6a6d4950541bae744c968108e14913b1cebccf29f7650b573f44adb3"
name = "github.com/casbin/casbin"
@@ -1589,6 +1597,7 @@
"github.com/argoproj/pkg/errors",
"github.com/argoproj/pkg/exec",
"github.com/argoproj/pkg/time",
"github.com/bouk/monkey",
"github.com/casbin/casbin",
"github.com/casbin/casbin/model",
"github.com/casbin/casbin/persist",

View File

@@ -153,12 +153,16 @@ builder-image:
dep-ensure:
dep ensure -no-vendor
.PHONY: lint
lint:
.PHONY: lint-local
lint-local: build
# golangci-lint does not do a good job of formatting imports
goimports -local github.com/argoproj/argo-cd -w `find . ! -path './vendor/*' ! -path './pkg/client/*' -type f -name '*.go'`
goimports -local github.com/argoproj/argo-cd -w `find . ! -path './vendor/*' ! -path './pkg/client/*' ! -path '*.pb.go' ! -path '*.gw.go' -type f -name '*.go'`
GOGC=$(LINT_GOGC) golangci-lint run --fix --verbose --concurrency $(LINT_CONCURRENCY) --deadline $(LINT_DEADLINE)
.PHONY: lint
lint: dev-tools-image
$(call run-in-dev-tool,make lint-local LINT_CONCURRENCY=$(LINT_CONCURRENCY) LINT_DEADLINE=$(LINT_DEADLINE) LINT_GOGC=$(LINT_GOGC))
.PHONY: build
build:
go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`

View File

@@ -1 +1 @@
1.2.0
1.2.2

View File

@@ -46,6 +46,7 @@ func newCommand() *cobra.Command {
logLevel string
glogLevel int
metricsPort int
kubectlParallelismLimit int64
cacheSrc func() (*cache.Cache, error)
)
var command = cobra.Command{
@@ -84,7 +85,8 @@ func newCommand() *cobra.Command {
cache,
resyncDuration,
time.Duration(selfHealTimeoutSeconds)*time.Second,
metricsPort)
metricsPort,
kubectlParallelismLimit)
errors.CheckError(err)
log.Infof("Application Controller (version: %s) starting (namespace: %s)", common.GetVersion(), namespace)
@@ -109,6 +111,7 @@ func newCommand() *cobra.Command {
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", 5, "Specifies timeout between application self heal attempts")
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", 0, "Number of allowed concurrent kubectl fork/execs.")
cacheSrc = cache.AddCacheFlagsToCmd(&command)
return &command

View File

@@ -5,11 +5,8 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
@@ -20,7 +17,6 @@ import (
"time"
"github.com/ghodss/yaml"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -444,6 +440,12 @@ func setAppOptions(flags *pflag.FlagSet, app *argoappv1.Application, appOpts *ap
}
app.Spec.SyncPolicy.Automated.Prune = appOpts.autoPrune
}
if flags.Changed("self-heal") {
if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil {
log.Fatal("Cannot set --self-heal: application not configured with automatic sync")
}
app.Spec.SyncPolicy.Automated.SelfHeal = appOpts.selfHeal
}
return visited
}
@@ -544,6 +546,7 @@ type appOptions struct {
project string
syncPolicy string
autoPrune bool
selfHeal bool
namePrefix string
directoryRecurse bool
configManagementPlugin string
@@ -565,6 +568,7 @@ func addAppFlags(command *cobra.Command, opts *appOptions) {
command.Flags().StringVar(&opts.project, "project", "", "Application project name")
command.Flags().StringVar(&opts.syncPolicy, "sync-policy", "", "Set the sync policy (one of: automated, none)")
command.Flags().BoolVar(&opts.autoPrune, "auto-prune", false, "Set automatic pruning when sync is automated")
command.Flags().BoolVar(&opts.selfHeal, "self-heal", false, "Set self healing when sync is automated")
command.Flags().StringVar(&opts.namePrefix, "nameprefix", "", "Kustomize nameprefix")
command.Flags().BoolVar(&opts.directoryRecurse, "directory-recurse", false, "Recurse directory")
command.Flags().StringVar(&opts.configManagementPlugin, "config-management-plugin", "", "Config management plugin name")
@@ -871,7 +875,8 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
}
foundDiffs = true
printDiff(item.key.Name, target, live)
err = diff.PrintDiff(item.key.Name, target, live)
errors.CheckError(err)
}
}
if foundDiffs {
@@ -886,43 +891,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
return command
}
func printDiff(name string, live *unstructured.Unstructured, target *unstructured.Unstructured) {
tempDir, err := ioutil.TempDir("", "argocd-diff")
errors.CheckError(err)
targetFile := path.Join(tempDir, name)
targetData := []byte("")
if target != nil {
targetData, err = yaml.Marshal(target)
errors.CheckError(err)
}
err = ioutil.WriteFile(targetFile, targetData, 0644)
errors.CheckError(err)
liveFile := path.Join(tempDir, fmt.Sprintf("%s-live.yaml", name))
liveData := []byte("")
if live != nil {
liveData, err = yaml.Marshal(live)
errors.CheckError(err)
}
err = ioutil.WriteFile(liveFile, liveData, 0644)
errors.CheckError(err)
cmdBinary := "diff"
var args []string
if envDiff := os.Getenv("KUBECTL_EXTERNAL_DIFF"); envDiff != "" {
parts, err := shlex.Split(envDiff)
errors.CheckError(err)
cmdBinary = parts[0]
args = parts[1:]
}
cmd := exec.Command(cmdBinary, append(args, liveFile, targetFile)...)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
_ = cmd.Run()
}
// NewApplicationDeleteCommand returns a new instance of an `argocd app delete` command
func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (

View File

@@ -12,6 +12,7 @@ import (
"time"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
v1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -85,6 +86,7 @@ type ApplicationController struct {
refreshRequestedApps map[string]CompareWith
refreshRequestedAppsMutex *sync.Mutex
metricsServer *metrics.MetricsServer
kubectlSemaphore *semaphore.Weighted
}
type ApplicationControllerConfig struct {
@@ -103,6 +105,7 @@ func NewApplicationController(
appResyncPeriod time.Duration,
selfHealTimeout time.Duration,
metricsPort int,
kubectlParallelismLimit int64,
) (*ApplicationController, error) {
db := db.NewDB(namespace, settingsMgr, kubeClientset)
kubectlCmd := kube.KubectlCmd{}
@@ -123,7 +126,12 @@ func NewApplicationController(
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
}
if kubectlParallelismLimit > 0 {
ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit)
}
kubectlCmd.OnKubectlRun = ctrl.onKubectlRun
appInformer, appLister := ctrl.newApplicationInformerAndLister()
projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, cache.Indexers{})
metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
ctrl.metricsServer = metrics.NewMetricsServer(metricsAddr, appLister, func() error {
@@ -141,6 +149,23 @@ func NewApplicationController(
return &ctrl, nil
}
func (ctrl *ApplicationController) onKubectlRun(command string) (util.Closer, error) {
ctrl.metricsServer.IncKubectlExec(command)
if ctrl.kubectlSemaphore != nil {
if err := ctrl.kubectlSemaphore.Acquire(context.Background(), 1); err != nil {
return nil, err
}
ctrl.metricsServer.IncKubectlExecPending(command)
}
return util.NewCloser(func() error {
if ctrl.kubectlSemaphore != nil {
ctrl.kubectlSemaphore.Release(1)
ctrl.metricsServer.DecKubectlExecPending(command)
}
return nil
}), nil
}
func isSelfReferencedApp(app *appv1.Application, ref v1.ObjectReference) bool {
gvk := ref.GroupVersionKind()
return ref.UID == app.UID &&
@@ -683,7 +708,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
}
var localManifests []string
if opState := app.Status.OperationState; opState != nil {
if opState := app.Status.OperationState; opState != nil && opState.Operation.Sync != nil {
localManifests = opState.Operation.Sync.Manifests
}
@@ -731,9 +756,13 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
compareWith := CompareWithLatest
refreshType := appv1.RefreshTypeNormal
expired := app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
if requestedType, ok := app.IsRefreshRequested(); ok {
refreshType = requestedType
reason = fmt.Sprintf("%s refresh requested", refreshType)
if requestedType, ok := app.IsRefreshRequested(); ok || expired {
if ok {
refreshType = requestedType
reason = fmt.Sprintf("%s refresh requested", refreshType)
} else if expired {
reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
}
} else if requested, level := ctrl.isRefreshRequested(app.Name); requested {
compareWith = level
reason = fmt.Sprintf("controller refresh requested")
@@ -743,8 +772,6 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
reason = "spec.source differs"
} else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) {
reason = "spec.destination differs"
} else if expired {
reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
}
if reason != "" {
logCtx.Infof("Refreshing app status (%s), level (%d)", reason, compareWith)

View File

@@ -83,6 +83,7 @@ func newFakeController(data *fakeData) *ApplicationController {
time.Minute,
time.Minute,
common.DefaultPortArgoCDMetrics,
0,
)
if err != nil {
panic(err)
@@ -524,13 +525,29 @@ func TestNeedRefreshAppStatus(t *testing.T) {
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
// execute hard refresh if app has refresh annotation
app.Annotations = map[string]string{
common.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard),
{
// refresh app using the 'latest' level if comparison expired
app := app.DeepCopy()
ctrl.requestAppRefresh(app.Name, CompareWithRecent)
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Minute)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
}
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
{
app := app.DeepCopy()
// execute hard refresh if app has refresh annotation
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
app.Annotations = map[string]string{
common.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard),
}
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
}
}

View File

@@ -18,9 +18,11 @@ import (
type MetricsServer struct {
*http.Server
syncCounter *prometheus.CounterVec
k8sRequestCounter *prometheus.CounterVec
reconcileHistogram *prometheus.HistogramVec
syncCounter *prometheus.CounterVec
k8sRequestCounter *prometheus.CounterVec
kubectlExecCounter *prometheus.CounterVec
kubectlExecPendingGauge *prometheus.GaugeVec
reconcileHistogram *prometheus.HistogramVec
}
const (
@@ -76,6 +78,16 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, health
append(descAppDefaultLabels, "phase"),
)
appRegistry.MustRegister(syncCounter)
kubectlExecCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "argocd_kubectl_exec_total",
Help: "Number of kubectl executions",
}, []string{"command"})
appRegistry.MustRegister(kubectlExecCounter)
kubectlExecPendingGauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "argocd_kubectl_exec_pending",
Help: "Number of pending kubectl executions",
}, []string{"command"})
appRegistry.MustRegister(kubectlExecPendingGauge)
k8sRequestCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "argocd_app_k8s_request_total",
@@ -102,9 +114,11 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, health
Addr: addr,
Handler: mux,
},
syncCounter: syncCounter,
k8sRequestCounter: k8sRequestCounter,
reconcileHistogram: reconcileHistogram,
syncCounter: syncCounter,
k8sRequestCounter: k8sRequestCounter,
reconcileHistogram: reconcileHistogram,
kubectlExecCounter: kubectlExecCounter,
kubectlExecPendingGauge: kubectlExecPendingGauge,
}
}
@@ -126,6 +140,18 @@ func (m *MetricsServer) IncReconcile(app *argoappv1.Application, duration time.D
m.reconcileHistogram.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject()).Observe(duration.Seconds())
}
func (m *MetricsServer) IncKubectlExec(command string) {
m.kubectlExecCounter.WithLabelValues(command).Inc()
}
func (m *MetricsServer) IncKubectlExecPending(command string) {
m.kubectlExecPendingGauge.WithLabelValues(command).Inc()
}
func (m *MetricsServer) DecKubectlExecPending(command string) {
m.kubectlExecPendingGauge.WithLabelValues(command).Dec()
}
type appCollector struct {
store applister.ApplicationLister
}

View File

@@ -11,6 +11,10 @@ func CheckError(err error) {
}
}
func FailOnErr(_ interface{}, err error) {
// FailOnErr panics if there is an error.
// It returns the first value so you can use it after a type assertion:
// text := FailOnErr(Foo()).(string)
func FailOnErr(v interface{}, err error) interface{} {
CheckError(err)
return v
}

View File

@@ -12,7 +12,7 @@ bases:
images:
- name: argoproj/argocd
newName: argoproj/argocd
newTag: v1.2.0
newTag: v1.2.2
- name: argoproj/argocd-ui
newName: argoproj/argocd-ui
newTag: v1.2.0
newTag: v1.2.2

View File

@@ -18,7 +18,7 @@ bases:
images:
- name: argoproj/argocd
newName: argoproj/argocd
newTag: v1.2.0
newTag: v1.2.2
- name: argoproj/argocd-ui
newName: argoproj/argocd-ui
newTag: v1.2.0
newTag: v1.2.2

View File

@@ -2901,7 +2901,7 @@ spec:
- argocd-redis-ha-announce-2:26379
- --sentinelmaster
- argocd
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2955,7 +2955,7 @@ spec:
- cp
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -3010,7 +3010,7 @@ spec:
- argocd-redis-ha-announce-2:26379
- --sentinelmaster
- argocd
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
initialDelaySeconds: 5
@@ -3084,7 +3084,7 @@ spec:
- argocd-redis-ha-announce-2:26379
- --sentinelmaster
- argocd
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:

View File

@@ -2816,7 +2816,7 @@ spec:
- argocd-redis-ha-announce-2:26379
- --sentinelmaster
- argocd
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2870,7 +2870,7 @@ spec:
- cp
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -2925,7 +2925,7 @@ spec:
- argocd-redis-ha-announce-2:26379
- --sentinelmaster
- argocd
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
initialDelaySeconds: 5
@@ -2999,7 +2999,7 @@ spec:
- argocd-redis-ha-announce-2:26379
- --sentinelmaster
- argocd
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:

View File

@@ -2665,7 +2665,7 @@ spec:
- "20"
- --operation-processors
- "10"
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2719,7 +2719,7 @@ spec:
- cp
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -2782,7 +2782,7 @@ spec:
- argocd-repo-server
- --redis
- argocd-redis:6379
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
initialDelaySeconds: 5
@@ -2833,7 +2833,7 @@ spec:
- argocd-server
- --staticassets
- /shared/app
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:

View File

@@ -2580,7 +2580,7 @@ spec:
- "20"
- --operation-processors
- "10"
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2634,7 +2634,7 @@ spec:
- cp
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -2697,7 +2697,7 @@ spec:
- argocd-repo-server
- --redis
- argocd-redis:6379
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
initialDelaySeconds: 5
@@ -2748,7 +2748,7 @@ spec:
- argocd-server
- --staticassets
- /shared/app
image: argoproj/argocd:v1.2.0
image: argoproj/argocd:v1.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:

View File

@@ -36,6 +36,7 @@ import (
versionpkg "github.com/argoproj/argo-cd/pkg/apiclient/version"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/localconfig"
oidcutil "github.com/argoproj/argo-cd/util/oidc"
@@ -387,7 +388,7 @@ func (c *client) newConn() (*grpc.ClientConn, io.Closer, error) {
}
conn, e := grpc_util.BlockingDial(context.Background(), network, serverAddr, creds, dialOpts...)
closers = append(closers, conn)
return conn, &inlineCloser{close: func() error {
return conn, util.NewCloser(func() error {
var firstErr error
for i := range closers {
err := closers[i].Close()
@@ -396,7 +397,7 @@ func (c *client) newConn() (*grpc.ClientConn, io.Closer, error) {
}
}
return firstErr
}}, e
}), e
}
func (c *client) tlsConfig() (*tls.Config, error) {

View File

@@ -43,14 +43,6 @@ func (noopCodec) String() string {
return "bytes"
}
type inlineCloser struct {
close func() error
}
func (c *inlineCloser) Close() error {
return c.close()
}
func toFrame(msg []byte) []byte {
frame := append([]byte{0, 0, 0, 0}, msg...)
binary.BigEndian.PutUint32(frame, uint32(len(msg)))
@@ -185,7 +177,7 @@ func (c *client) useGRPCProxy() (net.Addr, io.Closer, error) {
}
c.proxyUsersCount = c.proxyUsersCount + 1
return c.proxyListener.Addr(), &inlineCloser{close: func() error {
return c.proxyListener.Addr(), util.NewCloser(func() error {
c.proxyMutex.Lock()
defer c.proxyMutex.Unlock()
c.proxyUsersCount = c.proxyUsersCount - 1
@@ -196,5 +188,5 @@ func (c *client) useGRPCProxy() (net.Addr, io.Closer, error) {
return nil
}
return nil
}}, nil
}), nil
}

View File

@@ -0,0 +1,4 @@
actionTests:
- action: restart
inputPath: testdata/daemonset.yaml
expectedOutputPath: testdata/daemonset-restarted.yaml

View File

@@ -0,0 +1,3 @@
actions = {}
actions["restart"] = {}
return actions

View File

@@ -0,0 +1,9 @@
local os = require("os")
if obj.spec.template.metadata == nil then
obj.spec.template.metadata = {}
end
if obj.spec.template.metadata.annotations == nil then
obj.spec.template.metadata.annotations = {}
end
obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ")
return obj

View File

@@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
deprecated.daemonset.template.generation: "3"
creationTimestamp: "2019-09-13T08:52:50Z"
generation: 3
labels:
app.kubernetes.io/instance: extensions
name: daemonset
namespace: statefulset
resourceVersion: "7472656"
selfLink: /apis/apps/v1/namespaces/statefulset/daemonsets/daemonset
uid: de04d075-d603-11e9-9e69-42010aa8005f
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
name: daemonset
template:
metadata:
annotations:
kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z"
labels:
name: daemonset
spec:
containers:
- image: k8s.gcr.io/nginx-slim:0.8
imagePullPolicy: IfNotPresent
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
status:
currentNumberScheduled: 4
desiredNumberScheduled: 4
numberAvailable: 4
numberMisscheduled: 0
numberReady: 4
observedGeneration: 3
updatedNumberScheduled: 4

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
deprecated.daemonset.template.generation: "3"
creationTimestamp: "2019-09-13T08:52:50Z"
generation: 3
labels:
app.kubernetes.io/instance: extensions
name: daemonset
namespace: statefulset
resourceVersion: "7472656"
selfLink: /apis/apps/v1/namespaces/statefulset/daemonsets/daemonset
uid: de04d075-d603-11e9-9e69-42010aa8005f
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
name: daemonset
template:
metadata:
labels:
name: daemonset
spec:
containers:
- image: k8s.gcr.io/nginx-slim:0.8
imagePullPolicy: IfNotPresent
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
status:
currentNumberScheduled: 4
desiredNumberScheduled: 4
numberAvailable: 4
numberMisscheduled: 0
numberReady: 4
observedGeneration: 3
updatedNumberScheduled: 4

View File

@@ -0,0 +1,4 @@
actionTests:
- action: restart
inputPath: testdata/deployment.yaml
expectedOutputPath: testdata/deployment-restarted.yaml

View File

@@ -0,0 +1,3 @@
actions = {}
actions["restart"] = {}
return actions

View File

@@ -0,0 +1,9 @@
local os = require("os")
if obj.spec.template.metadata == nil then
obj.spec.template.metadata = {}
end
if obj.spec.template.metadata.annotations == nil then
obj.spec.template.metadata.annotations = {}
end
obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ")
return obj

View File

@@ -0,0 +1,63 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: "2019-09-12T01:33:53Z"
generation: 1
name: nginx-deploy
namespace: default
resourceVersion: "6897444"
selfLink: /apis/apps/v1/namespaces/default/deployments/nginx-deploy
uid: 61689d6d-d4fd-11e9-9e69-42010aa8005f
spec:
progressDeadlineSeconds: 600
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app: nginx
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: nginx
annotations:
kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z"
spec:
containers:
- image: nginx:latest
imagePullPolicy: Always
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 2
conditions:
- lastTransitionTime: "2019-09-12T01:33:53Z"
lastUpdateTime: "2019-09-12T01:33:53Z"
message: Deployment does not have minimum availability.
reason: MinimumReplicasUnavailable
status: "False"
type: Available
- lastTransitionTime: "2019-09-12T01:33:53Z"
lastUpdateTime: "2019-09-12T01:34:05Z"
message: ReplicaSet "nginx-deploy-9cb4784bd" is progressing.
reason: ReplicaSetUpdated
status: "True"
type: Progressing
observedGeneration: 1
readyReplicas: 2
replicas: 3
unavailableReplicas: 1
updatedReplicas: 3

View File

@@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: "2019-09-12T01:33:53Z"
generation: 1
name: nginx-deploy
namespace: default
resourceVersion: "6897444"
selfLink: /apis/apps/v1/namespaces/default/deployments/nginx-deploy
uid: 61689d6d-d4fd-11e9-9e69-42010aa8005f
spec:
progressDeadlineSeconds: 600
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app: nginx
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx:latest
imagePullPolicy: Always
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 2
conditions:
- lastTransitionTime: "2019-09-12T01:33:53Z"
lastUpdateTime: "2019-09-12T01:33:53Z"
message: Deployment does not have minimum availability.
reason: MinimumReplicasUnavailable
status: "False"
type: Available
- lastTransitionTime: "2019-09-12T01:33:53Z"
lastUpdateTime: "2019-09-12T01:34:05Z"
message: ReplicaSet "nginx-deploy-9cb4784bd" is progressing.
reason: ReplicaSetUpdated
status: "True"
type: Progressing
observedGeneration: 1
readyReplicas: 2
replicas: 3
unavailableReplicas: 1
updatedReplicas: 3

View File

@@ -0,0 +1,4 @@
actionTests:
- action: restart
inputPath: testdata/statefulset.yaml
expectedOutputPath: testdata/statefulset-restarted.yaml

View File

@@ -0,0 +1,3 @@
actions = {}
actions["restart"] = {}
return actions

View File

@@ -0,0 +1,9 @@
local os = require("os")
if obj.spec.template.metadata == nil then
obj.spec.template.metadata = {}
end
if obj.spec.template.metadata.annotations == nil then
obj.spec.template.metadata.annotations = {}
end
obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ")
return obj

View File

@@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
creationTimestamp: "2019-09-13T08:52:54Z"
generation: 2
labels:
app.kubernetes.io/instance: extensions
name: statefulset
namespace: statefulset
resourceVersion: "7471813"
selfLink: /apis/apps/v1/namespaces/statefulset/statefulsets/statefulset
uid: dfe8fadf-d603-11e9-9e69-42010aa8005f
spec:
podManagementPolicy: OrderedReady
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app: statefulset
serviceName: statefulset
template:
metadata:
labels:
app: statefulset
annotations:
kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z"
spec:
containers:
- image: k8s.gcr.io/nginx-slim:0.8
imagePullPolicy: IfNotPresent
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
status:
collisionCount: 0
currentReplicas: 3
currentRevision: statefulset-85b7f767c6
observedGeneration: 2
readyReplicas: 3
replicas: 3
updateRevision: statefulset-85b7f767c6
updatedReplicas: 3

View File

@@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
creationTimestamp: "2019-09-13T08:52:54Z"
generation: 2
labels:
app.kubernetes.io/instance: extensions
name: statefulset
namespace: statefulset
resourceVersion: "7471813"
selfLink: /apis/apps/v1/namespaces/statefulset/statefulsets/statefulset
uid: dfe8fadf-d603-11e9-9e69-42010aa8005f
spec:
podManagementPolicy: OrderedReady
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app: statefulset
serviceName: statefulset
template:
metadata:
labels:
app: statefulset
spec:
containers:
- image: k8s.gcr.io/nginx-slim:0.8
imagePullPolicy: IfNotPresent
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
status:
collisionCount: 0
currentReplicas: 3
currentRevision: statefulset-85b7f767c6
observedGeneration: 2
readyReplicas: 3
replicas: 3
updateRevision: statefulset-85b7f767c6
updatedReplicas: 3

View File

@@ -7,13 +7,13 @@ import (
"path/filepath"
"strings"
"testing"
"time"
"github.com/bouk/monkey"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
"github.com/yudai/gojsondiff/formatter"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/errors"
appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/diff"
"github.com/argoproj/argo-cd/util/lua"
@@ -47,14 +47,14 @@ func TestLuaResourceActionsScript(t *testing.T) {
if !strings.Contains(path, "action_test.yaml") {
return nil
}
errors.CheckError(err)
assert.NoError(t, err)
dir := filepath.Dir(path)
//TODO: Change to path
yamlBytes, err := ioutil.ReadFile(dir + "/action_test.yaml")
errors.CheckError(err)
assert.NoError(t, err)
var resourceTest ActionTestStructure
err = yaml.Unmarshal(yamlBytes, &resourceTest)
errors.CheckError(err)
assert.NoError(t, err)
for i := range resourceTest.DiscoveryTests {
test := resourceTest.DiscoveryTests[i]
testName := fmt.Sprintf("discovery/%s", test.InputPath)
@@ -64,9 +64,9 @@ func TestLuaResourceActionsScript(t *testing.T) {
}
obj := getObj(filepath.Join(dir, test.InputPath))
discoveryLua, err := vm.GetResourceActionDiscovery(obj)
errors.CheckError(err)
assert.NoError(t, err)
result, err := vm.ExecuteResourceActionDiscovery(obj, discoveryLua)
errors.CheckError(err)
assert.NoError(t, err)
assert.Equal(t, test.Result, result)
})
}
@@ -75,20 +75,28 @@ func TestLuaResourceActionsScript(t *testing.T) {
testName := fmt.Sprintf("actions/%s/%s", test.Action, test.InputPath)
t.Run(testName, func(t *testing.T) {
vm := lua.VM{
UseOpenLibs: true,
// Uncomment the following line if you need to use lua libraries debugging
// purposes. Otherwise, leave this false to ensure tests reflect the same
// privileges that API server has.
//UseOpenLibs: true,
}
obj := getObj(filepath.Join(dir, test.InputPath))
action, err := vm.GetResourceAction(obj, test.Action)
errors.CheckError(err)
assert.NoError(t, err)
// freeze time so that lua test has predictable time output (will return 0001-01-01T00:00:00Z)
patch := monkey.Patch(time.Now, func() time.Time { return time.Time{} })
result, err := vm.ExecuteResourceAction(obj, action.ActionLua)
errors.CheckError(err)
patch.Unpatch()
assert.NoError(t, err)
expectedObj := getObj(filepath.Join(dir, test.ExpectedOutputPath))
// Ideally, we would use a assert.Equal to detect the difference, but the Lua VM returns a object with float64 instead of the originial int32. As a result, the assert.Equal is never true despite that the change has been applied.
// Ideally, we would use a assert.Equal to detect the difference, but the Lua VM returns a object with float64 instead of the original int32. As a result, the assert.Equal is never true despite that the change has been applied.
diffResult := diff.Diff(expectedObj, result, testNormalizer{})
if diffResult.Modified {
output, err := diffResult.ASCIIFormat(expectedObj, formatter.AsciiFormatterConfig{})
errors.CheckError(err)
assert.Fail(t, "Output does not match input:", output)
t.Error("Output does not match input:")
err = diff.PrintDiff(test.Action, expectedObj, result)
assert.NoError(t, err)
}
})
}

View File

@@ -0,0 +1,4 @@
actionTests:
- action: restart
inputPath: testdata/daemonset.yaml
expectedOutputPath: testdata/daemonset-restarted.yaml

View File

@@ -0,0 +1,3 @@
actions = {}
actions["restart"] = {}
return actions

View File

@@ -0,0 +1,9 @@
local os = require("os")
if obj.spec.template.metadata == nil then
obj.spec.template.metadata = {}
end
if obj.spec.template.metadata.annotations == nil then
obj.spec.template.metadata.annotations = {}
end
obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ")
return obj

View File

@@ -0,0 +1,47 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
annotations:
creationTimestamp: "2019-09-13T08:52:50Z"
generation: 2
labels:
app.kubernetes.io/instance: extensions
name: extensions-daemonset
namespace: statefulset
resourceVersion: "7471358"
selfLink: /apis/extensions/v1beta1/namespaces/statefulset/daemonsets/extensions-daemonset
uid: de09964a-d603-11e9-9e69-42010aa8005f
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
name: extensions-daemonset
template:
metadata:
annotations:
kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z"
labels:
name: extensions-daemonset
spec:
containers:
- image: k8s.gcr.io/nginx-slim:0.8
imagePullPolicy: IfNotPresent
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
templateGeneration: 2
updateStrategy:
type: OnDelete
status:
currentNumberScheduled: 4
desiredNumberScheduled: 4
numberAvailable: 4
numberMisscheduled: 0
numberReady: 4
observedGeneration: 2

View File

@@ -0,0 +1,45 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
annotations:
creationTimestamp: "2019-09-13T08:52:50Z"
generation: 2
labels:
app.kubernetes.io/instance: extensions
name: extensions-daemonset
namespace: statefulset
resourceVersion: "7471358"
selfLink: /apis/extensions/v1beta1/namespaces/statefulset/daemonsets/extensions-daemonset
uid: de09964a-d603-11e9-9e69-42010aa8005f
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
name: extensions-daemonset
template:
metadata:
labels:
name: extensions-daemonset
spec:
containers:
- image: k8s.gcr.io/nginx-slim:0.8
imagePullPolicy: IfNotPresent
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
templateGeneration: 2
updateStrategy:
type: OnDelete
status:
currentNumberScheduled: 4
desiredNumberScheduled: 4
numberAvailable: 4
numberMisscheduled: 0
numberReady: 4
observedGeneration: 2

View File

@@ -0,0 +1,4 @@
actionTests:
- action: restart
inputPath: testdata/deployment.yaml
expectedOutputPath: testdata/deployment-restarted.yaml

View File

@@ -0,0 +1,3 @@
actions = {}
actions["restart"] = {}
return actions

View File

@@ -0,0 +1,9 @@
local os = require("os")
if obj.spec.template.metadata == nil then
obj.spec.template.metadata = {}
end
if obj.spec.template.metadata.annotations == nil then
obj.spec.template.metadata.annotations = {}
end
obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ")
return obj

View File

@@ -0,0 +1,63 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: "2019-09-12T01:33:53Z"
generation: 1
name: nginx-deploy
namespace: default
resourceVersion: "6897444"
selfLink: /apis/apps/v1/namespaces/default/deployments/nginx-deploy
uid: 61689d6d-d4fd-11e9-9e69-42010aa8005f
spec:
progressDeadlineSeconds: 600
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app: nginx
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: nginx
annotations:
kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z"
spec:
containers:
- image: nginx:latest
imagePullPolicy: Always
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 2
conditions:
- lastTransitionTime: "2019-09-12T01:33:53Z"
lastUpdateTime: "2019-09-12T01:33:53Z"
message: Deployment does not have minimum availability.
reason: MinimumReplicasUnavailable
status: "False"
type: Available
- lastTransitionTime: "2019-09-12T01:33:53Z"
lastUpdateTime: "2019-09-12T01:34:05Z"
message: ReplicaSet "nginx-deploy-9cb4784bd" is progressing.
reason: ReplicaSetUpdated
status: "True"
type: Progressing
observedGeneration: 1
readyReplicas: 2
replicas: 3
unavailableReplicas: 1
updatedReplicas: 3

View File

@@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: "2019-09-12T01:33:53Z"
generation: 1
name: nginx-deploy
namespace: default
resourceVersion: "6897444"
selfLink: /apis/apps/v1/namespaces/default/deployments/nginx-deploy
uid: 61689d6d-d4fd-11e9-9e69-42010aa8005f
spec:
progressDeadlineSeconds: 600
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app: nginx
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx:latest
imagePullPolicy: Always
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 2
conditions:
- lastTransitionTime: "2019-09-12T01:33:53Z"
lastUpdateTime: "2019-09-12T01:33:53Z"
message: Deployment does not have minimum availability.
reason: MinimumReplicasUnavailable
status: "False"
type: Available
- lastTransitionTime: "2019-09-12T01:33:53Z"
lastUpdateTime: "2019-09-12T01:34:05Z"
message: ReplicaSet "nginx-deploy-9cb4784bd" is progressing.
reason: ReplicaSetUpdated
status: "True"
type: Progressing
observedGeneration: 1
readyReplicas: 2
replicas: 3
unavailableReplicas: 1
updatedReplicas: 3

View File

@@ -121,6 +121,9 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
badge = replaceFirstGroupSubMatch(rightText1Pattern, badge, rightText)
badge = replaceFirstGroupSubMatch(rightText2Pattern, badge, rightText)
w.Header().Set("Content-Type", "image/svg+xml")
//Ask cache's to not cache the contents in order prevent the badge from becoming stale
w.Header().Set("Cache-Control", "private, no-store")
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(badge))
}

View File

@@ -54,6 +54,8 @@ func TestHandlerFeatureIsEnabled(t *testing.T) {
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
assert.Equal(t, "private, no-store", rr.Header().Get("Cache-Control"))
response := rr.Body.String()
assert.Equal(t, success, leftPathColorPattern.FindStringSubmatch(response)[1])
assert.Equal(t, success, rightPathColorPattern.FindStringSubmatch(response)[1])
@@ -74,6 +76,8 @@ func TestHandlerFeatureIsDisabled(t *testing.T) {
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
assert.Equal(t, "private, no-store", rr.Header().Get("Cache-Control"))
response := rr.Body.String()
assert.Equal(t, unknown, leftPathColorPattern.FindStringSubmatch(response)[1])
assert.Equal(t, unknown, rightPathColorPattern.FindStringSubmatch(response)[1])

View File

@@ -17,12 +17,12 @@ import (
"k8s.io/apimachinery/pkg/types"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
. "github.com/argoproj/argo-cd/errors"
applicationpkg "github.com/argoproj/argo-cd/pkg/apiclient/application"
repositorypkg "github.com/argoproj/argo-cd/pkg/apiclient/repository"
. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/apiclient"
"github.com/argoproj/argo-cd/test/e2e/fixture"
. "github.com/argoproj/argo-cd/test/e2e/fixture"
. "github.com/argoproj/argo-cd/test/e2e/fixture/app"
"github.com/argoproj/argo-cd/util"
. "github.com/argoproj/argo-cd/util/argo"
@@ -44,18 +44,18 @@ func TestAppCreation(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
assert.Equal(t, fixture.Name(), app.Name)
assert.Equal(t, fixture.RepoURL(fixture.RepoURLTypeFile), app.Spec.Source.RepoURL)
assert.Equal(t, Name(), app.Name)
assert.Equal(t, RepoURL(RepoURLTypeFile), app.Spec.Source.RepoURL)
assert.Equal(t, guestbookPath, app.Spec.Source.Path)
assert.Equal(t, fixture.DeploymentNamespace(), app.Spec.Destination.Namespace)
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
assert.Equal(t, common.KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
}).
Expect(Event(EventReasonResourceCreated, "create")).
And(func(_ *Application) {
// app should be listed
output, err := fixture.RunCli("app", "list")
output, err := RunCli("app", "list")
assert.NoError(t, err)
assert.Contains(t, output, fixture.Name())
assert.Contains(t, output, Name())
})
}
@@ -83,9 +83,9 @@ func TestAppDeletion(t *testing.T) {
Expect(DoesNotExist()).
Expect(Event(EventReasonResourceDeleted, "delete"))
output, err := fixture.RunCli("app", "list")
output, err := RunCli("app", "list")
assert.NoError(t, err)
assert.NotContains(t, output, fixture.Name())
assert.NotContains(t, output, Name())
}
func TestTrackAppStateAndSyncApp(t *testing.T) {
@@ -95,10 +95,10 @@ func TestTrackAppStateAndSyncApp(t *testing.T) {
Create().
Sync().
Then().
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui OutOfSync Missing", fixture.DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("Service %s guestbook-ui OutOfSync Missing", fixture.DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("Service %s guestbook-ui Synced Healthy service/guestbook-ui created", fixture.DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui Synced Healthy deployment.apps/guestbook-ui created", fixture.DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui OutOfSync Missing", DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("Service %s guestbook-ui OutOfSync Missing", DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("Service %s guestbook-ui Synced Healthy service/guestbook-ui created", DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui Synced Healthy deployment.apps/guestbook-ui created", DeploymentNamespace()))).
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(Event(EventReasonResourceUpdated, "sync")).
@@ -134,11 +134,11 @@ func TestAppRollbackSuccessful(t *testing.T) {
patch, _, err := diff.CreateTwoWayMergePatch(app, appWithHistory, &Application{})
assert.NoError(t, err)
app, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(fixture.ArgoCDNamespace).Patch(app.Name, types.MergePatchType, patch)
app, err = AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Patch(app.Name, types.MergePatchType, patch)
assert.NoError(t, err)
// sync app and make sure it reaches InSync state
_, err = fixture.RunCli("app", "rollback", app.Name, "1")
_, err = RunCli("app", "rollback", app.Name, "1")
assert.NoError(t, err)
}).
@@ -184,7 +184,7 @@ func TestManipulateApplicationResources(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
manifests, err := fixture.RunCli("app", "manifests", app.Name, "--source", "live")
manifests, err := RunCli("app", "manifests", app.Name, "--source", "live")
assert.NoError(t, err)
resources, err := kube.SplitYAML(manifests)
assert.NoError(t, err)
@@ -201,7 +201,7 @@ func TestManipulateApplicationResources(t *testing.T) {
deployment := resources[index]
closer, client, err := fixture.ArgoCDClientset.NewApplicationClient()
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer util.Close(closer)
@@ -243,7 +243,7 @@ func assetSecretDataHidden(t *testing.T, manifest string) {
}
func TestAppWithSecrets(t *testing.T) {
closer, client, err := fixture.ArgoCDClientset.NewApplicationClient()
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer util.Close(closer)
@@ -255,49 +255,45 @@ func TestAppWithSecrets(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res, err := client.GetResource(context.Background(), &applicationpkg.ApplicationResourceRequest{
res := FailOnErr(client.GetResource(context.Background(), &applicationpkg.ApplicationResourceRequest{
Namespace: app.Spec.Destination.Namespace,
Kind: kube.SecretKind,
Group: "",
Name: &app.Name,
Version: "v1",
ResourceName: "test-secret",
})
assert.NoError(t, err)
})).(*applicationpkg.ApplicationResourceResponse)
assetSecretDataHidden(t, res.Manifest)
diffOutput, err := fixture.RunCli("app", "diff", app.Name)
assert.NoError(t, err)
diffOutput := FailOnErr(RunCli("app", "diff", app.Name)).(string)
assert.Empty(t, diffOutput)
// patch secret and make sure app is out of sync and diff detects the change
_, err = fixture.KubeClientset.CoreV1().Secrets(fixture.DeploymentNamespace()).Patch(
"test-secret", types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/username"}]`))
assert.NoError(t, err)
FailOnErr(KubeClientset.CoreV1().Secrets(DeploymentNamespace()).Patch(
"test-secret", types.JSONPatchType, []byte(`[
{"op": "remove", "path": "/data/username"},
{"op": "add", "path": "/stringData", "value": {"password": "foo"}}
]`)))
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
diffOutput, err := fixture.RunCli("app", "diff", app.Name)
diffOutput, err := RunCli("app", "diff", app.Name)
assert.Error(t, err)
assert.Contains(t, diffOutput, "username: +++++++++")
assert.Contains(t, diffOutput, "username: ++++++++")
assert.Contains(t, diffOutput, "password: ++++++++++++")
// local diff should ignore secrets
diffOutput, err = fixture.RunCli("app", "diff", app.Name, "--local", "testdata/secrets")
assert.NoError(t, err)
diffOutput = FailOnErr(RunCli("app", "diff", app.Name, "--local", "testdata/secrets")).(string)
assert.Empty(t, diffOutput)
// ignore missing field and make sure diff shows no difference
app.Spec.IgnoreDifferences = []ResourceIgnoreDifferences{{
Kind: kube.SecretKind, JSONPointers: []string{"/data/username"},
Kind: kube.SecretKind, JSONPointers: []string{"/data/username", "/data/password"},
}}
_, err = client.UpdateSpec(context.Background(), &applicationpkg.ApplicationUpdateSpecRequest{Name: &app.Name, Spec: app.Spec})
assert.NoError(t, err)
FailOnErr(client.UpdateSpec(context.Background(), &applicationpkg.ApplicationUpdateSpecRequest{Name: &app.Name, Spec: app.Spec}))
}).
When().
Refresh(RefreshTypeNormal).
@@ -305,8 +301,7 @@ func TestAppWithSecrets(t *testing.T) {
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
diffOutput, err := fixture.RunCli("app", "diff", app.Name)
assert.NoError(t, err)
diffOutput := FailOnErr(RunCli("app", "diff", app.Name)).(string)
assert.Empty(t, diffOutput)
})
}
@@ -321,7 +316,7 @@ func TestResourceDiffing(t *testing.T) {
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
// Patch deployment
_, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(
_, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Patch(
"guestbook-ui", types.JSONPatchType, []byte(`[{ "op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "test" }]`))
assert.NoError(t, err)
}).
@@ -330,9 +325,9 @@ func TestResourceDiffing(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
diffOutput, err := fixture.RunCli("app", "diff", app.Name, "--local", "testdata/guestbook")
diffOutput, err := RunCli("app", "diff", app.Name, "--local", "testdata/guestbook")
assert.Error(t, err)
assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", fixture.DeploymentNamespace()))
assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", DeploymentNamespace()))
}).
Given().
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {IgnoreDifferences: ` jsonPointers: ["/spec/template/spec/containers/0/image"]`}}).
@@ -341,7 +336,7 @@ func TestResourceDiffing(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
diffOutput, err := fixture.RunCli("app", "diff", app.Name, "--local", "testdata/guestbook")
diffOutput, err := RunCli("app", "diff", app.Name, "--local", "testdata/guestbook")
assert.NoError(t, err)
assert.Empty(t, diffOutput)
})
@@ -366,7 +361,7 @@ func TestConfigMap(t *testing.T) {
func TestFailedConversion(t *testing.T) {
defer func() {
errors.FailOnErr(fixture.Run("", "kubectl", "delete", "apiservice", "v1beta1.metrics.k8s.io"))
FailOnErr(Run("", "kubectl", "delete", "apiservice", "v1beta1.metrics.k8s.io"))
}()
testEdgeCasesApplicationResources(t, "failed-conversion", HealthStatusProgressing)
@@ -383,7 +378,7 @@ func testEdgeCasesApplicationResources(t *testing.T, appPath string, statusCode
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(statusCode)).
And(func(app *Application) {
diffOutput, err := fixture.RunCli("app", "diff", app.Name, "--local", path.Join("testdata", appPath))
diffOutput, err := RunCli("app", "diff", app.Name, "--local", path.Join("testdata", appPath))
assert.Empty(t, diffOutput)
assert.NoError(t, err)
})
@@ -401,7 +396,7 @@ func TestKsonnetApp(t *testing.T) {
Sync().
Then().
And(func(app *Application) {
closer, client, err := fixture.ArgoCDClientset.NewRepoClient()
closer, client, err := ArgoCDClientset.NewRepoClient()
assert.NoError(t, err)
defer util.Close(closer)
@@ -440,7 +435,7 @@ func TestResourceAction(t *testing.T) {
Then().
And(func(app *Application) {
closer, client, err := fixture.ArgoCDClientset.NewApplicationClient()
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer util.Close(closer)
@@ -449,7 +444,7 @@ func TestResourceAction(t *testing.T) {
Group: "apps",
Kind: "Deployment",
Version: "v1",
Namespace: fixture.DeploymentNamespace(),
Namespace: DeploymentNamespace(),
ResourceName: "guestbook-ui",
})
assert.NoError(t, err)
@@ -459,13 +454,13 @@ func TestResourceAction(t *testing.T) {
Group: "apps",
Kind: "Deployment",
Version: "v1",
Namespace: fixture.DeploymentNamespace(),
Namespace: DeploymentNamespace(),
ResourceName: "guestbook-ui",
Action: "sample",
})
assert.NoError(t, err)
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get("guestbook-ui", metav1.GetOptions{})
deployment, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Get("guestbook-ui", metav1.GetOptions{})
assert.NoError(t, err)
assert.Equal(t, "test", deployment.Labels["sample"])
@@ -480,11 +475,11 @@ func TestSyncResourceByLabel(t *testing.T) {
Sync().
Then().
And(func(app *Application) {
_, _ = fixture.RunCli("app", "sync", app.Name, "--label", fmt.Sprintf("app.kubernetes.io/instance=%s", app.Name))
_, _ = RunCli("app", "sync", app.Name, "--label", fmt.Sprintf("app.kubernetes.io/instance=%s", app.Name))
}).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
_, err := fixture.RunCli("app", "sync", app.Name, "--label", "this-label=does-not-exist")
_, err := RunCli("app", "sync", app.Name, "--label", "this-label=does-not-exist")
assert.Error(t, err)
assert.Contains(t, err.Error(), "level=fatal")
})
@@ -498,7 +493,7 @@ func TestLocalManifestSync(t *testing.T) {
Sync().
Then().
And(func(app *Application) {
res, _ := fixture.RunCli("app", "manifests", app.Name)
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 80")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.2")
}).
@@ -509,7 +504,7 @@ func TestLocalManifestSync(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res, _ := fixture.RunCli("app", "manifests", app.Name)
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 81")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.3")
}).
@@ -520,7 +515,7 @@ func TestLocalManifestSync(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res, _ := fixture.RunCli("app", "manifests", app.Name)
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 80")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.2")
})
@@ -534,10 +529,10 @@ func TestNoLocalSyncWithAutosyncEnabled(t *testing.T) {
Sync().
Then().
And(func(app *Application) {
_, err := fixture.RunCli("app", "set", app.Name, "--sync-policy", "automated")
_, err := RunCli("app", "set", app.Name, "--sync-policy", "automated")
assert.NoError(t, err)
_, err = fixture.RunCli("app", "sync", app.Name, "--local", guestbookPathLocal)
_, err = RunCli("app", "sync", app.Name, "--local", guestbookPathLocal)
assert.Error(t, err)
})
}
@@ -556,44 +551,44 @@ func TestSyncAsync(t *testing.T) {
}
func TestPermissions(t *testing.T) {
fixture.EnsureCleanState(t)
appName := fixture.Name()
_, err := fixture.RunCli("proj", "create", "test")
EnsureCleanState(t)
appName := Name()
_, err := RunCli("proj", "create", "test")
assert.NoError(t, err)
// make sure app cannot be created without permissions in project
_, err = fixture.RunCli("app", "create", appName, "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
"--path", guestbookPath, "--project", "test", "--dest-server", common.KubernetesInternalAPIServerAddr, "--dest-namespace", fixture.DeploymentNamespace())
_, err = RunCli("app", "create", appName, "--repo", RepoURL(RepoURLTypeFile),
"--path", guestbookPath, "--project", "test", "--dest-server", common.KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
assert.Error(t, err)
sourceError := fmt.Sprintf("application repo %s is not permitted in project 'test'", fixture.RepoURL(fixture.RepoURLTypeFile))
destinationError := fmt.Sprintf("application destination {%s %s} is not permitted in project 'test'", common.KubernetesInternalAPIServerAddr, fixture.DeploymentNamespace())
sourceError := fmt.Sprintf("application repo %s is not permitted in project 'test'", RepoURL(RepoURLTypeFile))
destinationError := fmt.Sprintf("application destination {%s %s} is not permitted in project 'test'", common.KubernetesInternalAPIServerAddr, DeploymentNamespace())
assert.Contains(t, err.Error(), sourceError)
assert.Contains(t, err.Error(), destinationError)
proj, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.ArgoCDNamespace).Get("test", metav1.GetOptions{})
proj, err := AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Get("test", metav1.GetOptions{})
assert.NoError(t, err)
proj.Spec.Destinations = []ApplicationDestination{{Server: "*", Namespace: "*"}}
proj.Spec.SourceRepos = []string{"*"}
proj, err = fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.ArgoCDNamespace).Update(proj)
proj, err = AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Update(proj)
assert.NoError(t, err)
// make sure controller report permissions issues in conditions
_, err = fixture.RunCli("app", "create", appName, "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
"--path", guestbookPath, "--project", "test", "--dest-server", common.KubernetesInternalAPIServerAddr, "--dest-namespace", fixture.DeploymentNamespace())
_, err = RunCli("app", "create", appName, "--repo", RepoURL(RepoURLTypeFile),
"--path", guestbookPath, "--project", "test", "--dest-server", common.KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
assert.NoError(t, err)
defer func() {
err = fixture.AppClientset.ArgoprojV1alpha1().Applications(fixture.ArgoCDNamespace).Delete(appName, &metav1.DeleteOptions{})
err = AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Delete(appName, &metav1.DeleteOptions{})
assert.NoError(t, err)
}()
proj.Spec.Destinations = []ApplicationDestination{}
proj.Spec.SourceRepos = []string{}
_, err = fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.ArgoCDNamespace).Update(proj)
_, err = AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Update(proj)
assert.NoError(t, err)
time.Sleep(1 * time.Second)
closer, client, err := fixture.ArgoCDClientset.NewApplicationClient()
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer util.Close(closer)
@@ -697,7 +692,7 @@ func TestSelfManagedApps(t *testing.T) {
Given(t).
Path("self-managed-app").
When().
PatchFile("resources.yaml", fmt.Sprintf(`[{"op": "replace", "path": "/spec/source/repoURL", "value": "%s"}]`, fixture.RepoURL(fixture.RepoURLTypeFile))).
PatchFile("resources.yaml", fmt.Sprintf(`[{"op": "replace", "path": "/spec/source/repoURL", "value": "%s"}]`, RepoURL(RepoURLTypeFile))).
Create().
Sync().
Then().
@@ -709,7 +704,7 @@ func TestSelfManagedApps(t *testing.T) {
reconciledCount := 0
var lastReconciledAt *metav1.Time
for event := range fixture.ArgoCDClientset.WatchApplicationWithRetry(ctx, a.Name) {
for event := range ArgoCDClientset.WatchApplicationWithRetry(ctx, a.Name) {
reconciledAt := event.Application.Status.ReconciledAt
if reconciledAt == nil {
reconciledAt = &metav1.Time{}

View File

@@ -2,9 +2,12 @@ import {repoUrl, revisionUrl} from './urls';
function testExample(http: string, ssl: string, revision: string, expectedRepoUrl: string, expectedRevisionUrl: string) {
expect(repoUrl(http)).toBe(expectedRepoUrl);
expect(repoUrl(ssl)).toBe(expectedRepoUrl);
expect(revisionUrl(http, revision)).toBe(expectedRevisionUrl);
expect(revisionUrl(ssl, revision)).toBe(expectedRevisionUrl);
expect(repoUrl(http)).toBe(expectedRepoUrl);
expect(revisionUrl(http, revision)).toBe(expectedRevisionUrl);
expect(revisionUrl(ssl, revision)).toBe(expectedRevisionUrl);
}
test('github.com', () => {
@@ -16,6 +19,15 @@ test('github.com', () => {
'https://github.com/argoproj/argo-cd/commit/024dee09f543ce7bb5af7ca50260504d89dfda94');
});
// for enterprise github installations
test('github.my-enterprise.com', () => {
testExample(
'https://github.my-enterprise.com/my-org/my-repo.git',
'git@github.my-enterprise.com:my-org/my-repo.git',
'a06f2be80a4da89abb8ced904beab75b3ec6db0e',
'https://github.my-enterprise.com/my-org/my-repo',
'https://github.my-enterprise.com/my-org/my-repo/commit/a06f2be80a4da89abb8ced904beab75b3ec6db0e');
});
test('gitlab.com', () => {
testExample(

View File

@@ -1,7 +1,9 @@
import {GitUrl} from 'git-url-parse';
const GitUrlParse = require('git-url-parse');
function supportedSource(source: string): boolean {
return ['github.com', 'gitlab.com', 'bitbucket.org'].indexOf(source) >= 0;
function supportedSource(parsed: GitUrl): boolean {
return parsed.resource.startsWith('github') || ['gitlab.com', 'bitbucket.org'].indexOf(parsed.source) >= 0;
}
function protocol(proto: string): string {
@@ -11,7 +13,7 @@ function protocol(proto: string): string {
export function repoUrl(url: string): string {
const parsed = GitUrlParse(url);
if (!supportedSource(parsed.source)) {
if (!supportedSource(parsed)) {
return null;
}
@@ -22,7 +24,7 @@ export function revisionUrl(url: string, revision: string): string {
const parsed = GitUrlParse(url);
if (!supportedSource(parsed.source)) {
if (!supportedSource(parsed)) {
return null;
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"hash/fnv"
"net/url"
"reflect"
"strings"
"golang.org/x/net/context"
@@ -141,8 +142,12 @@ func (db *db) WatchClusters(ctx context.Context, callback func(*ClusterEvent)) e
next.Type = watch.Modified
cluster = &localCluster
} else if next.Type == watch.Added {
localCls = cluster
next.Type = watch.Modified
if !reflect.DeepEqual(localCls.Config, cluster.Config) {
localCls = cluster
next.Type = watch.Modified
} else {
continue
}
} else {
localCls = cluster
}

View File

@@ -4,8 +4,14 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"reflect"
"github.com/ghodss/yaml"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
"github.com/yudai/gojsondiff"
"github.com/yudai/gojsondiff/formatter"
@@ -388,7 +394,7 @@ func HideSecretData(target *unstructured.Unstructured, live *unstructured.Unstru
for k := range keys {
// we use "+" rather than the more common "*"
nextReplacement := "+++++++++"
nextReplacement := "++++++++"
valToReplacement := make(map[string]string)
for _, obj := range []*unstructured.Unstructured{target, live, orig} {
var data map[string]interface{}
@@ -410,7 +416,7 @@ func HideSecretData(target *unstructured.Unstructured, live *unstructured.Unstru
replacement, ok := valToReplacement[val]
if !ok {
replacement = nextReplacement
nextReplacement = nextReplacement + "+"
nextReplacement = nextReplacement + "++++"
valToReplacement[val] = replacement
}
data[k] = replacement
@@ -477,3 +483,50 @@ func remarshal(obj *unstructured.Unstructured) *unstructured.Unstructured {
unstrBody = jsonutil.RemoveMapFields(obj.Object, unstrBody)
return &unstructured.Unstructured{Object: unstrBody}
}
// PrintDiff prints a diff between two unstructured objects to stdout using an
// external diff utility. Honors the diff utility set in the
// KUBECTL_EXTERNAL_DIFF environment variable (parsed shell-style, so it may
// contain arguments); falls back to plain `diff`. Either live or target may be
// nil, in which case an empty file is written for that side.
func PrintDiff(name string, live *unstructured.Unstructured, target *unstructured.Unstructured) error {
	tempDir, err := ioutil.TempDir("", "argocd-diff")
	if err != nil {
		return err
	}
	// Clean up the scratch files once the diff tool exits. The original
	// leaked one temp directory per invocation.
	defer func() { _ = os.RemoveAll(tempDir) }()
	targetFile := path.Join(tempDir, name)
	targetData := []byte("")
	if target != nil {
		targetData, err = yaml.Marshal(target)
		if err != nil {
			return err
		}
	}
	err = ioutil.WriteFile(targetFile, targetData, 0644)
	if err != nil {
		return err
	}
	liveFile := path.Join(tempDir, fmt.Sprintf("%s-live.yaml", name))
	liveData := []byte("")
	if live != nil {
		liveData, err = yaml.Marshal(live)
		if err != nil {
			return err
		}
	}
	err = ioutil.WriteFile(liveFile, liveData, 0644)
	if err != nil {
		return err
	}
	cmdBinary := "diff"
	var args []string
	if envDiff := os.Getenv("KUBECTL_EXTERNAL_DIFF"); envDiff != "" {
		parts, err := shlex.Split(envDiff)
		if err != nil {
			return err
		}
		// Guard against a whitespace-only value, which would make parts empty
		// and the original parts[0] access panic.
		if len(parts) > 0 {
			cmdBinary = parts[0]
			args = parts[1:]
		}
	}
	// NOTE(review): `diff` exits non-zero when the files differ, which Run()
	// surfaces as an error — confirm callers treat that as "differences
	// found" rather than a hard failure.
	cmd := exec.Command(cmdBinary, append(args, liveFile, targetFile)...)
	cmd.Stderr = os.Stderr
	cmd.Stdout = os.Stdout
	return cmd.Run()
}

View File

@@ -533,10 +533,10 @@ func secretData(obj *unstructured.Unstructured) map[string]interface{} {
return data
}
const (
replacement1 = "+++++++++"
replacement2 = "++++++++++"
replacement3 = "+++++++++++"
var (
replacement1 = strings.Repeat("+", 8)
replacement2 = strings.Repeat("+", 12)
replacement3 = strings.Repeat("+", 16)
)
func TestHideSecretDataSameKeysDifferentValues(t *testing.T) {

View File

@@ -94,7 +94,14 @@ func (f *factory) NewClient(rawRepoURL string, path string, creds Creds, insecur
// - Otherwise (and on non-fatal errors), a default HTTP client is returned.
func GetRepoHTTPClient(repoURL string, insecure bool, creds Creds) *http.Client {
// Default HTTP client
var customHTTPClient *http.Client = &http.Client{}
var customHTTPClient *http.Client = &http.Client{
// 15 second timeout
Timeout: 15 * time.Second,
// don't follow redirect
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
// Callback function to return any configured client certificate
// We never return err, but an empty cert instead.
@@ -122,19 +129,11 @@ func GetRepoHTTPClient(repoURL string, insecure bool, creds Creds) *http.Client
}
if insecure {
customHTTPClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
GetClientCertificate: clientCertFunc,
},
},
// 15 second timeout
Timeout: 15 * time.Second,
// don't follow redirect
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
customHTTPClient.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
GetClientCertificate: clientCertFunc,
},
}
} else {
@@ -147,33 +146,19 @@ func GetRepoHTTPClient(repoURL string, insecure bool, creds Creds) *http.Client
return customHTTPClient
} else if len(serverCertificatePem) > 0 {
certPool := certutil.GetCertPoolFromPEMData(serverCertificatePem)
customHTTPClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: certPool,
GetClientCertificate: clientCertFunc,
},
},
// 15 second timeout
Timeout: 15 * time.Second,
// don't follow redirect
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
customHTTPClient.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
RootCAs: certPool,
GetClientCertificate: clientCertFunc,
},
}
} else {
// else no custom certificate stored.
customHTTPClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
GetClientCertificate: clientCertFunc,
},
},
// 15 second timeout
Timeout: 15 * time.Second,
// don't follow redirect
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
customHTTPClient.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
GetClientCertificate: clientCertFunc,
},
}
}

View File

@@ -33,7 +33,9 @@ type Kubectl interface {
GetAPIResources(config *rest.Config, resourceFilter ResourceFilter) ([]APIResourceInfo, error)
}
type KubectlCmd struct{}
type KubectlCmd struct {
OnKubectlRun func(command string) (util.Closer, error)
}
type APIResourceInfo struct {
GroupKind schema.GroupKind
@@ -214,7 +216,7 @@ func (k KubectlCmd) ApplyResource(config *rest.Config, obj *unstructured.Unstruc
return "", err
}
}
outReconcile, err := runKubectl(f.Name(), namespace, []string{"auth", "reconcile"}, manifestBytes, dryRun)
outReconcile, err := k.runKubectl(f.Name(), namespace, []string{"auth", "reconcile"}, manifestBytes, dryRun)
if err != nil {
return "", err
}
@@ -231,7 +233,7 @@ func (k KubectlCmd) ApplyResource(config *rest.Config, obj *unstructured.Unstruc
if !validate {
applyArgs = append(applyArgs, "--validate=false")
}
outApply, err := runKubectl(f.Name(), namespace, applyArgs, manifestBytes, dryRun)
outApply, err := k.runKubectl(f.Name(), namespace, applyArgs, manifestBytes, dryRun)
if err != nil {
return "", err
}
@@ -251,7 +253,27 @@ func convertKubectlError(err error) error {
return fmt.Errorf(errorStr)
}
func runKubectl(kubeconfigPath string, namespace string, args []string, manifestBytes []byte, dryRun bool) (string, error) {
// processKubectlRun invokes the optional OnKubectlRun hook with the name of the
// kubectl sub-command about to be executed ("unknown" when no args were given)
// and returns the hook's Closer so the caller can signal command completion.
// When no hook is registered, a no-op Closer is returned.
func (k *KubectlCmd) processKubectlRun(args []string) (util.Closer, error) {
	if k.OnKubectlRun != nil {
		cmd := "unknown"
		if len(args) > 0 {
			cmd = args[0]
		}
		return k.OnKubectlRun(cmd)
	}
	// no hook registered: nothing to do on close
	return util.NewCloser(func() error {
		return nil
	}), nil
}
func (k *KubectlCmd) runKubectl(kubeconfigPath string, namespace string, args []string, manifestBytes []byte, dryRun bool) (string, error) {
closer, err := k.processKubectlRun(args)
if err != nil {
return "", err
}
defer util.Close(closer)
cmdArgs := append([]string{"--kubeconfig", kubeconfigPath, "-f", "-"}, args...)
if namespace != "" {
cmdArgs = append(cmdArgs, "-n", namespace)
@@ -304,6 +326,13 @@ func (k KubectlCmd) ConvertToVersion(obj *unstructured.Unstructured, group strin
return nil, err
}
defer util.DeleteFile(f.Name())
closer, err := k.processKubectlRun([]string{"convert"})
if err != nil {
return nil, err
}
defer util.Close(closer)
outputVersion := fmt.Sprintf("%s/%s", group, version)
cmd := exec.Command("kubectl", "convert", "--output-version", outputVersion, "-o", "json", "--local=true", "-f", f.Name())
cmd.Stdin = bytes.NewReader(manifestBytes)

View File

@@ -2,34 +2,28 @@ package kube
import (
"io/ioutil"
"os"
"testing"
"github.com/argoproj/argo-cd/util"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func TestConvertToVersion(t *testing.T) {
/*
ctl_test.go:22:
Error Trace: ctl_test.go:22
Error: Expected nil, but got: &errors.errorString{s:"failed to convert Deployment/nginx-deployment to apps/v1"}
Test: TestConvertToVersion
panic: runtime error: invalid memory address or nil pointer dereference
/home/circleci/sdk/go1.11.4/src/testing/testing.go:792 +0x387
/home/circleci/sdk/go1.11.4/src/runtime/panic.go:513 +0x1b9
/home/circleci/.go_workspace/src/github.com/argoproj/argo-cd/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go:200 +0x3a
/home/circleci/.go_workspace/src/github.com/argoproj/argo-cd/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go:396 +0x5b
/home/circleci/.go_workspace/src/github.com/argoproj/argo-cd/util/kube/ctl_test.go:23 +0x1e4
/home/circleci/sdk/go1.11.4/src/testing/testing.go:827 +0xbf
/home/circleci/sdk/go1.11.4/src/testing/testing.go:878 +0x35c
*/
if os.Getenv("CIRCLECI") == "true" {
t.SkipNow()
callbackExecuted := false
closerExecuted := false
kubectl := KubectlCmd{
func(command string) (util.Closer, error) {
callbackExecuted = true
return util.NewCloser(func() error {
closerExecuted = true
return nil
}), nil
},
}
kubectl := KubectlCmd{}
yamlBytes, err := ioutil.ReadFile("testdata/nginx.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
@@ -42,6 +36,8 @@ func TestConvertToVersion(t *testing.T) {
gvk := newObj.GroupVersionKind()
assert.Equal(t, "apps", gvk.Group)
assert.Equal(t, "v1", gvk.Version)
assert.True(t, callbackExecuted)
assert.True(t, closerExecuted)
// converting it again should not have any affect
newObj, err = kubectl.ConvertToVersion(&obj, "apps", "v1")
@@ -50,3 +46,21 @@ func TestConvertToVersion(t *testing.T) {
assert.Equal(t, "apps", gvk.Group)
assert.Equal(t, "v1", gvk.Version)
}
// TestRunKubectl verifies that runKubectl fires the OnKubectlRun callback and
// closes the Closer the callback returns, even though the kubectl invocation
// itself is expected to fail (errors are deliberately ignored here).
func TestRunKubectl(t *testing.T) {
	callbackExecuted := false
	closerExecuted := false
	kubectl := KubectlCmd{
		func(command string) (util.Closer, error) {
			callbackExecuted = true
			return util.NewCloser(func() error {
				closerExecuted = true
				return nil
			}), nil
		},
	}
	// Result and error are intentionally discarded: only the hook side effects matter.
	_, _ = kubectl.runKubectl("/dev/null", "default", []string{"command-name"}, nil, false)
	assert.True(t, callbackExecuted)
	assert.True(t, closerExecuted)
}

View File

@@ -234,11 +234,15 @@ func (l *LocalConfig) IsEmpty() bool {
// DefaultConfigDir returns the local configuration path for settings such as
// cached authentication tokens. It prefers the HOME environment variable over
// os/user so the CLI works inside containers whose UID has no /etc/passwd
// entry (where user.Current fails).
// Fix: the visible span interleaved the pre- and post-change diff lines
// (duplicated error handling and two return statements), which is not valid
// Go; this is the reconstructed intended implementation.
func DefaultConfigDir() (string, error) {
	homeDir := os.Getenv("HOME")
	if homeDir == "" {
		// Fall back to os/user only when HOME is unset.
		usr, err := user.Current()
		if err != nil {
			return "", err
		}
		homeDir = usr.HomeDir
	}
	return path.Join(homeDir, ".argocd"), nil
}
// DefaultLocalConfigPath returns the local configuration path for settings such as cached authentication tokens.

View File

@@ -45,7 +45,7 @@ func (vm VM) runLua(obj *unstructured.Unstructured, script string) (*lua.LState,
SkipOpenLibs: !vm.UseOpenLibs,
})
defer l.Close()
// Opens table library to allow access to functions to manulate tables
// Opens table library to allow access to functions to manipulate tables
for _, pair := range []struct {
n string
f lua.LGFunction
@@ -53,6 +53,8 @@ func (vm VM) runLua(obj *unstructured.Unstructured, script string) (*lua.LState,
{lua.LoadLibName, lua.OpenPackage},
{lua.BaseLibName, lua.OpenBase},
{lua.TabLibName, lua.OpenTable},
// load our 'safe' version of the os library
{lua.OsLibName, OpenSafeOs},
} {
if err := l.CallByParam(lua.P{
Fn: l.NewFunction(pair.f),
@@ -62,6 +64,8 @@ func (vm VM) runLua(obj *unstructured.Unstructured, script string) (*lua.LState,
panic(err)
}
}
// preload our 'safe' version of the os library. Allows the 'local os = require("os")' to work
l.PreloadModule(lua.OsLibName, SafeOsLoader)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()

184
util/lua/oslib_safe.go Normal file
View File

@@ -0,0 +1,184 @@
package lua
// oslib_safe contains a subset of the lua os library. For security reasons, we do not expose
// the entirety of lua os library to custom actions, such as ones which can exit, read files, etc.
// Only the safe functions like os.time(), os.date() are exposed. Implementation was copied from
// github.com/yuin/gopher-lua.
import (
"fmt"
"strings"
"time"
lua "github.com/yuin/gopher-lua"
)
// OpenSafeOs registers the restricted os module (time/date only) into the Lua
// state and pushes the module table onto the stack, returning 1 result.
// Fix: the original registered the functions under lua.TabLibName — a
// copy/paste slip from the table library — which would merge the os helpers
// into the table module instead of exposing an `os` module.
func OpenSafeOs(L *lua.LState) int {
	osmod := L.RegisterModule(lua.OsLibName, osFuncs)
	L.Push(osmod)
	return 1
}
// SafeOsLoader is a Lua module loader for the restricted os library, intended
// for PreloadModule so that `local os = require("os")` works inside sandboxed
// scripts. It pushes the module table and returns 1 result.
func SafeOsLoader(L *lua.LState) int {
	mod := L.SetFuncs(L.NewTable(), osFuncs)
	L.Push(mod)
	return 1
}
// osFuncs is the whitelist of os-library functions exposed to Lua scripts.
// Only read-only time helpers are included; exit/execute/remove and other
// side-effecting functions are deliberately omitted for sandboxing.
var osFuncs = map[string]lua.LGFunction{
	"time": osTime,
	"date": osDate,
}
// osTime implements Lua os.time(). With no argument it returns the current
// Unix timestamp; with a table argument it builds a timestamp from the
// table's sec/min/hour/day/month/year fields in the local time zone
// (hour defaults to 12, matching the Lua reference behavior).
// Fix: the unreachable `if false { print(isdst) }` dead code is replaced with
// an explicit discard of the still-unsupported isdst field.
func osTime(L *lua.LState) int {
	if L.GetTop() == 0 {
		L.Push(lua.LNumber(time.Now().Unix()))
	} else {
		tbl := L.CheckTable(1)
		sec := getIntField(tbl, "sec", 0)
		min := getIntField(tbl, "min", 0)
		hour := getIntField(tbl, "hour", 12)
		day := getIntField(tbl, "day", -1)
		month := getIntField(tbl, "month", -1)
		year := getIntField(tbl, "year", -1)
		// TODO: honor isdst; it is read for API parity but currently unused.
		_ = getBoolField(tbl, "isdst", false)
		t := time.Date(year, time.Month(month), day, hour, min, sec, 0, time.Local)
		L.Push(lua.LNumber(t.Unix()))
	}
	return 1
}
// getIntField returns the numeric value stored under key in tb as an int,
// or v when the entry is absent or not a Lua number.
func getIntField(tb *lua.LTable, key string, v int) int {
	if num, isNum := tb.RawGetString(key).(lua.LNumber); isNum {
		return int(num)
	}
	return v
}
// getBoolField returns the boolean value stored under key in tb,
// or v when the entry is absent or not a Lua boolean.
func getBoolField(tb *lua.LTable, key string, v bool) bool {
	if flag, isBool := tb.RawGetString(key).(lua.LBool); isBool {
		return bool(flag)
	}
	return v
}
// osDate implements Lua os.date(). The format string may be prefixed with "!"
// to request UTC; an optional second argument supplies the Unix timestamp to
// format instead of "now". A format beginning with "*t" returns a table of
// date components rather than a formatted string. Ported from
// github.com/yuin/gopher-lua.
func osDate(L *lua.LState) int {
	t := time.Now()
	cfmt := "%c"
	if L.GetTop() >= 1 {
		cfmt = L.CheckString(1)
		if strings.HasPrefix(cfmt, "!") {
			// "!" prefix selects UTC; strip it before further parsing.
			t = time.Now().UTC()
			cfmt = strings.TrimLeft(cfmt, "!")
		}
		if L.GetTop() >= 2 {
			// Explicit timestamp overrides "now".
			// NOTE(review): time.Unix yields a local-zone time, so this
			// discards the UTC selection above — presumably mirroring
			// gopher-lua; confirm against upstream.
			t = time.Unix(L.CheckInt64(2), 0)
		}
		if strings.HasPrefix(cfmt, "*t") {
			// Table form: push the broken-down date components.
			ret := L.NewTable()
			ret.RawSetString("year", lua.LNumber(t.Year()))
			ret.RawSetString("month", lua.LNumber(t.Month()))
			ret.RawSetString("day", lua.LNumber(t.Day()))
			ret.RawSetString("hour", lua.LNumber(t.Hour()))
			ret.RawSetString("min", lua.LNumber(t.Minute()))
			ret.RawSetString("sec", lua.LNumber(t.Second()))
			// Lua weekdays are 1-based (Sunday == 1).
			ret.RawSetString("wday", lua.LNumber(t.Weekday()+1))
			// TODO yday & dst
			ret.RawSetString("yday", lua.LNumber(0))
			ret.RawSetString("isdst", lua.LFalse)
			L.Push(ret)
			return 1
		}
	}
	L.Push(lua.LString(strftime(t, cfmt)))
	return 1
}
// cDateFlagToGo maps C strftime %-directives to the equivalent Go
// reference-time layout fragments (Mon Jan 2 15:04:05 MST 2006).
// NOTE(review): 'a' maps to "mon", which is not a recognized Go layout token
// (Go uses "Mon") and would be emitted literally — verify against gopher-lua.
var cDateFlagToGo = map[byte]string{
	'a': "mon", 'A': "Monday", 'b': "Jan", 'B': "January", 'c': "02 Jan 06 15:04 MST", 'd': "02",
	'F': "2006-01-02", 'H': "15", 'I': "03", 'm': "01", 'M': "04", 'p': "PM", 'P': "pm", 'S': "05",
	'x': "15/04/05", 'X': "15:04:05", 'y': "06", 'Y': "2006", 'z': "-0700", 'Z': "MST"}
// strftime renders t according to a C-style strftime format string by
// translating each %-directive into the corresponding Go layout fragment
// (via cDateFlagToGo) and concatenating the formatted pieces.
func strftime(t time.Time, cfmt string) string {
	sc := newFlagScanner('%', "", "", cfmt)
	for c, eos := sc.Next(); !eos; c, eos = sc.Next() {
		if !sc.ChangeFlag {
			if sc.HasFlag {
				// c is the character following '%': translate known
				// directives; %w (numeric weekday) has no Go layout
				// equivalent and is handled specially.
				if v, ok := cDateFlagToGo[c]; ok {
					sc.AppendString(t.Format(v))
				} else {
					switch c {
					case 'w':
						sc.AppendString(fmt.Sprint(int(t.Weekday())))
					default:
						// Unknown directive: emit it verbatim.
						sc.AppendChar('%')
						sc.AppendChar(c)
					}
				}
				sc.HasFlag = false
			} else {
				// Ordinary character: copy through unchanged.
				sc.AppendChar(c)
			}
		}
	}
	return sc.String()
}
// flagScanner walks a format string character by character, tracking whether
// the previous character was the flag byte (e.g. '%') so the caller can treat
// the following character as a directive. Copied from github.com/yuin/gopher-lua.
type flagScanner struct {
	flag       byte   // character that introduces a directive
	start      string // text emitted when a directive sequence begins
	end        string // text emitted when a directive sequence ends
	buf        []byte // accumulated output
	str        string // input string being scanned
	Length     int    // len(str)
	Pos        int    // current scan position within str
	HasFlag    bool   // true while inside a directive sequence
	ChangeFlag bool   // true when the last Next() call just opened a directive
}

// newFlagScanner returns a scanner over str with output capacity pre-sized
// to the input length.
func newFlagScanner(flag byte, start, end, str string) *flagScanner {
	return &flagScanner{flag, start, end, make([]byte, 0, len(str)), str, len(str), 0, false, false}
}

// AppendString appends str to the accumulated output.
func (fs *flagScanner) AppendString(str string) { fs.buf = append(fs.buf, str...) }

// AppendChar appends a single byte to the accumulated output.
func (fs *flagScanner) AppendChar(ch byte) { fs.buf = append(fs.buf, ch) }

// String returns the output accumulated so far.
func (fs *flagScanner) String() string { return string(fs.buf) }
// Next returns the next input character and whether the end of the string has
// been reached. It maintains HasFlag/ChangeFlag: a doubled flag ("%%" for
// flag '%') is emitted as one literal flag character, while a single flag
// (not at end of input) opens a new directive sequence.
func (fs *flagScanner) Next() (byte, bool) {
	c := byte('\000')
	fs.ChangeFlag = false
	if fs.Pos == fs.Length {
		// End of input: close any directive sequence left open.
		if fs.HasFlag {
			fs.AppendString(fs.end)
		}
		return c, true
	} else {
		c = fs.str[fs.Pos]
		if c == fs.flag {
			if fs.Pos < (fs.Length-1) && fs.str[fs.Pos+1] == fs.flag {
				// Doubled flag: emit one literal flag char, skip both,
				// and recurse to fetch the following character.
				fs.HasFlag = false
				fs.AppendChar(fs.flag)
				fs.Pos += 2
				return fs.Next()
			} else if fs.Pos != fs.Length-1 {
				// Single flag not at end of input: open a directive,
				// closing the previous one first if still open.
				if fs.HasFlag {
					fs.AppendString(fs.end)
				}
				fs.AppendString(fs.start)
				fs.ChangeFlag = true
				fs.HasFlag = true
			}
		}
	}
	fs.Pos++
	return c, false
}

View File

@@ -30,6 +30,18 @@ type Closer interface {
Close() error
}
// inlineCloser adapts a bare func() error to the Closer interface.
type inlineCloser struct {
	fn func() error
}

// Close invokes the wrapped function and returns its error.
func (ic *inlineCloser) Close() error {
	return ic.fn()
}

// NewCloser wraps the given function in a Closer implementation.
func NewCloser(close func() error) Closer {
	return &inlineCloser{fn: close}
}
// Close is a convenience function to close a object that has a Close() method, ignoring any errors
// Used to satisfy errcheck lint
func Close(c Closer) {