Compare commits

...

23 Commits

Author SHA1 Message Date
Alexander Matyushentsev
c2c19f42ad Update manifests to v1.5.2 2020-04-15 09:20:36 -07:00
Alexander Matyushentsev
8a3b36bd28 Update manifests to v1.5.1 2020-04-06 08:53:21 -07:00
Alexander Matyushentsev
35a7350b74 fix: return 401 error code if username does not exist (#3369) 2020-04-06 08:19:49 -07:00
jannfis
241e6d0a6e fix: Do not panic while running hooks with short revision (#3368) 2020-04-06 08:19:45 -07:00
jannfis
dbd6b406ac chore: Keep autogenerated message in upstream.yaml for HA manifests (#3367)
* fix: Keep autogenerated message in upstream.yaml
2020-04-06 08:19:40 -07:00
jannfis
a069639135 fix: Increase HAProxy check interval to prevent intermittent failures (#3356)
* Increase HAProxy check interval to prevent intermittent failures/state flapping

* Restore original namespace
2020-04-06 08:19:33 -07:00
May Zhang
76241f9d3b fix: Helm v3 CRD are not deployed (#3345)
* Fixing 'could not find plugin' issue during app sync and app diff

* Fixing codegen error

* Revert "Fixing codegen error"

This reverts commit b2dcfb81

* Fixing codegen error

* If the user is logged in, the settings API returns ConfigManagementPlugins

* For Helm 3, add the --include-crds flag when calling helm template to support Helm 3 CRDs

* Fixing typo.

* Added further assertion of ResourceSyncStatusIs for CRD resources.
2020-04-06 08:19:22 -07:00
Alexander Matyushentsev
bdda410463 Update manifests to v1.5.0 2020-04-02 09:34:43 -07:00
dthomson25
2faa5c89d1 Add V0.8 changes to Rollouts healthcheck (#3331) 2020-04-02 09:01:19 -07:00
Alexander Matyushentsev
a12b7bdb74 fix: argocd fails to connect clusters with IAM authentication configuration (#3325) 2020-03-31 17:50:09 -07:00
Alexander Matyushentsev
9b21c25783 Update manifests to v1.5.0-rc3 2020-03-30 15:00:06 -07:00
Alexander Matyushentsev
e1deca2a9e fix: avoid nil pointer dereference in badge handler (#3316) 2020-03-30 14:30:38 -07:00
Alexander Matyushentsev
62621428b1 fix: pass APIVersions value to manifest generation request during app validation and during app manifests loading (#3312)
* fix: pass APIVersions value to manifest generation request during app validation and during app manifests loading
2020-03-30 13:38:15 -07:00
Shuwei Hao
ab1f9e4658 fix: update help info about argcd account can-i (#3310)
Signed-off-by: Shuwei Hao <haoshuwei24@gmail.com>
2020-03-30 13:23:30 -07:00
jannfis
36d1b42d5c Fix possible panic when generating Dex config from malformed YAML (#3303) 2020-03-30 13:23:26 -07:00
Alexander Matyushentsev
8b9d25f6e3 fix: SSO user unable to change local account password (#3297) (#3298)
* fix: SSO user unable to change local account password (#3297)

* apply code review notes
2020-03-30 13:23:22 -07:00
Alexander Matyushentsev
323af4d562 fix: use pagination while loading initial cluster state to avoid memory spikes (#3299) 2020-03-30 13:23:19 -07:00
Alexander Matyushentsev
a946b70b5e fix: fix Cannot read property 'length' of undefined error (#3296) 2020-03-30 13:23:15 -07:00
Alexander Matyushentsev
f9f1bdaabe Update manifests to v1.5.0-rc2 2020-03-25 22:12:54 -07:00
Alexander Matyushentsev
e66b6109f7 fix: implement workaround for helm/helm#6870 bug (#3290)
* fix: implement workaround for  helm/helm#6870 bug

* Update app_management_test.go
2020-03-25 22:11:46 -07:00
Jesse Suen
53897e5019 improvement: remove app name and project labels from reconcliation histogram to reduce cardinality (#3271) 2020-03-25 12:42:23 -07:00
Alexander Matyushentsev
7e0d8a490c fix: increase max connections count to support clusters with very large number of CRDs (#3278) 2020-03-25 10:03:40 -07:00
Alexander Matyushentsev
3684a10332 Update manifests to v1.5.0-rc1 2020-03-20 13:48:19 -07:00
47 changed files with 789 additions and 195 deletions

View File

@@ -1 +1 @@
1.5.0
1.5.2

View File

@@ -187,7 +187,7 @@
"ApplicationService"
],
"summary": "List returns list of applications",
"operationId": "ListMixin8",
"operationId": "List",
"parameters": [
{
"type": "string",
@@ -237,7 +237,7 @@
"ApplicationService"
],
"summary": "Create creates an application",
"operationId": "CreateMixin8",
"operationId": "Create",
"parameters": [
{
"name": "body",
@@ -264,7 +264,7 @@
"ApplicationService"
],
"summary": "Update updates an application",
"operationId": "UpdateMixin8",
"operationId": "Update",
"parameters": [
{
"type": "string",
@@ -395,7 +395,7 @@
"ApplicationService"
],
"summary": "Get returns an application by name",
"operationId": "GetMixin8",
"operationId": "Get",
"parameters": [
{
"type": "string",
@@ -445,7 +445,7 @@
"ApplicationService"
],
"summary": "Delete deletes an application",
"operationId": "DeleteMixin8",
"operationId": "Delete",
"parameters": [
{
"type": "string",
@@ -1084,7 +1084,7 @@
"ClusterService"
],
"summary": "List returns list of clusters",
"operationId": "List",
"operationId": "ListMixin4",
"parameters": [
{
"type": "string",
@@ -1106,7 +1106,7 @@
"ClusterService"
],
"summary": "Create creates a cluster",
"operationId": "Create",
"operationId": "CreateMixin4",
"parameters": [
{
"name": "body",
@@ -1133,7 +1133,7 @@
"ClusterService"
],
"summary": "Update updates a cluster",
"operationId": "Update",
"operationId": "UpdateMixin4",
"parameters": [
{
"type": "string",
@@ -1166,7 +1166,7 @@
"ClusterService"
],
"summary": "Get returns a cluster by server address",
"operationId": "GetMixin2",
"operationId": "GetMixin4",
"parameters": [
{
"type": "string",
@@ -1189,7 +1189,7 @@
"ClusterService"
],
"summary": "Delete deletes a cluster",
"operationId": "Delete",
"operationId": "DeleteMixin4",
"parameters": [
{
"type": "string",
@@ -1239,7 +1239,7 @@
"ProjectService"
],
"summary": "List returns list of projects",
"operationId": "ListMixin6",
"operationId": "ListMixin5",
"parameters": [
{
"type": "string",
@@ -1261,7 +1261,7 @@
"ProjectService"
],
"summary": "Create a new project.",
"operationId": "CreateMixin6",
"operationId": "CreateMixin5",
"parameters": [
{
"name": "body",
@@ -1288,7 +1288,7 @@
"ProjectService"
],
"summary": "Get returns a project by name",
"operationId": "GetMixin6",
"operationId": "GetMixin5",
"parameters": [
{
"type": "string",
@@ -1311,7 +1311,7 @@
"ProjectService"
],
"summary": "Delete deletes a project",
"operationId": "DeleteMixin6",
"operationId": "DeleteMixin5",
"parameters": [
{
"type": "string",
@@ -1386,7 +1386,7 @@
"ProjectService"
],
"summary": "Update updates a project",
"operationId": "UpdateMixin6",
"operationId": "UpdateMixin5",
"parameters": [
{
"type": "string",
@@ -1905,7 +1905,7 @@
"SettingsService"
],
"summary": "Get returns Argo CD settings",
"operationId": "Get",
"operationId": "GetMixin7",
"responses": {
"200": {
"description": "(empty)",

View File

@@ -20,6 +20,7 @@ import (
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver/apiclient"
appstatecache "github.com/argoproj/argo-cd/util/cache/appstate"
@@ -59,8 +60,7 @@ func newCommand() *cobra.Command {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
kubeClient := kubernetes.NewForConfigOrDie(config)
appClient := appclientset.NewForConfigOrDie(config)

View File

@@ -11,6 +11,7 @@ import (
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver/apiclient"
"github.com/argoproj/argo-cd/server"
@@ -48,8 +49,7 @@ func NewCommand() *cobra.Command {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)

View File

@@ -24,6 +24,7 @@ import (
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/localconfig"
sessionutil "github.com/argoproj/argo-cd/util/session"
)
func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
@@ -59,14 +60,20 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co
c.HelpFunc()(c, args)
os.Exit(1)
}
acdClient := argocdclient.NewClientOrDie(clientOpts)
conn, usrIf := acdClient.NewAccountClientOrDie()
defer util.Close(conn)
if currentPassword == "" {
userInfo := getCurrentAccount(acdClient)
if userInfo.Iss == sessionutil.SessionManagerClaimsIssuer && currentPassword == "" {
fmt.Print("*** Enter current password: ")
password, err := terminal.ReadPassword(int(os.Stdin.Fd()))
errors.CheckError(err)
currentPassword = string(password)
fmt.Print("\n")
}
if newPassword == "" {
var err error
newPassword, err = cli.ReadAndConfirmPassword()
@@ -79,16 +86,12 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co
Name: account,
}
acdClient := argocdclient.NewClientOrDie(clientOpts)
conn, usrIf := acdClient.NewAccountClientOrDie()
defer util.Close(conn)
ctx := context.Background()
_, err := usrIf.UpdatePassword(ctx, &updatePasswordRequest)
errors.CheckError(err)
fmt.Printf("Password updated\n")
if account == "" || account == getCurrentAccount(acdClient) {
if account == "" || account == userInfo.Username {
// Get a new JWT token after updating the password
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
errors.CheckError(err)
@@ -171,7 +174,7 @@ argocd account can-i sync applications '*'
argocd account can-i update projects 'default'
# Can I create a cluster?
argocd account can-i create cluster '*'
argocd account can-i create clusters '*'
Actions: %v
Resources: %v
@@ -246,12 +249,12 @@ func NewAccountListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
return cmd
}
func getCurrentAccount(clientset argocdclient.Client) string {
func getCurrentAccount(clientset argocdclient.Client) session.GetUserInfoResponse {
conn, client := clientset.NewSessionClientOrDie()
defer util.Close(conn)
userInfo, err := client.GetUserInfo(context.Background(), &session.GetUserInfoRequest{})
errors.CheckError(err)
return userInfo.Username
return *userInfo
}
func NewAccountGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
@@ -271,7 +274,7 @@ argocd account get --account <account-name>`,
clientset := argocdclient.NewClientOrDie(clientOpts)
if account == "" {
account = getCurrentAccount(clientset)
account = getCurrentAccount(clientset).Username
}
conn, client := clientset.NewAccountClientOrDie()
@@ -343,7 +346,7 @@ argocd account generate-token --account <account-name>`,
conn, client := clientset.NewAccountClientOrDie()
defer util.Close(conn)
if account == "" {
account = getCurrentAccount(clientset)
account = getCurrentAccount(clientset).Username
}
expiresIn, err := timeutil.ParseDuration(expiresIn)
errors.CheckError(err)
@@ -383,7 +386,7 @@ argocd account generate-token --account <account-name>`,
conn, client := clientset.NewAccountClientOrDie()
defer util.Close(conn)
if account == "" {
account = getCurrentAccount(clientset)
account = getCurrentAccount(clientset).Username
}
_, err := client.DeleteToken(context.Background(), &accountpkg.DeleteTokenRequest{Name: account, Id: id})
errors.CheckError(err)

View File

@@ -3,6 +3,7 @@ package common
import (
"os"
"strconv"
"time"
)
// Default service addresses and URLS of Argo CD internal services
@@ -65,6 +66,8 @@ const (
AuthCookieName = "argocd.token"
// RevisionHistoryLimit is the max number of successful sync to keep in history
RevisionHistoryLimit = 10
// ChangePasswordSSOTokenMaxAge is the max token age for password change operation
ChangePasswordSSOTokenMaxAge = time.Minute * 5
)
// Dex related constants
@@ -141,6 +144,8 @@ const (
EnvK8sClientQPS = "ARGOCD_K8S_CLIENT_QPS"
// EnvK8sClientBurst is the burst value used for the kubernetes client (default: twice the client QPS)
EnvK8sClientBurst = "ARGOCD_K8S_CLIENT_BURST"
// EnvK8sClientMaxIdleConnections is the number of max idle connections in K8s REST client HTTP transport (default: 500)
EnvK8sClientMaxIdleConnections = "ARGOCD_K8S_CLIENT_MAX_IDLE_CONNECTIONS"
)
const (
@@ -158,6 +163,8 @@ var (
K8sClientConfigQPS float32 = 50
// K8sClientConfigBurst controls the burst to be used in K8s REST client configs
K8sClientConfigBurst int = 100
// K8sMaxIdleConnections controls the number of max idle connections in K8s REST client HTTP transport
K8sMaxIdleConnections = 500
)
func init() {
@@ -173,4 +180,10 @@ func init() {
} else {
K8sClientConfigBurst = 2 * int(K8sClientConfigQPS)
}
if envMaxConn := os.Getenv(EnvK8sClientMaxIdleConnections); envMaxConn != "" {
if maxConn, err := strconv.Atoi(envMaxConn); err != nil {
K8sMaxIdleConnections = maxConn
}
}
}
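The hunk above introduces the ARGOCD_K8S_CLIENT_MAX_IDLE_CONNECTIONS override for the K8s client transport defaults. As a minimal standalone sketch of that parse-and-override pattern (the variable and env names are illustrative, not Argo CD's; the sketch applies the override only when strconv.Atoi succeeds):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Default used when no override is provided; the name is illustrative.
var maxIdleConnections = 500

func init() {
	// Apply the override only when the env var is set and parses as an integer.
	if v := os.Getenv("EXAMPLE_MAX_IDLE_CONNECTIONS"); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			maxIdleConnections = n
		}
	}
}

func main() {
	fmt.Println("max idle connections:", maxIdleConnections)
}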

View File

@@ -23,6 +23,8 @@ import (
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/health"
"github.com/argoproj/argo-cd/util/kube"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/pager"
)
const (
@@ -263,11 +265,26 @@ func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo,
err = runSynced(c.lock, func() error {
if info.resourceVersion == "" {
list, err := resClient.List(metav1.ListOptions{})
listPager := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
res, err := resClient.List(opts)
if err == nil {
info.resourceVersion = res.GetResourceVersion()
}
return res, err
})
var items []unstructured.Unstructured
err = listPager.EachListItem(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
if un, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("object %s/%s has an unexpected type", un.GroupVersionKind().String(), un.GetName())
} else {
items = append(items, *un)
}
return nil
})
if err != nil {
return err
return fmt.Errorf("failed to load initial state of resource %s: %v", api.GroupKind.String(), err)
}
c.replaceResourceCache(api.GroupKind, list.GetResourceVersion(), list.Items, ns)
c.replaceResourceCache(api.GroupKind, info.resourceVersion, items, ns)
}
return nil
})
@@ -354,6 +371,7 @@ func (c *clusterInfo) sync() (err error) {
}
c.apisMeta = make(map[schema.GroupKind]*apiMeta)
c.nodes = make(map[kube.ResourceKey]*node)
c.namespacedResources = make(map[schema.GroupKind]bool)
config := c.cluster.RESTConfig()
version, err := c.kubectl.GetServerVersion(config)
if err != nil {
@@ -376,25 +394,47 @@ func (c *clusterInfo) sync() (err error) {
}
lock := sync.Mutex{}
err = util.RunAllAsync(len(apis), func(i int) error {
return c.processApi(client, apis[i], func(resClient dynamic.ResourceInterface, _ string) error {
list, err := resClient.List(metav1.ListOptions{})
api := apis[i]
lock.Lock()
ctx, cancel := context.WithCancel(context.Background())
info := &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel}
c.apisMeta[api.GroupKind] = info
c.namespacedResources[api.GroupKind] = api.Meta.Namespaced
lock.Unlock()
return c.processApi(client, api, func(resClient dynamic.ResourceInterface, ns string) error {
listPager := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
res, err := resClient.List(opts)
if err == nil {
lock.Lock()
info.resourceVersion = res.GetResourceVersion()
lock.Unlock()
}
return res, err
})
err = listPager.EachListItem(context.Background(), metav1.ListOptions{}, func(obj runtime.Object) error {
if un, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("object %s/%s has an unexpected type", un.GroupVersionKind().String(), un.GetName())
} else {
lock.Lock()
c.setNode(c.createObjInfo(un, c.cacheSettingsSrc().AppInstanceLabelKey))
lock.Unlock()
}
return nil
})
if err != nil {
return err
return fmt.Errorf("failed to load initial state of resource %s: %v", api.GroupKind.String(), err)
}
lock.Lock()
for i := range list.Items {
c.setNode(c.createObjInfo(&list.Items[i], c.cacheSettingsSrc().AppInstanceLabelKey))
}
lock.Unlock()
go c.watchEvents(ctx, api, info, resClient, ns)
return nil
})
})
if err == nil {
err = c.startMissingWatches()
}
if err != nil {
log.Errorf("Failed to sync cluster %s: %v", c.cluster.Server, err)
return err
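The two hunks above replace single List calls with chunked listing via k8s.io/client-go/tools/pager, which is what keeps memory bounded while loading the initial state of clusters with very many resources (#3299). A minimal sketch of that pager pattern, assuming a dynamic.ResourceInterface of the same client-go vintage as the diff (no context argument on List) and an illustrative page size:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/pager"
)

// listAllPaged walks every item of a resource in pages instead of one giant
// List call, so memory stays bounded on clusters with many objects.
// resClient.List(opts) matches the client-go vintage used in the diff above;
// newer client-go versions also take a context argument.
func listAllPaged(resClient dynamic.ResourceInterface) error {
	p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return resClient.List(opts)
	})
	p.PageSize = 500 // illustrative page size
	return p.EachListItem(context.Background(), metav1.ListOptions{}, func(obj runtime.Object) error {
		un, ok := obj.(*unstructured.Unstructured)
		if !ok {
			return fmt.Errorf("unexpected object type %T", obj)
		}
		_ = un.GetName() // process one item at a time here
		return nil
	})
}

EachListItem hands objects to the callback page by page, so the full resource list never has to be materialized at once.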

View File

@@ -101,7 +101,7 @@ var (
// Buckets chosen after observing a ~2100ms mean reconcile time
Buckets: []float64{0.25, .5, 1, 2, 4, 8, 16},
},
append(descAppDefaultLabels, "dest_server"),
[]string{"namespace", "dest_server"},
)
clusterEventsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
@@ -191,7 +191,7 @@ func (m *MetricsServer) IncKubernetesRequest(app *argoappv1.Application, server,
// IncReconcile increments the reconcile counter for an application
func (m *MetricsServer) IncReconcile(app *argoappv1.Application, duration time.Duration) {
m.reconcileHistogram.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), app.Spec.Destination.Server).Observe(duration.Seconds())
m.reconcileHistogram.WithLabelValues(app.Namespace, app.Spec.Destination.Server).Observe(duration.Seconds())
}
type appCollector struct {
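The change above trims the reconciliation histogram to the namespace and dest_server labels to cut series cardinality (#3271). A minimal sketch of declaring and observing such a histogram with the Prometheus Go client (registration with a registry is omitted; metric and label names mirror the diff, the helper function is illustrative):

package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// reconcileHistogram keeps only two labels; every extra label multiplies the
// number of time series, so dropping app name/project reduces cardinality.
var reconcileHistogram = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "argocd_app_reconcile",
		Help:    "Application reconciliation performance.",
		Buckets: []float64{0.25, .5, 1, 2, 4, 8, 16},
	},
	[]string{"namespace", "dest_server"},
)

func observeReconcile(namespace, destServer string, d time.Duration) {
	reconcileHistogram.WithLabelValues(namespace, destServer).Observe(d.Seconds())
}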

View File

@@ -260,16 +260,16 @@ func TestReconcileMetrics(t *testing.T) {
appReconcileMetrics := `
# HELP argocd_app_reconcile Application reconciliation performance.
# TYPE argocd_app_reconcile histogram
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",le="0.25"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",le="0.5"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",le="1"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",le="2"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",le="4"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",le="8"} 1
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",le="16"} 1
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",le="+Inf"} 1
argocd_app_reconcile_sum{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project"} 5
argocd_app_reconcile_count{dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project"} 1
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="0.25"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="0.5"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="1"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="2"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="4"} 0
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="8"} 1
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="16"} 1
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="+Inf"} 1
argocd_app_reconcile_sum{dest_server="https://localhost:6443",namespace="argocd"} 5
argocd_app_reconcile_count{dest_server="https://localhost:6443",namespace="argocd"} 1
`
fakeApp := newFakeApp(fakeApp)
metricsServ.IncReconcile(fakeApp, 5*time.Second)

View File

@@ -140,12 +140,6 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
if err != nil {
return nil, nil, nil, err
}
var apiVersions []string
for _, g := range apiGroups {
for _, v := range g.Versions {
apiVersions = append(apiVersions, v.GroupVersion)
}
}
ts.AddCheckpoint("version_ms")
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
Repo: repo,
@@ -161,7 +155,7 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
BuildOptions: buildOptions,
},
KubeVersion: serverVersion,
ApiVersions: apiVersions,
ApiVersions: argo.APIGroupsToVersions(apiGroups),
})
if err != nil {
return nil, nil, nil, err

View File

@@ -47,6 +47,7 @@ type syncContext struct {
proj *v1alpha1.AppProject
compareResult *comparisonResult
config *rest.Config
rawConfig *rest.Config
dynamicIf dynamic.Interface
disco discovery.DiscoveryInterface
extensionsclientset *clientset.Clientset
@@ -173,6 +174,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
proj: proj,
compareResult: compareResult,
config: restConfig,
rawConfig: clst.RawRestConfig(),
dynamicIf: dynamicIf,
disco: disco,
extensionsclientset: extensionsclientset,
@@ -422,7 +424,13 @@ func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {
// metadata.generateName, then we will generate a formulated metadata.name before submission.
targetObj := obj.DeepCopy()
if targetObj.GetName() == "" {
postfix := strings.ToLower(fmt.Sprintf("%s-%s-%d", sc.syncRes.Revision[0:7], phase, sc.opState.StartedAt.UTC().Unix()))
var syncRevision string
if len(sc.syncRes.Revision) >= 8 {
syncRevision = sc.syncRes.Revision[0:7]
} else {
syncRevision = sc.syncRes.Revision
}
postfix := strings.ToLower(fmt.Sprintf("%s-%s-%d", syncRevision, phase, sc.opState.StartedAt.UTC().Unix()))
generateName := obj.GetGenerateName()
targetObj.SetName(fmt.Sprintf("%s%s", generateName, postfix))
}
@@ -549,7 +557,7 @@ func (sc *syncContext) ensureCRDReady(name string) {
// applyObject performs a `kubectl apply` of a single resource
func (sc *syncContext) applyObject(targetObj *unstructured.Unstructured, dryRun, force, validate bool) (v1alpha1.ResultCode, string) {
message, err := sc.kubectl.ApplyResource(sc.config, targetObj, targetObj.GetNamespace(), dryRun, force, validate)
message, err := sc.kubectl.ApplyResource(sc.rawConfig, targetObj, targetObj.GetNamespace(), dryRun, force, validate)
if err != nil {
return v1alpha1.ResultCodeSyncFailed, err.Error()
}
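The getSyncTasks hunk above stops slicing Revision[0:7] unconditionally, which previously panicked for revisions shorter than eight characters (#3368). The same guard as a small standalone sketch (helper names are illustrative):

package example

import (
	"fmt"
	"strings"
	"time"
)

// shortRevision returns at most the first seven characters of a revision,
// so generated hook names never slice past the end of a short revision.
func shortRevision(revision string) string {
	if len(revision) > 7 {
		return revision[:7]
	}
	return revision
}

// hookPostfix mirrors the generated-name suffix built in the diff above.
func hookPostfix(revision, phase string, startedAt time.Time) string {
	return strings.ToLower(fmt.Sprintf("%s-%s-%d", shortRevision(revision), phase, startedAt.UTC().Unix()))
}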

View File

@@ -44,6 +44,7 @@ func newTestSyncCtx(resources ...*v1.APIResourceList) *syncContext {
})
sc := syncContext{
config: &rest.Config{},
rawConfig: &rest.Config{},
namespace: test.FakeArgoCDNamespace,
server: test.FakeClusterURL,
syncRes: &v1alpha1.SyncOperationResult{
@@ -398,20 +399,40 @@ func TestSelectiveSyncOnly(t *testing.T) {
}
func TestUnnamedHooksGetUniqueNames(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
pod := test.NewPod()
pod.SetName("")
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync,PostSync"})
syncCtx.compareResult = &comparisonResult{hooks: []*unstructured.Unstructured{pod}}
t.Run("Truncated revision", func(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
pod := test.NewPod()
pod.SetName("")
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync,PostSync"})
syncCtx.compareResult = &comparisonResult{hooks: []*unstructured.Unstructured{pod}}
tasks, successful := syncCtx.getSyncTasks()
tasks, successful := syncCtx.getSyncTasks()
assert.True(t, successful)
assert.Len(t, tasks, 2)
assert.Contains(t, tasks[0].name(), "foobarb-presync-")
assert.Contains(t, tasks[1].name(), "foobarb-postsync-")
assert.Equal(t, "", pod.GetName())
})
t.Run("Short revision", func(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
pod := test.NewPod()
pod.SetName("")
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync,PostSync"})
syncCtx.compareResult = &comparisonResult{hooks: []*unstructured.Unstructured{pod}}
syncCtx.syncRes.Revision = "foobar"
tasks, successful := syncCtx.getSyncTasks()
assert.True(t, successful)
assert.Len(t, tasks, 2)
assert.Contains(t, tasks[0].name(), "foobar-presync-")
assert.Contains(t, tasks[1].name(), "foobar-postsync-")
assert.Equal(t, "", pod.GetName())
})
assert.True(t, successful)
assert.Len(t, tasks, 2)
assert.Contains(t, tasks[0].name(), "foobarb-presync-")
assert.Contains(t, tasks[1].name(), "foobarb-postsync-")
assert.Equal(t, "", pod.GetName())
}
func TestManagedResourceAreNotNamed(t *testing.T) {

View File

@@ -12,4 +12,4 @@ bases:
images:
- name: argoproj/argocd
newName: argoproj/argocd
newTag: latest
newTag: v1.5.2

View File

@@ -18,4 +18,4 @@ bases:
images:
- name: argoproj/argocd
newName: argoproj/argocd
newTag: latest
newTag: v1.5.2

View File

@@ -155,9 +155,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE0
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# Check Sentinel and whether they are nominated master
backend check_if_redis_is_master_1
mode tcp
@@ -169,9 +169,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE1
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# Check Sentinel and whether they are nominated master
backend check_if_redis_is_master_2
mode tcp
@@ -183,9 +183,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE2
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# decide redis backend to use
#master
@@ -204,11 +204,11 @@ data:
tcp-check send QUIT\r\n
tcp-check expect string +OK
use-server R0 if { srv_is_up(R0) } { nbsrv(check_if_redis_is_master_0) ge 2 }
server R0 argocd-redis-ha-announce-0:6379 check inter 1s fall 1 rise 1
server R0 argocd-redis-ha-announce-0:6379 check inter 3s fall 1 rise 1
use-server R1 if { srv_is_up(R1) } { nbsrv(check_if_redis_is_master_1) ge 2 }
server R1 argocd-redis-ha-announce-1:6379 check inter 1s fall 1 rise 1
server R1 argocd-redis-ha-announce-1:6379 check inter 3s fall 1 rise 1
use-server R2 if { srv_is_up(R2) } { nbsrv(check_if_redis_is_master_2) ge 2 }
server R2 argocd-redis-ha-announce-2:6379 check inter 1s fall 1 rise 1
server R2 argocd-redis-ha-announce-2:6379 check inter 3s fall 1 rise 1
haproxy_init.sh: |
HAPROXY_CONF=/data/haproxy.cfg
cp /readonly/haproxy.cfg "$HAPROXY_CONF"

View File

@@ -16,4 +16,6 @@ helm2 template ./chart \
--name argocd \
--values ./chart/values.yaml \
${helm_execute} \
>> ./chart/upstream.yaml
>> ./chart/upstream_orig.yaml
sed -e 's/check inter 1s/check inter 3s/' ./chart/upstream_orig.yaml >> ./chart/upstream.yaml && rm ./chart/upstream_orig.yaml

View File

@@ -2213,9 +2213,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE0
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# Check Sentinel and whether they are nominated master
backend check_if_redis_is_master_1
mode tcp
@@ -2227,9 +2227,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE1
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# Check Sentinel and whether they are nominated master
backend check_if_redis_is_master_2
mode tcp
@@ -2241,9 +2241,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE2
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# decide redis backend to use
#master
@@ -2262,11 +2262,11 @@ data:
tcp-check send QUIT\r\n
tcp-check expect string +OK
use-server R0 if { srv_is_up(R0) } { nbsrv(check_if_redis_is_master_0) ge 2 }
server R0 argocd-redis-ha-announce-0:6379 check inter 1s fall 1 rise 1
server R0 argocd-redis-ha-announce-0:6379 check inter 3s fall 1 rise 1
use-server R1 if { srv_is_up(R1) } { nbsrv(check_if_redis_is_master_1) ge 2 }
server R1 argocd-redis-ha-announce-1:6379 check inter 1s fall 1 rise 1
server R1 argocd-redis-ha-announce-1:6379 check inter 3s fall 1 rise 1
use-server R2 if { srv_is_up(R2) } { nbsrv(check_if_redis_is_master_2) ge 2 }
server R2 argocd-redis-ha-announce-2:6379 check inter 1s fall 1 rise 1
server R2 argocd-redis-ha-announce-2:6379 check inter 3s fall 1 rise 1
haproxy_init.sh: |
HAPROXY_CONF=/data/haproxy.cfg
cp /readonly/haproxy.cfg "$HAPROXY_CONF"
@@ -2827,7 +2827,7 @@ spec:
- "10"
- --redis
- argocd-redis-ha-haproxy:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2882,7 +2882,7 @@ spec:
- -n
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -2932,7 +2932,7 @@ spec:
- argocd-repo-server
- --redis
- argocd-redis-ha-haproxy:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
initialDelaySeconds: 5
@@ -3000,7 +3000,7 @@ spec:
- /shared/app
- --redis
- argocd-redis-ha-haproxy:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
httpGet:

View File

@@ -2128,9 +2128,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE0
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# Check Sentinel and whether they are nominated master
backend check_if_redis_is_master_1
mode tcp
@@ -2142,9 +2142,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE1
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# Check Sentinel and whether they are nominated master
backend check_if_redis_is_master_2
mode tcp
@@ -2156,9 +2156,9 @@ data:
tcp-check expect string REPLACE_ANNOUNCE2
tcp-check send QUIT\r\n
tcp-check expect string +OK
server R0 argocd-redis-ha-announce-0:26379 check inter 1s
server R1 argocd-redis-ha-announce-1:26379 check inter 1s
server R2 argocd-redis-ha-announce-2:26379 check inter 1s
server R0 argocd-redis-ha-announce-0:26379 check inter 3s
server R1 argocd-redis-ha-announce-1:26379 check inter 3s
server R2 argocd-redis-ha-announce-2:26379 check inter 3s
# decide redis backend to use
#master
@@ -2177,11 +2177,11 @@ data:
tcp-check send QUIT\r\n
tcp-check expect string +OK
use-server R0 if { srv_is_up(R0) } { nbsrv(check_if_redis_is_master_0) ge 2 }
server R0 argocd-redis-ha-announce-0:6379 check inter 1s fall 1 rise 1
server R0 argocd-redis-ha-announce-0:6379 check inter 3s fall 1 rise 1
use-server R1 if { srv_is_up(R1) } { nbsrv(check_if_redis_is_master_1) ge 2 }
server R1 argocd-redis-ha-announce-1:6379 check inter 1s fall 1 rise 1
server R1 argocd-redis-ha-announce-1:6379 check inter 3s fall 1 rise 1
use-server R2 if { srv_is_up(R2) } { nbsrv(check_if_redis_is_master_2) ge 2 }
server R2 argocd-redis-ha-announce-2:6379 check inter 1s fall 1 rise 1
server R2 argocd-redis-ha-announce-2:6379 check inter 3s fall 1 rise 1
haproxy_init.sh: |
HAPROXY_CONF=/data/haproxy.cfg
cp /readonly/haproxy.cfg "$HAPROXY_CONF"
@@ -2742,7 +2742,7 @@ spec:
- "10"
- --redis
- argocd-redis-ha-haproxy:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2797,7 +2797,7 @@ spec:
- -n
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -2847,7 +2847,7 @@ spec:
- argocd-repo-server
- --redis
- argocd-redis-ha-haproxy:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
initialDelaySeconds: 5
@@ -2915,7 +2915,7 @@ spec:
- /shared/app
- --redis
- argocd-redis-ha-haproxy:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
httpGet:

View File

@@ -2327,7 +2327,7 @@ spec:
- "20"
- --operation-processors
- "10"
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2382,7 +2382,7 @@ spec:
- -n
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -2446,7 +2446,7 @@ spec:
- argocd-repo-server
- --redis
- argocd-redis:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
initialDelaySeconds: 5
@@ -2497,7 +2497,7 @@ spec:
- argocd-server
- --staticassets
- /shared/app
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
httpGet:

View File

@@ -2242,7 +2242,7 @@ spec:
- "20"
- --operation-processors
- "10"
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2297,7 +2297,7 @@ spec:
- -n
- /usr/local/bin/argocd-util
- /shared
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -2361,7 +2361,7 @@ spec:
- argocd-repo-server
- --redis
- argocd-redis:6379
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
initialDelaySeconds: 5
@@ -2412,7 +2412,7 @@ spec:
- argocd-server
- --staticassets
- /shared/app
image: argoproj/argocd:latest
image: argoproj/argocd:v1.5.2
imagePullPolicy: Always
livenessProbe:
httpGet:

View File

@@ -3,6 +3,8 @@ package v1alpha1
import (
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
@@ -22,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
@@ -2172,8 +2175,45 @@ func (proj AppProject) IsDestinationPermitted(dst ApplicationDestination) bool {
return false
}
// RESTConfig returns a go-client REST config from cluster
func (c *Cluster) RESTConfig() *rest.Config {
// SetK8SConfigDefaults sets Kubernetes REST config default settings
func SetK8SConfigDefaults(config *rest.Config) error {
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
tlsConfig, err := rest.TLSConfigFor(config)
if err != nil {
return err
}
dial := (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext
transport := utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
MaxIdleConns: common.K8sMaxIdleConnections,
MaxIdleConnsPerHost: common.K8sMaxIdleConnections,
MaxConnsPerHost: common.K8sMaxIdleConnections,
DialContext: dial,
DisableCompression: config.DisableCompression,
})
tr, err := rest.HTTPWrappersForConfig(config, transport)
if err != nil {
return err
}
// set default tls config and remove auth/exec providers since we use them in a custom transport
config.TLSClientConfig = rest.TLSClientConfig{}
config.AuthProvider = nil
config.ExecProvider = nil
config.Transport = tr
return nil
}
// RawRestConfig returns a go-client REST config from cluster that might be serialized into the file using kube.WriteKubeConfig method.
func (c *Cluster) RawRestConfig() *rest.Config {
var config *rest.Config
var err error
if c.Server == common.KubernetesInternalAPIServerAddr && os.Getenv(common.EnvVarFakeInClusterConfig) == "true" {
@@ -2220,8 +2260,16 @@ func (c *Cluster) RESTConfig() *rest.Config {
if err != nil {
panic(fmt.Sprintf("Unable to create K8s REST config: %v", err))
}
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
return config
}
// RESTConfig returns a go-client REST config from cluster with tuned throttling and HTTP client settings.
func (c *Cluster) RESTConfig() *rest.Config {
config := c.RawRestConfig()
err := SetK8SConfigDefaults(config)
if err != nil {
panic(fmt.Sprintf("Unable to apply K8s REST config defaults: %v", err))
}
return config
}
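The hunks above split cluster config construction into RawRestConfig (serializable, suitable for writing out a kubeconfig for kubectl) and RESTConfig, which applies SetK8SConfigDefaults for throttling plus a pooled HTTP transport. A minimal sketch of a caller applying the same defaults before building a clientset, mirroring the controller/server command changes earlier in this diff (error handling simplified, helper name illustrative):

package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)

// newKubeClient applies the shared throttling and transport defaults to a
// REST config before constructing a clientset from it.
func newKubeClient(config *rest.Config) (kubernetes.Interface, error) {
	if err := v1alpha1.SetK8SConfigDefaults(config); err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(config)
}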

View File

@@ -25,6 +25,17 @@ function checkReplicasStatus(obj)
return nil
end
-- In Argo Rollouts v0.8, .status.canary.stableRS is deprecated in favor of .status.stableRS; this func grabs the correct one
function getStableRS(obj)
if obj.status.stableRS ~= nil then
return obj.status.stableRS
end
if obj.status.canary ~= nil then
return obj.status.canary.stableRS
end
return ""
end
function getNumberValueOrDefault(field)
if field ~= nil then
return field
@@ -87,7 +98,7 @@ if obj.status ~= nil then
return hs
end
if obj.spec.strategy.canary ~= nil then
currentRSIsStable = obj.status.canary.stableRS == obj.status.currentPodHash
currentRSIsStable = getStableRS(obj) == obj.status.currentPodHash
if obj.spec.strategy.canary.steps ~= nil and table.getn(obj.spec.strategy.canary.steps) > 0 then
stepCount = table.getn(obj.spec.strategy.canary.steps)
if obj.status.currentStepIndex ~= nil then

View File

@@ -48,6 +48,10 @@ tests:
- healthStatus:
status: Healthy
message: The rollout has completed all steps
inputPath: testdata/canary/healthy_executedAllStepsPreV0.8.yaml
- healthStatus:
status: Healthy
message: The rollout has completed all steps
inputPath: testdata/canary/healthy_executedAllSteps.yaml
- healthStatus:
status: Progressing

View File

@@ -48,8 +48,7 @@ status:
HPAReplicas: 5
availableReplicas: 5
blueGreen: {}
canary:
stableRS: 84ccfddd66
canary: {}
conditions:
- lastTransitionTime: '2019-05-01T21:55:30Z'
lastUpdateTime: '2019-05-01T21:55:58Z'
@@ -70,4 +69,5 @@ status:
readyReplicas: 5
replicas: 5
selector: app=guestbook-canary
stableRS: 84ccfddd66
updatedReplicas: 5

View File

@@ -0,0 +1,73 @@
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"argoproj.io/v1alpha1","kind":"Rollout","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-canary","ksonnet.io/component":"guestbook-ui"},"name":"guestbook-canary","namespace":"default"},"spec":{"minReadySeconds":10,"replicas":5,"selector":{"matchLabels":{"app":"guestbook-canary"}},"strategy":{"canary":{"maxSurge":1,"maxUnavailable":0,"steps":[{"setWeight":20},{"pause":{"duration":30}},{"setWeight":40},{"pause":{}}]}},"template":{"metadata":{"labels":{"app":"guestbook-canary"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-canary","ports":[{"containerPort":80}]}]}}}}
rollout.argoproj.io/revision: '1'
clusterName: ''
creationTimestamp: '2019-05-01T21:55:30Z'
generation: 1
labels:
app.kubernetes.io/instance: guestbook-canary
ksonnet.io/component: guestbook-ui
name: guestbook-canary
namespace: default
resourceVersion: '955764'
selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-canary
uid: d6105ccd-6c5b-11e9-b8d7-025000000001
spec:
minReadySeconds: 10
replicas: 5
selector:
matchLabels:
app: guestbook-canary
strategy:
canary:
maxSurge: 1
maxUnavailable: 0
steps:
- setWeight: 20
- pause:
duration: 30
- setWeight: 40
- pause: {}
template:
metadata:
creationTimestamp: null
labels:
app: guestbook-canary
spec:
containers:
- image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
name: guestbook-canary
ports:
- containerPort: 80
resources: {}
status:
HPAReplicas: 5
availableReplicas: 5
blueGreen: {}
canary:
stableRS: 84ccfddd66
conditions:
- lastTransitionTime: '2019-05-01T21:55:30Z'
lastUpdateTime: '2019-05-01T21:55:58Z'
message: ReplicaSet "guestbook-canary-84ccfddd66" has successfully progressed.
reason: NewReplicaSetAvailable
status: 'True'
type: Progressing
- lastTransitionTime: '2019-05-01T21:55:58Z'
lastUpdateTime: '2019-05-01T21:55:58Z'
message: Rollout has minimum availability
reason: AvailableReason
status: 'True'
type: Available
currentPodHash: 84ccfddd66
currentStepHash: 5f8fbdf7bb
currentStepIndex: 4
observedGeneration: c45557fd9
readyReplicas: 5
replicas: 5
selector: app=guestbook-canary
updatedReplicas: 5

View File

@@ -1,6 +1,7 @@
package account
import (
"errors"
"fmt"
"sort"
"time"
@@ -12,6 +13,7 @@ import (
"google.golang.org/grpc/status"
"k8s.io/kubernetes/pkg/util/slice"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apiclient/account"
"github.com/argoproj/argo-cd/server/rbacpolicy"
"github.com/argoproj/argo-cd/util/password"
@@ -34,22 +36,44 @@ func NewServer(sessionMgr *session.SessionManager, settingsMgr *settings.Setting
// UpdatePassword updates the password of the currently authenticated account or the account specified in the request.
func (s *Server) UpdatePassword(ctx context.Context, q *account.UpdatePasswordRequest) (*account.UpdatePasswordResponse, error) {
issuer := session.Iss(ctx)
username := session.Sub(ctx)
if rbacpolicy.IsProjectSubject(username) || session.Iss(ctx) != session.SessionManagerClaimsIssuer {
return nil, status.Errorf(codes.InvalidArgument, "password can only be changed for local users, not user %q", username)
}
err := s.sessionMgr.VerifyUsernamePassword(username, q.CurrentPassword)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "current password does not match")
}
updatedUsername := username
if q.Name != "" && q.Name != username {
if q.Name != "" {
updatedUsername = q.Name
}
// check for permission if the user is trying to change someone else's password
// assuming the user is updating someone else if the username differs or the issuer is not Argo CD
if updatedUsername != username || issuer != session.SessionManagerClaimsIssuer {
if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceAccounts, rbacpolicy.ActionUpdate, q.Name); err != nil {
return nil, err
}
updatedUsername = q.Name
}
if issuer == session.SessionManagerClaimsIssuer {
// local user is changing own password or another user password
// user is changing own password.
// ensure token belongs to a user, not project
if q.Name == "" && rbacpolicy.IsProjectSubject(username) {
return nil, status.Errorf(codes.InvalidArgument, "password can only be changed for local users, not user %q", username)
}
err := s.sessionMgr.VerifyUsernamePassword(username, q.CurrentPassword)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "current password does not match")
}
} else {
// SSO user is changing own or a local user's password
iat, err := session.Iat(ctx)
if err != nil {
return nil, err
}
if time.Now().Sub(iat) > common.ChangePasswordSSOTokenMaxAge {
return nil, errors.New("SSO token is too old. Please use 'argocd relogin' to get a new token.")
}
}
hashedPassword, err := password.HashPassword(q.NewPassword)

View File

@@ -3,6 +3,7 @@ package account
import (
"context"
"testing"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/stretchr/testify/assert"
@@ -29,6 +30,12 @@ const (
// return an AccountServer which returns fake data
func newTestAccountServer(ctx context.Context, opts ...func(cm *v1.ConfigMap, secret *v1.Secret)) (*Server, *session.Server) {
return newTestAccountServerExt(ctx, func(claims jwt.Claims, rvals ...interface{}) bool {
return true
}, opts...)
}
func newTestAccountServerExt(ctx context.Context, enforceFn rbac.ClaimsEnforcerFunc, opts ...func(cm *v1.ConfigMap, secret *v1.Secret)) (*Server, *session.Server) {
bcrypt, err := password.HashPassword("oldpassword")
errors.CheckError(err)
cm := &v1.ConfigMap{
@@ -58,9 +65,7 @@ func newTestAccountServer(ctx context.Context, opts ...func(cm *v1.ConfigMap, se
settingsMgr := settings.NewSettingsManager(ctx, kubeclientset, testNamespace)
sessionMgr := sessionutil.NewSessionManager(settingsMgr, "")
enforcer := rbac.NewEnforcer(kubeclientset, testNamespace, common.ArgoCDRBACConfigMapName, nil)
enforcer.SetClaimsEnforcerFunc(func(claims jwt.Claims, rvals ...interface{}) bool {
return true
})
enforcer.SetClaimsEnforcerFunc(enforceFn)
return NewServer(sessionMgr, settingsMgr, enforcer), session.NewServer(sessionMgr, nil)
}
@@ -78,6 +83,21 @@ func adminContext(ctx context.Context) context.Context {
return context.WithValue(ctx, "claims", &jwt.StandardClaims{Subject: "admin", Issuer: sessionutil.SessionManagerClaimsIssuer})
}
func ssoAdminContext(ctx context.Context, iat time.Time) context.Context {
return context.WithValue(ctx, "claims", &jwt.StandardClaims{
Subject: "admin",
Issuer: "https://myargocdhost.com/api/dex",
IssuedAt: iat.Unix(),
})
}
func projTokenContext(ctx context.Context) context.Context {
return context.WithValue(ctx, "claims", &jwt.StandardClaims{
Subject: "proj:demo:deployer",
Issuer: sessionutil.SessionManagerClaimsIssuer,
})
}
func TestUpdatePassword(t *testing.T) {
accountServer, sessionServer := newTestAccountServer(context.Background())
ctx := adminContext(context.Background())
@@ -114,6 +134,76 @@ func TestUpdatePassword(t *testing.T) {
assert.NoError(t, err)
}
func TestUpdatePassword_AdminUpdatesAnotherUser(t *testing.T) {
accountServer, sessionServer := newTestAccountServer(context.Background(), func(cm *v1.ConfigMap, secret *v1.Secret) {
cm.Data["accounts.anotherUser"] = "login"
})
ctx := adminContext(context.Background())
_, err := accountServer.UpdatePassword(ctx, &account.UpdatePasswordRequest{CurrentPassword: "oldpassword", NewPassword: "newpassword", Name: "anotherUser"})
assert.NoError(t, err)
_, err = sessionServer.Create(ctx, &sessionpkg.SessionCreateRequest{Username: "anotherUser", Password: "newpassword"})
assert.NoError(t, err)
}
func TestUpdatePassword_DoesNotHavePermissions(t *testing.T) {
enforcer := func(claims jwt.Claims, rvals ...interface{}) bool {
return false
}
t.Run("LocalAccountUpdatesAnotherAccount", func(t *testing.T) {
accountServer, _ := newTestAccountServerExt(context.Background(), enforcer, func(cm *v1.ConfigMap, secret *v1.Secret) {
cm.Data["accounts.anotherUser"] = "login"
})
ctx := adminContext(context.Background())
_, err := accountServer.UpdatePassword(ctx, &account.UpdatePasswordRequest{CurrentPassword: "oldpassword", NewPassword: "newpassword", Name: "anotherUser"})
assert.Error(t, err)
assert.Contains(t, err.Error(), "permission denied")
})
t.Run("SSOAccountWithTheSameName", func(t *testing.T) {
accountServer, _ := newTestAccountServerExt(context.Background(), enforcer)
ctx := ssoAdminContext(context.Background(), time.Now())
_, err := accountServer.UpdatePassword(ctx, &account.UpdatePasswordRequest{CurrentPassword: "oldpassword", NewPassword: "newpassword", Name: "admin"})
assert.Error(t, err)
assert.Contains(t, err.Error(), "permission denied")
})
}
func TestUpdatePassword_ProjectToken(t *testing.T) {
accountServer, _ := newTestAccountServer(context.Background(), func(cm *v1.ConfigMap, secret *v1.Secret) {
cm.Data["accounts.anotherUser"] = "login"
})
ctx := projTokenContext(context.Background())
_, err := accountServer.UpdatePassword(ctx, &account.UpdatePasswordRequest{CurrentPassword: "oldpassword", NewPassword: "newpassword"})
assert.Error(t, err)
assert.Contains(t, err.Error(), "password can only be changed for local users")
}
func TestUpdatePassword_OldSSOToken(t *testing.T) {
accountServer, _ := newTestAccountServer(context.Background(), func(cm *v1.ConfigMap, secret *v1.Secret) {
cm.Data["accounts.anotherUser"] = "login"
})
ctx := ssoAdminContext(context.Background(), time.Now().Add(-2*common.ChangePasswordSSOTokenMaxAge))
_, err := accountServer.UpdatePassword(ctx, &account.UpdatePasswordRequest{CurrentPassword: "oldpassword", NewPassword: "newpassword", Name: "anotherUser"})
assert.Error(t, err)
}
func TestUpdatePassword_SSOUserUpdatesAnotherUser(t *testing.T) {
accountServer, sessionServer := newTestAccountServer(context.Background(), func(cm *v1.ConfigMap, secret *v1.Secret) {
cm.Data["accounts.anotherUser"] = "login"
})
ctx := ssoAdminContext(context.Background(), time.Now())
_, err := accountServer.UpdatePassword(ctx, &account.UpdatePasswordRequest{CurrentPassword: "oldpassword", NewPassword: "newpassword", Name: "anotherUser"})
assert.NoError(t, err)
_, err = sessionServer.Create(ctx, &sessionpkg.SessionCreateRequest{Username: "anotherUser", Password: "newpassword"})
assert.NoError(t, err)
}
func TestListAccounts_NoAccountsConfigured(t *testing.T) {
ctx := adminContext(context.Background())

View File

@@ -225,7 +225,12 @@ func (s *Server) GetManifests(ctx context.Context, q *application.ApplicationMan
if err != nil {
return nil, err
}
cluster.ServerVersion, err = s.kubectl.GetServerVersion(cluster.RESTConfig())
config := cluster.RESTConfig()
cluster.ServerVersion, err = s.kubectl.GetServerVersion(config)
if err != nil {
return nil, err
}
apiGroups, err := s.kubectl.GetAPIGroups(config)
if err != nil {
return nil, err
}
@@ -240,6 +245,7 @@ func (s *Server) GetManifests(ctx context.Context, q *application.ApplicationMan
Plugins: plugins,
KustomizeOptions: &kustomizeOptions,
KubeVersion: cluster.ServerVersion,
ApiVersions: argo.APIGroupsToVersions(apiGroups),
})
if err != nil {
return nil, err

View File

@@ -76,7 +76,9 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if app, err := h.appClientset.ArgoprojV1alpha1().Applications(h.namespace).Get(name[0], v1.GetOptions{}); err == nil {
health = app.Status.Health.Status
status = app.Status.Sync.Status
revision = app.Status.OperationState.SyncResult.Revision
if app.Status.OperationState != nil && app.Status.OperationState.SyncResult != nil {
revision = app.Status.OperationState.SyncResult.Revision
}
} else if errors.IsNotFound(err) {
notFound = true
}
@@ -115,7 +117,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
badge = replaceFirstGroupSubMatch(leftTextPattern, badge, leftText)
badge = replaceFirstGroupSubMatch(rightTextPattern, badge, rightText)
if !notFound && revisionEnabled {
if !notFound && revisionEnabled && revision != "" {
// Increase width of SVG and enable display of revision components
badge = svgWidthPattern.ReplaceAllString(badge, fmt.Sprintf(`<svg width="%d" $2`, svgWidthWithRevision))
badge = displayNonePattern.ReplaceAllString(badge, `display="inline"`)

View File

@@ -88,6 +88,28 @@ func TestHandlerFeatureIsEnabledRevisionIsEnabled(t *testing.T) {
assert.Contains(t, response, "(aa29b85)")
}
func TestHandlerRevisionIsEnabledNoOperationState(t *testing.T) {
app := testApp.DeepCopy()
app.Status.OperationState = nil
settingsMgr := settings.NewSettingsManager(context.Background(), fake.NewSimpleClientset(&argoCDCm, &argoCDSecret), "default")
handler := NewHandler(appclientset.NewSimpleClientset(app), settingsMgr, "default")
req, err := http.NewRequest("GET", "/api/badge?name=testApp&revision=true", nil)
assert.NoError(t, err)
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
assert.Equal(t, "private, no-store", rr.Header().Get("Cache-Control"))
response := rr.Body.String()
assert.Equal(t, toRGBString(Green), leftRectColorPattern.FindStringSubmatch(response)[1])
assert.Equal(t, toRGBString(Green), rightRectColorPattern.FindStringSubmatch(response)[1])
assert.Equal(t, "Healthy", leftTextPattern.FindStringSubmatch(response)[1])
assert.Equal(t, "Synced", rightTextPattern.FindStringSubmatch(response)[1])
assert.NotContains(t, response, "(aa29b85)")
}
func TestHandlerFeatureIsDisabled(t *testing.T) {
argoCDCmDisabled := argoCDCm.DeepCopy()

View File

@@ -27,7 +27,7 @@ func NewServer(mgr *sessionmgr.SessionManager, authenticator Authenticator) *Ser
// Create generates a JWT token signed by Argo CD intended for web/CLI logins of the admin user
// using username/password
func (s *Server) Create(ctx context.Context, q *session.SessionCreateRequest) (*session.SessionResponse, error) {
func (s *Server) Create(_ context.Context, q *session.SessionCreateRequest) (*session.SessionResponse, error) {
if q.Token != "" {
return nil, status.Errorf(codes.Unauthenticated, "token-based session creation no longer supported. please upgrade argocd cli to v0.7+")
}

View File

@@ -4,15 +4,15 @@ import (
"context"
"testing"
"github.com/argoproj/argo-cd/pkg/apiclient/session"
"github.com/argoproj/argo-cd/util"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/pkg/apiclient/session"
. "github.com/argoproj/argo-cd/test/e2e/fixture"
"github.com/argoproj/argo-cd/util"
)
func TestCreateAndUseAccount(t *testing.T) {
@@ -50,3 +50,29 @@ test true login, apiKey`, output)
assert.Equal(t, info.Username, "test")
}
func TestLoginBadCredentials(t *testing.T) {
EnsureCleanState(t)
closer, sessionClient := ArgoCDClientset.NewSessionClientOrDie()
defer util.Close(closer)
requests := []session.SessionCreateRequest{{
Username: "user-does-not-exist", Password: "some-password",
}, {
Username: "admin", Password: "bad-password",
}}
for _, r := range requests {
_, err := sessionClient.Create(context.Background(), &r)
if !assert.Error(t, err) {
return
}
errStatus, ok := status.FromError(err)
if !assert.True(t, ok) {
return
}
assert.Equal(t, codes.Unauthenticated, errStatus.Code())
assert.Equal(t, "Invalid username or password", errStatus.Message())
}
}

View File

@@ -290,3 +290,14 @@ func testHelmWithDependencies(t *testing.T, chartPath string, legacyRepo bool) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced))
}
func TestHelm3CRD(t *testing.T) {
Given(t).
Path("helm3-crd").
When().
Create().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(ResourceSyncStatusIs("CustomResourceDefinition", "crontabs.stable.example.com", SyncStatusCodeSynced))
}

View File

@@ -0,0 +1,6 @@
apiVersion: v2
name: argocd-helm3-crd
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: 1.16.0

View File

@@ -0,0 +1,14 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: crontabs.stable.example.com
spec:
group: stable.example.com
version: v1
scope: Namespaced
names:
plural: crontabs
singular: crontab
kind: CronTab
shortNames:
- ct

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: my-map
data:
foo: bar

View File

View File

@@ -122,7 +122,7 @@ export class App extends React.Component<{}, {popupProps: PopupProps; error: Err
const ga = await import('react-ga');
ga.initialize(trackingID);
const trackPageView = () => {
if (loggedIn) {
if (loggedIn && username) {
const userId = !anonymizeUsers ? username : hashCode(username).toString();
ga.set({userId});
}

View File

@@ -236,11 +236,17 @@ func ValidateRepo(
})
return conditions, nil
}
cluster.ServerVersion, err = kubectl.GetServerVersion(cluster.RESTConfig())
config := cluster.RESTConfig()
cluster.ServerVersion, err = kubectl.GetServerVersion(config)
if err != nil {
return nil, err
}
conditions = append(conditions, verifyGenerateManifests(ctx, repo, helmRepos, app, repoClient, kustomizeOptions, plugins, cluster.ServerVersion)...)
apiGroups, err := kubectl.GetAPIGroups(config)
if err != nil {
return nil, err
}
conditions = append(conditions, verifyGenerateManifests(
ctx, repo, helmRepos, app, repoClient, kustomizeOptions, plugins, cluster.ServerVersion, APIGroupsToVersions(apiGroups))...)
return conditions, nil
}
@@ -310,6 +316,17 @@ func ValidatePermissions(ctx context.Context, spec *argoappv1.ApplicationSpec, p
return conditions, nil
}
// APIGroupsToVersions converts a list of API groups into a flat list of group/version strings
func APIGroupsToVersions(apiGroups []metav1.APIGroup) []string {
var apiVersions []string
for _, g := range apiGroups {
for _, v := range g.Versions {
apiVersions = append(apiVersions, v.GroupVersion)
}
}
return apiVersions
}
// GetAppProject returns a project from an application
func GetAppProject(spec *argoappv1.ApplicationSpec, projLister applicationsv1.AppProjectLister, ns string) (*argoappv1.AppProject, error) {
return projLister.AppProjects(ns).Get(spec.GetProject())
@@ -325,6 +342,7 @@ func verifyGenerateManifests(
kustomizeOptions *argoappv1.KustomizeOptions,
plugins []*argoappv1.ConfigManagementPlugin,
kubeVersion string,
apiVersions []string,
) []argoappv1.ApplicationCondition {
spec := &app.Spec
var conditions []argoappv1.ApplicationCondition
@@ -349,6 +367,7 @@ func verifyGenerateManifests(
Plugins: plugins,
KustomizeOptions: kustomizeOptions,
KubeVersion: kubeVersion,
ApiVersions: apiVersions,
}
req.Repo.CopyCredentialsFromRepo(repoRes)
req.Repo.CopySettingsFrom(repoRes)

View File
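The new APIGroupsToVersions helper flattens whatever the cluster's discovery endpoint reports so it can be passed along as ApiVersions on the manifest generation request (Helm surfaces these values to charts through its capabilities mechanism). A rough sketch of where that input comes from, using the client-go discovery client directly rather than the kubectl abstraction the real code goes through:

    import (
        "k8s.io/client-go/discovery"
        "k8s.io/client-go/rest"
    )

    // clusterAPIVersions is a hypothetical helper for illustration only
    func clusterAPIVersions(config *rest.Config) ([]string, error) {
        disco, err := discovery.NewDiscoveryClientForConfig(config)
        if err != nil {
            return nil, err
        }
        groupList, err := disco.ServerGroups() // same data kubectl.GetAPIGroups returns
        if err != nil {
            return nil, err
        }
        return APIGroupsToVersions(groupList.Groups), nil
    }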

@@ -2,11 +2,14 @@ package argo
import (
"context"
"fmt"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/watch"
@@ -18,6 +21,10 @@ import (
"github.com/argoproj/argo-cd/pkg/client/informers/externalversions/application/v1alpha1"
applisters "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/apiclient"
"github.com/argoproj/argo-cd/reposerver/apiclient/mocks"
"github.com/argoproj/argo-cd/util"
dbmocks "github.com/argoproj/argo-cd/util/db/mocks"
"github.com/argoproj/argo-cd/util/kube/kubetest"
)
func TestRefreshApp(t *testing.T) {
@@ -210,3 +217,70 @@ func Test_enrichSpec(t *testing.T) {
assert.Equal(t, "my-namespace", spec.Destination.Namespace)
})
}
func TestAPIGroupsToVersions(t *testing.T) {
versions := APIGroupsToVersions([]metav1.APIGroup{{
Versions: []metav1.GroupVersionForDiscovery{{GroupVersion: "apps/v1beta1"}, {GroupVersion: "apps/v1beta2"}},
}, {
Versions: []metav1.GroupVersionForDiscovery{{GroupVersion: "extensions/v1beta1"}},
}})
assert.EqualValues(t, []string{"apps/v1beta1", "apps/v1beta2", "extensions/v1beta1"}, versions)
}
func TestValidateRepo(t *testing.T) {
repoPath, err := filepath.Abs("./../..")
assert.NoError(t, err)
apiGroups := []metav1.APIGroup{{Versions: []metav1.GroupVersionForDiscovery{{GroupVersion: "apps/v1beta1"}, {GroupVersion: "apps/v1beta2"}}}}
kubeVersion := "v1.16"
kustomizeOptions := &argoappv1.KustomizeOptions{BuildOptions: "sample options"}
repo := &argoappv1.Repository{Repo: fmt.Sprintf("file://%s", repoPath)}
cluster := &argoappv1.Cluster{Server: "sample server"}
app := &argoappv1.Application{
Spec: argoappv1.ApplicationSpec{
Source: argoappv1.ApplicationSource{
RepoURL: repo.Repo,
},
Destination: argoappv1.ApplicationDestination{
Server: cluster.Server,
Namespace: "default",
},
},
}
helmRepos := []*argoappv1.Repository{{Repo: "sample helm repo"}}
repoClient := &mocks.RepoServerServiceClient{}
repoClient.On("GetAppDetails", context.Background(), &apiclient.RepoServerAppDetailsQuery{
Repo: repo,
Source: &app.Spec.Source,
Repos: helmRepos,
KustomizeOptions: kustomizeOptions,
}).Return(&apiclient.RepoAppDetailsResponse{}, nil)
repoClientSet := &mocks.Clientset{}
repoClientSet.On("NewRepoServerClient").Return(util.NopCloser, repoClient, nil)
db := &dbmocks.ArgoDB{}
db.On("GetRepository", context.Background(), app.Spec.Source.RepoURL).Return(repo, nil)
db.On("ListHelmRepositories", context.Background()).Return(helmRepos, nil)
db.On("GetCluster", context.Background(), app.Spec.Destination.Server).Return(cluster, nil)
var receivedRequest *apiclient.ManifestRequest
repoClient.On("GenerateManifest", context.Background(), mock.MatchedBy(func(req *apiclient.ManifestRequest) bool {
receivedRequest = req
return true
})).Return(nil, nil)
conditions, err := ValidateRepo(context.Background(), app, repoClientSet, db, kustomizeOptions, nil, &kubetest.MockKubectlCmd{Version: kubeVersion, APIGroups: apiGroups})
assert.NoError(t, err)
assert.Empty(t, conditions)
assert.ElementsMatch(t, []string{"apps/v1beta1", "apps/v1beta2"}, receivedRequest.ApiVersions)
assert.Equal(t, kubeVersion, receivedRequest.KubeVersion)
assert.Equal(t, app.Spec.Destination.Namespace, receivedRequest.Namespace)
assert.Equal(t, &app.Spec.Source, receivedRequest.ApplicationSource)
assert.Equal(t, kustomizeOptions, receivedRequest.KustomizeOptions)
}

View File

@@ -58,14 +58,23 @@ func GenerateDexConfigYAML(settings *settings.ArgoCDSettings) ([]byte, error) {
if err != nil {
return nil, err
}
connectors := dexCfg["connectors"].([]interface{})
connectors, ok := dexCfg["connectors"].([]interface{})
if !ok {
return nil, fmt.Errorf("malformed Dex configuration found")
}
for i, connectorIf := range connectors {
connector := connectorIf.(map[string]interface{})
connector, ok := connectorIf.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("malformed Dex configuration found")
}
connectorType := connector["type"].(string)
if !needsRedirectURI(connectorType) {
continue
}
connectorCfg := connector["config"].(map[string]interface{})
connectorCfg, ok := connector["config"].(map[string]interface{})
if !ok {
return nil, fmt.Errorf("malformed Dex configuration found")
}
connectorCfg["redirectURI"] = dexRedirectURL
connector["config"] = connectorCfg
connectors[i] = connector

View File
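The Dex change above is the whole of the panic fix: a bare type assertion panics when malformed YAML decodes to an unexpected type, while the two-value ("comma ok") form reports the mismatch so it can be returned as an error. A small self-contained sketch of the pattern (the helper name is made up for illustration):

    // connectorsFrom returns the connectors list or an error instead of panicking
    func connectorsFrom(dexCfg map[string]interface{}) ([]interface{}, error) {
        // dexCfg["connectors"].([]interface{}) would panic on malformed input;
        // the two-value assertion lets the caller handle it gracefully
        connectors, ok := dexCfg["connectors"].([]interface{})
        if !ok {
            return nil, fmt.Errorf("malformed Dex configuration found")
        }
        return connectors, nil
    }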

@@ -6,6 +6,8 @@ import (
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"github.com/argoproj/argo-cd/util"
@@ -188,6 +190,14 @@ func cleanSetParameters(val string) string {
}
func (c *Cmd) template(chartPath string, opts *TemplateOpts) (string, error) {
if c.HelmVer.getPostTemplateCallback != nil {
if callback, err := c.HelmVer.getPostTemplateCallback(filepath.Clean(path.Join(c.WorkDir, chartPath))); err == nil {
defer callback()
} else {
return "", err
}
}
args := []string{"template", chartPath, c.templateNameArg, opts.Name}
if opts.Namespace != "" {
@@ -211,6 +221,9 @@ func (c *Cmd) template(chartPath string, opts *TemplateOpts) (string, error) {
for _, v := range opts.APIVersions {
args = append(args, "--api-versions", v)
}
if c.HelmVer.additionalTemplateArgs != nil {
args = append(args, c.HelmVer.additionalTemplateArgs...)
}
return c.run(args...)
}

View File
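Taken together with the Helm 3 settings in the next file, the argument slice handed to c.run for a Helm 3 chart now looks roughly like this (release name, namespace and API versions are illustrative only):

    chartPath := "./helm3-crd" // example path
    args := []string{
        "template", chartPath,
        "--name-template", "my-release",
        "--namespace", "default",    // only when opts.Namespace is set
        "--api-versions", "apps/v1", // repeated once per entry in opts.APIVersions
        "--include-crds",            // contributed by HelmV3.additionalTemplateArgs
    }
    _ = args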

@@ -3,6 +3,7 @@ package helm
import (
"fmt"
"io/ioutil"
"os"
"path"
"gopkg.in/yaml.v2"
@@ -21,15 +22,37 @@ var (
}
// HelmV3 represents helm V3 specific settings
HelmV3 = HelmVer{
binaryName: "helm",
templateNameArg: "--name-template",
kubeVersionSupported: false,
showCommand: "show",
pullCommand: "pull",
initSupported: false,
binaryName: "helm",
templateNameArg: "--name-template",
kubeVersionSupported: false,
showCommand: "show",
pullCommand: "pull",
initSupported: false,
getPostTemplateCallback: cleanupChartLockFile,
additionalTemplateArgs: []string{"--include-crds"},
}
)
// Workaround for a Helm 3 bug; remove once https://github.com/helm/helm/issues/6870 is fixed.
// `helm template` generates a Chart.lock file, after which `helm dependency build` no longer works.
// As a workaround, the generated lock file is removed after templating unless it already existed before `helm template` ran.
func cleanupChartLockFile(chartPath string) (func(), error) {
exists := true
lockPath := path.Join(chartPath, "Chart.lock")
if _, err := os.Stat(lockPath); err != nil {
if os.IsNotExist(err) {
exists = false
} else {
return nil, err
}
}
return func() {
if !exists {
_ = os.Remove(lockPath)
}
}, nil
}
func getHelmVersion(chartPath string) (*HelmVer, error) {
data, err := ioutil.ReadFile(path.Join(chartPath, "Chart.yaml"))
if err != nil {
@@ -53,10 +76,12 @@ func getHelmVersion(chartPath string) (*HelmVer, error) {
// HelmVer contains Helm version specific settings such as helm binary and command names
type HelmVer struct {
binaryName string
initSupported bool
templateNameArg string
showCommand string
pullCommand string
kubeVersionSupported bool
binaryName string
initSupported bool
templateNameArg string
showCommand string
pullCommand string
kubeVersionSupported bool
getPostTemplateCallback func(chartPath string) (func(), error)
additionalTemplateArgs []string
}

View File

@@ -9,13 +9,13 @@ import (
func TestGetHelmVersion_Helm3(t *testing.T) {
ver, err := getHelmVersion("./testdata/minio")
assert.NoError(t, err)
assert.Equal(t, *ver, HelmV3)
assert.Equal(t, ver.binaryName, HelmV3.binaryName)
}
func TestGetHelmVersion_Helm2(t *testing.T) {
ver, err := getHelmVersion("./testdata/helm2-dependency")
assert.NoError(t, err)
assert.Equal(t, *ver, HelmV2)
assert.Equal(t, ver.binaryName, HelmV2.binaryName)
}
func TestGetHelmVersion_InvalidVersion(t *testing.T) {

View File

@@ -1,10 +1,18 @@
apiVersion: v2
version: 1.0.0
name: has-dependency
name: foobar
description: A Helm chart for Kubernetes
home: https://localhost
dependencies:
- name: mariadb
version: 4.x.x
repository: https://kubernetes-charts.storage.googleapis.com/
condition: mariadb.enabled
tags:
- wordpress-database
- name: mongodb
version: 7.8.10
repository: https://charts.bitnami.com/bitnami
condition: mongodb.enabled
- name: eventstore
version: 0.2.5
repository: https://eventstore.github.io/EventStore.Charts
condition: eventstore.enabled
maintainers:
- name: estahn
type: application
version: 0.1.0
appVersion: 1.16.0

View File

@@ -25,6 +25,7 @@ type MockKubectlCmd struct {
LastValidate bool
Version string
DynamicClient dynamic.Interface
APIGroups []metav1.APIGroup
}
func (k *MockKubectlCmd) NewDynamicClient(config *rest.Config) (dynamic.Interface, error) {
@@ -70,7 +71,7 @@ func (k *MockKubectlCmd) GetServerVersion(config *rest.Config) (string, error) {
}
func (k *MockKubectlCmd) GetAPIGroups(config *rest.Config) ([]metav1.APIGroup, error) {
return nil, nil
return k.APIGroups, nil
}
func (k *MockKubectlCmd) SetOnKubectlRun(onKubectlRun func(command string) (util.Closer, error)) {

View File

@@ -2,6 +2,7 @@ package session
import (
"context"
"errors"
"fmt"
"net"
"net/http"
@@ -155,6 +156,9 @@ func (mgr *SessionManager) Parse(tokenString string) (jwt.Claims, error) {
func (mgr *SessionManager) VerifyUsernamePassword(username string, password string) error {
account, err := mgr.settingsMgr.GetAccount(username)
if err != nil {
if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.NotFound {
err = status.Errorf(codes.Unauthenticated, invalidLoginError)
}
return err
}
if !account.Enabled {
@@ -243,6 +247,23 @@ func Iss(ctx context.Context) string {
return jwtutil.GetField(mapClaims, "iss")
}
func Iat(ctx context.Context) (time.Time, error) {
mapClaims, ok := mapClaims(ctx)
if !ok {
return time.Time{}, errors.New("unable to extract token claims")
}
iatField, ok := mapClaims["iat"]
if !ok {
return time.Time{}, errors.New("token does not have iat claim")
}
if iat, ok := iatField.(float64); !ok {
return time.Time{}, errors.New("iat token field has unexpected type")
} else {
return time.Unix(int64(iat), 0), nil
}
}
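A sketch of one plausible use of the new Iat helper, assuming the caller has the account's last password-change time at hand (the passwordMtime variable below is hypothetical):

    // reject tokens minted before the last password change, so that changing a
    // password invalidates previously issued sessions
    issuedAt, err := Iat(ctx)
    if err != nil {
        return err
    }
    if issuedAt.Before(passwordMtime) {
        return status.Errorf(codes.Unauthenticated, "account password has changed since the token was issued")
    }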
func Sub(ctx context.Context) string {
mapClaims, ok := mapClaims(ctx)
if !ok {