mirror of https://github.com/argoproj/argo-cd.git

Compare commits
15 Commits:

89ece31762
d565a0a119
1e2b554f01
8cb2f5d7e4
c5814d5946
a4a81d1de9
cb27cec021
e13e13e7ae
88d41f8efa
dbe09104a1
6a18870ec1
ca9f992fc2
063ff34f00
a9980c3025
3f5967c83e
@@ -55,13 +55,13 @@ RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION
   mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks

 # Install helm
-ENV HELM_VERSION=2.9.1
+ENV HELM_VERSION=2.11.0
 RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
   tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
   mv /tmp/linux-amd64/helm /usr/local/bin/helm

 # Install kustomize
-ENV KUSTOMIZE_VERSION=1.0.8
+ENV KUSTOMIZE_VERSION=1.0.10
 RUN curl -L -o /usr/local/bin/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64 && \
   chmod +x /usr/local/bin/kustomize

@@ -24,10 +24,10 @@ resources:

 imageTags:
 - name: argoproj/argocd-server
-  newTag: latest
+  newTag: v0.10.3
 - name: argoproj/argocd-ui
-  newTag: latest
+  newTag: v0.10.3
 - name: argoproj/argocd-repo-server
-  newTag: latest
+  newTag: v0.10.3
 - name: argoproj/argocd-application-controller
-  newTag: latest
+  newTag: v0.10.3
@@ -321,7 +321,7 @@ spec:
         - /argocd-application-controller
         - --repo-server
         - argocd-repo-server:8081
-        image: argoproj/argocd-application-controller:latest
+        image: argoproj/argocd-application-controller:v0.10.4
         name: application-controller
       serviceAccountName: application-controller
 ---
@@ -342,7 +342,7 @@ spec:
       containers:
       - command:
         - /argocd-repo-server
-        image: argoproj/argocd-repo-server:latest
+        image: argoproj/argocd-repo-server:v0.10.4
         name: argocd-repo-server
         ports:
         - containerPort: 8081
@@ -372,7 +372,7 @@ spec:
         - /shared/app
         - --repo-server
        - argocd-repo-server:8081
-        image: argoproj/argocd-server:latest
+        image: argoproj/argocd-server:v0.10.4
         name: argocd-server
         readinessProbe:
           httpGet:
@@ -389,7 +389,7 @@ spec:
         - -r
         - /app
         - /shared
-        image: argoproj/argocd-ui:latest
+        image: argoproj/argocd-ui:v0.10.4
         name: ui
         volumeMounts:
         - mountPath: /shared
@@ -429,7 +429,7 @@ spec:
         - cp
         - /argocd-util
         - /shared
-        image: argoproj/argocd-server:latest
+        image: argoproj/argocd-server:v0.10.4
         name: copyutil
         volumeMounts:
         - mountPath: /shared

@@ -261,7 +261,7 @@ spec:
         - /argocd-application-controller
         - --repo-server
         - argocd-repo-server:8081
-        image: argoproj/argocd-application-controller:latest
+        image: argoproj/argocd-application-controller:v0.10.4
         name: application-controller
       serviceAccountName: application-controller
 ---
@@ -282,7 +282,7 @@ spec:
       containers:
       - command:
        - /argocd-repo-server
-        image: argoproj/argocd-repo-server:latest
+        image: argoproj/argocd-repo-server:v0.10.4
         name: argocd-repo-server
         ports:
         - containerPort: 8081
@@ -312,7 +312,7 @@ spec:
         - /shared/app
         - --repo-server
         - argocd-repo-server:8081
-        image: argoproj/argocd-server:latest
+        image: argoproj/argocd-server:v0.10.4
         name: argocd-server
         readinessProbe:
           httpGet:
@@ -329,7 +329,7 @@ spec:
         - -r
         - /app
         - /shared
-        image: argoproj/argocd-ui:latest
+        image: argoproj/argocd-ui:v0.10.4
         name: ui
         volumeMounts:
         - mountPath: /shared
@@ -369,7 +369,7 @@ spec:
         - cp
         - /argocd-util
         - /shared
-        image: argoproj/argocd-server:latest
+        image: argoproj/argocd-server:v0.10.4
         name: copyutil
         volumeMounts:
         - mountPath: /shared

@@ -243,6 +243,7 @@ func (a *ArgoCDServer) Run(ctx context.Context, port int) {
 		tlsConfig := tls.Config{
 			Certificates: []tls.Certificate{*a.settings.Certificate},
 		}
+		a.TLSConfigCustomizer(&tlsConfig)
 		tlsl = tls.NewListener(tlsl, &tlsConfig)

 		// Now, we build another mux recursively to match HTTPS and gRPC.
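The hunk above wires a `TLSConfigCustomizer` hook into listener setup, but its definition is outside this diff. A minimal sketch of what such a hook can look like, assuming it is a plain function type (the names below are hypothetical, not the argo-cd code):

```go
package server

import "crypto/tls"

// TLSConfigCustomizer is assumed here to be a function hook that lets callers
// adjust the server's TLS settings just before the listener is wrapped.
type TLSConfigCustomizer func(*tls.Config)

// forceModernTLS is a sample customizer raising the minimum protocol version.
func forceModernTLS(cfg *tls.Config) {
	cfg.MinVersion = tls.VersionTLS12
}
```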
@@ -122,6 +122,7 @@ func RefreshApp(appIf v1alpha1.ApplicationInterface, name string) (*argoappv1.Ap
 }

 // WaitForRefresh watches an application until its comparison timestamp is after the refresh timestamp
+// If refresh timestamp is not present, will use current timestamp at time of call
 func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *time.Duration) (*argoappv1.Application, error) {
 	ctx := context.Background()
 	var cancel context.CancelFunc
@@ -136,6 +137,7 @@ func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *t
 		return nil, err
 	}
 	defer watchIf.Stop()
+	now := time.Now().UTC()

 	for {
 		select {
@@ -161,6 +163,9 @@ func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *t
 				return nil, fmt.Errorf("Application event object failed conversion: %v", next)
 			}
 			refreshTimestampStr := app.ObjectMeta.Annotations[common.AnnotationKeyRefresh]
+			if refreshTimestampStr == "" {
+				refreshTimestampStr = now.String()
+			}
 			refreshTimestamp, err := time.Parse(time.RFC3339, refreshTimestampStr)
 			if err != nil {
 				return nil, fmt.Errorf("Unable to parse '%s': %v", common.AnnotationKeyRefresh, err)
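The three hunks above default a missing refresh annotation to the current time before parsing it. A standalone sketch of that comparison, under the assumption that annotation values are RFC 3339 encoded; note that `time.Time.String()` does not emit RFC 3339, so a fallback that must survive `time.Parse(time.RFC3339, ...)` needs `Format(time.RFC3339)`:

```go
package main

import (
	"fmt"
	"time"
)

// refreshedBefore reports whether a comparison timestamp is at or after the
// refresh timestamp carried in an annotation, defaulting an absent annotation
// to "now" in an RFC 3339 round-trippable form.
func refreshedBefore(refreshAnnotation string, comparedAt time.Time) (bool, error) {
	if refreshAnnotation == "" {
		refreshAnnotation = time.Now().UTC().Format(time.RFC3339)
	}
	refreshedAt, err := time.Parse(time.RFC3339, refreshAnnotation)
	if err != nil {
		return false, fmt.Errorf("unable to parse refresh timestamp: %v", err)
	}
	return !comparedAt.Before(refreshedAt), nil
}

func main() {
	ok, err := refreshedBefore("", time.Now().UTC().Add(time.Second))
	fmt.Println(ok, err)
}
```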
@@ -20,7 +20,6 @@ import (

 	"github.com/coreos/dex/api"
 	oidc "github.com/coreos/go-oidc"
-	jwt "github.com/dgrijalva/jwt-go"
 	log "github.com/sirupsen/logrus"
 	"golang.org/x/oauth2"
 	"google.golang.org/grpc"
@@ -325,15 +324,9 @@ func (a *ClientApp) HandleCallback(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	idToken, err := a.verify(rawIDToken)
+	claims, err := a.sessionMgr.VerifyToken(rawIDToken)
 	if err != nil {
-		http.Error(w, fmt.Sprintf("Failed to verify ID token: %v", err), http.StatusInternalServerError)
-		return
-	}
-	var claims jwt.MapClaims
-	err = idToken.Claims(&claims)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("Failed to unmarshal claims: %v", err), http.StatusInternalServerError)
+		http.Error(w, fmt.Sprintf("invalid session token: %v", err), http.StatusInternalServerError)
 		return
 	}
 	flags := []string{"path=/"}
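The callback above now funnels ID-token verification through the session manager's `VerifyToken` instead of verifying and unmarshalling claims locally, so the dex-restart recovery added further down applies to logins too. A hypothetical distillation of the new control flow (the helper and its parameters are this sketch's inventions):

```go
package oidcutil

import (
	"fmt"
	"net/http"

	jwt "github.com/dgrijalva/jwt-go"
)

// handleVerifiedCallback delegates verification to a single verify function
// (the session manager's VerifyToken in the diff) and reports one uniform
// "invalid session token" error on failure.
func handleVerifiedCallback(w http.ResponseWriter, rawIDToken string,
	verify func(string) (jwt.Claims, error)) (jwt.Claims, bool) {
	claims, err := verify(rawIDToken)
	if err != nil {
		http.Error(w, fmt.Sprintf("invalid session token: %v", err), http.StatusInternalServerError)
		return nil, false
	}
	return claims, true
}
```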
@@ -149,7 +149,7 @@ func (m *nativeGitClient) Checkout(revision string) error {
 	if _, err := m.runCmd("git", "checkout", "--force", revision); err != nil {
 		return err
 	}
-	if _, err := m.runCmd("git", "clean", "-fd"); err != nil {
+	if _, err := m.runCmd("git", "clean", "-fdx"); err != nil {
 		return err
 	}
 	return nil
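Switching `git clean -fd` to `-fdx` makes the clean also remove files matched by `.gitignore`, so artifacts left behind by a previous manifest generation cannot leak into the next checkout. A sketch of the same sequence using `os/exec` directly (the diff's `nativeGitClient` wraps these commands in its own `runCmd` helper):

```go
package gitutil

import (
	"fmt"
	"os/exec"
)

// checkout forces the working tree to a revision and then removes untracked
// files and directories, including ignored ones (-x).
func checkout(repoPath, revision string) error {
	for _, args := range [][]string{
		{"checkout", "--force", revision},
		{"clean", "-fdx"},
	} {
		cmd := exec.Command("git", args...)
		cmd.Dir = repoPath
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("git %v failed: %v: %s", args, err, out)
		}
	}
	return nil
}
```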
@@ -9,40 +9,47 @@ import (
 	extv1beta1 "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/kubernetes/pkg/apis/apps"

 	appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
 	"github.com/argoproj/argo-cd/util/kube"
+	"k8s.io/kubernetes/pkg/apis/apps"
 )

 // GetAppHealth returns the health of a k8s resource
 func GetAppHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
-
 	var err error
 	var health *appv1.HealthStatus
-
-	switch obj.GetKind() {
-	case kube.DeploymentKind:
-		health, err = getDeploymentHealth(kubectl, obj)
-	case kube.ServiceKind:
-		health, err = getServiceHealth(kubectl, obj)
-	case kube.IngressKind:
-		health, err = getIngressHealth(kubectl, obj)
-	case kube.StatefulSetKind:
-		health, err = getStatefulSetHealth(kubectl, obj)
-	case kube.ReplicaSetKind:
-		health, err = getReplicaSetHealth(kubectl, obj)
-	case kube.DaemonSetKind:
-		health, err = getDaemonSetHealth(kubectl, obj)
-	case kube.PersistentVolumeClaimKind:
-		health, err = getPvcHealth(kubectl, obj)
-	default:
-		health = &appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
+	gvk := obj.GroupVersionKind()
+	switch gvk.Group {
+	case "apps", "extensions":
+		switch gvk.Kind {
+		case kube.DeploymentKind:
+			health, err = getDeploymentHealth(kubectl, obj)
+		case kube.IngressKind:
+			health, err = getIngressHealth(kubectl, obj)
+		case kube.StatefulSetKind:
+			health, err = getStatefulSetHealth(kubectl, obj)
+		case kube.ReplicaSetKind:
+			health, err = getReplicaSetHealth(kubectl, obj)
+		case kube.DaemonSetKind:
+			health, err = getDaemonSetHealth(kubectl, obj)
+		}
+	case "":
+		switch gvk.Kind {
+		case kube.ServiceKind:
+			health, err = getServiceHealth(kubectl, obj)
+		case kube.PersistentVolumeClaimKind:
+			health, err = getPVCHealth(kubectl, obj)
+		}
 	}

 	if err != nil {
-		health.Status = appv1.HealthStatusUnknown
-		health.StatusDetails = err.Error()
+		health = &appv1.HealthStatus{
+			Status:        appv1.HealthStatusUnknown,
+			StatusDetails: err.Error(),
+		}
+	} else if health == nil {
+		health = &appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
 	}
 	return health, err
 }
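The refactor above dispatches on the full GroupVersionKind rather than the bare kind, so a custom resource that happens to reuse a built-in kind name is no longer parsed as the built-in type (the new TestCRD case below exercises exactly this with a Knative Service). A small illustration of the distinction, using apimachinery's `unstructured` accessor:

```go
package health

import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

// isCoreService is true only for core/v1 Service objects. A Knative Service
// (group serving.knative.dev, kind Service) returns false and therefore
// falls through the group-aware switch to the default Healthy status.
func isCoreService(obj *unstructured.Unstructured) bool {
	gvk := obj.GroupVersionKind()
	return gvk.Group == "" && gvk.Kind == "Service"
}
```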
@@ -71,7 +78,7 @@ func IsWorse(current, new appv1.HealthStatusCode) bool {
 	return newIndex > currentIndex
 }

-func getPvcHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
+func getPVCHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
 	obj, err := kubectl.ConvertToVersion(obj, "", "v1")
 	if err != nil {
 		return nil, err
@@ -12,74 +12,45 @@ import (
 	"github.com/argoproj/argo-cd/util/kube"
 )

+func assertAppHealth(t *testing.T, yamlPath string, expectedStatus appv1.HealthStatusCode) {
+	yamlBytes, err := ioutil.ReadFile(yamlPath)
+	assert.Nil(t, err)
+	var obj unstructured.Unstructured
+	err = yaml.Unmarshal(yamlBytes, &obj)
+	assert.Nil(t, err)
+	health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
+	assert.Nil(t, err)
+	assert.NotNil(t, health)
+	assert.Equal(t, expectedStatus, health.Status)
+}
+
 func TestDeploymentHealth(t *testing.T) {
-	yamlBytes, err := ioutil.ReadFile("../kube/testdata/nginx.yaml")
-	assert.Nil(t, err)
-	var obj unstructured.Unstructured
-	err = yaml.Unmarshal(yamlBytes, &obj)
-	assert.Nil(t, err)
-	health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-	assert.Nil(t, err)
-	assert.NotNil(t, health)
-	assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
-}
-
-func TestDeploymentProgressing(t *testing.T) {
-	yamlBytes, err := ioutil.ReadFile("./testdata/progressing.yaml")
-	assert.Nil(t, err)
-	var obj unstructured.Unstructured
-	err = yaml.Unmarshal(yamlBytes, &obj)
-	assert.Nil(t, err)
-	health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-	assert.Nil(t, err)
-	assert.NotNil(t, health)
-	assert.Equal(t, appv1.HealthStatusProgressing, health.Status)
-}
-
-func TestDeploymentDegraded(t *testing.T) {
-	yamlBytes, err := ioutil.ReadFile("./testdata/degraded.yaml")
-	assert.Nil(t, err)
-	var obj unstructured.Unstructured
-	err = yaml.Unmarshal(yamlBytes, &obj)
-	assert.Nil(t, err)
-	health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-	assert.Nil(t, err)
-	assert.NotNil(t, health)
-	assert.Equal(t, appv1.HealthStatusDegraded, health.Status)
+	assertAppHealth(t, "../kube/testdata/nginx.yaml", appv1.HealthStatusHealthy)
+	assertAppHealth(t, "./testdata/deployment-progressing.yaml", appv1.HealthStatusProgressing)
+	assertAppHealth(t, "./testdata/deployment-degraded.yaml", appv1.HealthStatusDegraded)
 }

 func TestStatefulSetHealth(t *testing.T) {
-	yamlBytes, err := ioutil.ReadFile("./testdata/statefulset.yaml")
-	assert.Nil(t, err)
-	var obj unstructured.Unstructured
-	err = yaml.Unmarshal(yamlBytes, &obj)
-	assert.Nil(t, err)
-	health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-	assert.Nil(t, err)
-	assert.NotNil(t, health)
-	assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
+	assertAppHealth(t, "./testdata/statefulset.yaml", appv1.HealthStatusHealthy)
 }

-func TestPvcHealthy(t *testing.T) {
-	yamlBytes, err := ioutil.ReadFile("./testdata/pvc-bound.yaml")
-	assert.Nil(t, err)
-	var obj unstructured.Unstructured
-	err = yaml.Unmarshal(yamlBytes, &obj)
-	assert.Nil(t, err)
-	health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-	assert.Nil(t, err)
-	assert.NotNil(t, health)
-	assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
+func TestPVCHealth(t *testing.T) {
+	assertAppHealth(t, "./testdata/pvc-bound.yaml", appv1.HealthStatusHealthy)
+	assertAppHealth(t, "./testdata/pvc-pending.yaml", appv1.HealthStatusProgressing)
 }

-func TestPvcPending(t *testing.T) {
-	yamlBytes, err := ioutil.ReadFile("./testdata/pvc-pending.yaml")
-	assert.Nil(t, err)
-	var obj unstructured.Unstructured
-	err = yaml.Unmarshal(yamlBytes, &obj)
-	assert.Nil(t, err)
-	health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-	assert.Nil(t, err)
-	assert.NotNil(t, health)
-	assert.Equal(t, appv1.HealthStatusProgressing, health.Status)
+func TestServiceHealth(t *testing.T) {
+	assertAppHealth(t, "./testdata/svc-clusterip.yaml", appv1.HealthStatusHealthy)
+	assertAppHealth(t, "./testdata/svc-loadbalancer.yaml", appv1.HealthStatusHealthy)
+	assertAppHealth(t, "./testdata/svc-loadbalancer-unassigned.yaml", appv1.HealthStatusProgressing)
+}
+
+func TestIngressHealth(t *testing.T) {
+	assertAppHealth(t, "./testdata/ingress.yaml", appv1.HealthStatusHealthy)
+	assertAppHealth(t, "./testdata/ingress-unassigned.yaml", appv1.HealthStatusProgressing)
+}
+
+func TestCRD(t *testing.T) {
+	// This ensures we do not try to compare only based on "Kind"
+	assertAppHealth(t, "./testdata/knative-service.yaml", appv1.HealthStatusHealthy)
 }
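The test refactor above collapses each copy of the read-unmarshal-assert boilerplate into the `assertAppHealth` helper. The same consolidation can also be expressed as a table-driven test, a common Go idiom; a hypothetical variant reusing the new helper, with cases mirroring a subset of the assertions in the diff:

```go
package health

import (
	"testing"

	appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)

// TestAppHealthTable is a sketch of a table-driven alternative to the
// helper-per-call style introduced above.
func TestAppHealthTable(t *testing.T) {
	for _, tc := range []struct {
		path   string
		status appv1.HealthStatusCode
	}{
		{"./testdata/statefulset.yaml", appv1.HealthStatusHealthy},
		{"./testdata/pvc-pending.yaml", appv1.HealthStatusProgressing},
	} {
		assertAppHealth(t, tc.path, tc.status)
	}
}
```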
util/health/testdata/ingress-unassigned.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
+  creationTimestamp: 2018-09-20T06:47:27Z
+  generation: 9
+  name: argocd-server-ingress
+  namespace: argocd
+  resourceVersion: "23207680"
+  selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
+  uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
+spec:
+  rules:
+  - host: example.argoproj.io
+    http:
+      paths:
+      - backend:
+          serviceName: argocd-server
+          servicePort: https
+status:
+  loadBalancer: {}

util/health/testdata/ingress.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
+  creationTimestamp: 2018-09-20T06:47:27Z
+  generation: 9
+  name: argocd-server-ingress
+  namespace: argocd
+  resourceVersion: "23207680"
+  selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
+  uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
+spec:
+  rules:
+  - host: example.argoproj.io
+    http:
+      paths:
+      - backend:
+          serviceName: argocd-server
+          servicePort: https
+status:
+  loadBalancer:
+    ingress:
+    - ip: 1.2.3.4

util/health/testdata/knative-service.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+apiVersion: serving.knative.dev/v1alpha1
+kind: Service
+metadata:
+  name: helloworld
+spec:
+  runLatest:
+    configuration:
+      revisionTemplate:
+        spec:
+          container:
+            env:
+            - name: TARGET
+              value: world
+            image: helloworld:latest

util/health/testdata/svc-clusterip.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"argocd-metrics","namespace":"argocd"},"spec":{"ports":[{"name":"http","port":8082,"protocol":"TCP","targetPort":8082}],"selector":{"app":"argocd-server"}}}
+  creationTimestamp: 2018-10-27T06:36:27Z
+  name: argocd-metrics
+  namespace: argocd
+  resourceVersion: "1131"
+  selfLink: /api/v1/namespaces/argocd/services/argocd-metrics
+  uid: a1f65069-d9b2-11e8-b3c1-9ae2f452bd03
+spec:
+  clusterIP: 10.96.199.2
+  ports:
+  - name: http
+    port: 8082
+    protocol: TCP
+    targetPort: 8082
+  selector:
+    app: argocd-server
+  sessionAffinity: None
+  type: ClusterIP
+status:
+  loadBalancer: {}

util/health/testdata/svc-loadbalancer-unassigned.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: 2018-11-06T01:07:35Z
+  name: argo-artifacts
+  namespace: argo
+  resourceVersion: "346792"
+  selfLink: /api/v1/namespaces/argo/services/argo-artifacts
+  uid: 586f5e57-e160-11e8-b3c1-9ae2f452bd03
+spec:
+  clusterIP: 10.105.70.181
+  externalTrafficPolicy: Cluster
+  ports:
+  - name: service
+    nodePort: 32667
+    port: 9000
+    protocol: TCP
+    targetPort: 9000
+  selector:
+    app: minio
+    release: argo-artifacts
+  sessionAffinity: None
+  type: LoadBalancer
+status:
+  loadBalancer: {}

util/health/testdata/svc-loadbalancer.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "600"
+  creationTimestamp: 2018-06-05T23:34:58Z
+  labels:
+    applications.argoproj.io/app-name: argocd-cdp
+  name: argocd-server
+  namespace: argocd
+  resourceVersion: "32559487"
+  selfLink: /api/v1/namespaces/argocd/services/argocd-server
+  uid: 0f5885a9-6919-11e8-ad29-020124679688
+spec:
+  clusterIP: 100.69.46.185
+  externalTrafficPolicy: Cluster
+  ports:
+  - name: http
+    nodePort: 30354
+    port: 80
+    protocol: TCP
+    targetPort: 8080
+  - name: https
+    nodePort: 31866
+    port: 443
+    protocol: TCP
+    targetPort: 8080
+  selector:
+    app: argocd-server
+  sessionAffinity: None
+  type: LoadBalancer
+status:
+  loadBalancer:
+    ingress:
+    - hostname: abc123.us-west-2.elb.amazonaws.com

@@ -160,7 +160,29 @@ func (mgr *SessionManager) VerifyToken(tokenString string) (jwt.Claims, error) {
 	verifier := provider.Verifier(&oidc.Config{ClientID: claims.Audience})
 	idToken, err := verifier.Verify(context.Background(), tokenString)
 	if err != nil {
-		return nil, err
+		// HACK: if we failed token verification, it's possible the reason was because dex
+		// restarted and has new JWKS signing keys (we do not back dex with persistent storage
+		// so keys might be regenerated). Detect this by:
+		// 1. looking for the specific error message
+		// 2. re-initializing the OIDC provider
+		// 3. re-attempting token verification
+		// NOTE: the error message is sensitive to implementation of verifier.Verify()
+		if !strings.Contains(err.Error(), "failed to verify signature") {
+			return nil, err
+		}
+		provider, retryErr := mgr.initializeOIDCProvider()
+		if retryErr != nil {
+			// return original error if we fail to re-initialize OIDC
+			return nil, err
+		}
+		verifier = provider.Verifier(&oidc.Config{ClientID: claims.Audience})
+		idToken, err = verifier.Verify(context.Background(), tokenString)
+		if err != nil {
+			return nil, err
+		}
+		// If we get here, we successfully re-initialized OIDC and after re-initialization,
+		// the token is now valid.
+		log.Info("New OIDC settings detected")
 	}
 	var claims jwt.MapClaims
 	err = idToken.Claims(&claims)
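The recovery added above is a retry-once pattern: on a signature failure, refresh the signing material (here, re-initializing the OIDC provider to pick up dex's regenerated JWKS keys) and verify again, preserving the original error if the refresh fails. A generic sketch of the shape, with hypothetical function arguments:

```go
package session

import "strings"

// verifyWithRefresh verifies once; on a signature failure it refreshes the
// verifier's key material and retries exactly once. Any refresh failure
// surfaces the original verification error, mirroring the diff's flow.
func verifyWithRefresh(token string, verify func(string) error, refresh func() error) error {
	err := verify(token)
	if err == nil || !strings.Contains(err.Error(), "failed to verify signature") {
		return err
	}
	if refreshErr := refresh(); refreshErr != nil {
		return err // keep the original verification error
	}
	return verify(token)
}
```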
@@ -168,6 +190,7 @@ func (mgr *SessionManager) VerifyToken(tokenString string) (jwt.Claims, error) {
 	}
 }

+// Username is a helper to extract a human readable username from a context
 func Username(ctx context.Context) string {
 	claims, ok := ctx.Value("claims").(jwt.Claims)
 	if !ok {
@@ -194,8 +217,7 @@ func MakeCookieMetadata(key, value string, flags ...string) string {
 	return strings.Join(components, "; ")
 }

-// OIDCProvider lazily initializes and returns the OIDC provider, querying the well known oidc
-// configuration path (http://example-argocd.com/api/dex/.well-known/openid-configuration).
+// OIDCProvider lazily initializes, memoizes, and returns the OIDC provider.
 // We have to initialize the provider lazily since ArgoCD is an OIDC client to itself, which
 // presents a chicken-and-egg problem of (1) serving dex over HTTP, and (2) querying the OIDC
 // provider (ourselves) to initialize the app.
@@ -203,6 +225,12 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
 	if mgr.provider != nil {
 		return mgr.provider, nil
 	}
+	return mgr.initializeOIDCProvider()
+}
+
+// initializeOIDCProvider re-initializes the OIDC provider, querying the well known oidc
+// configuration path (http://example-argocd.com/api/dex/.well-known/openid-configuration)
+func (mgr *SessionManager) initializeOIDCProvider() (*oidc.Provider, error) {
 	if !mgr.settings.IsSSOConfigured() {
 		return nil, fmt.Errorf("SSO is not configured")
 	}
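Splitting `OIDCProvider` into a memoizing accessor and a separate `initializeOIDCProvider` lets the retry path above force a re-query while normal callers keep hitting the cache. A minimal sketch of the lazy-init-and-cache pattern; the mutex is this sketch's addition, and whether the real `SessionManager` needs one depends on its callers:

```go
package session

import "sync"

// memoized caches the first successfully initialized value; later calls
// return it without re-running the initializer.
type memoized struct {
	mu    sync.Mutex
	value interface{}
}

func (m *memoized) get(init func() (interface{}, error)) (interface{}, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.value != nil {
		return m.value, nil
	}
	v, err := init()
	if err != nil {
		return nil, err
	}
	m.value = v
	return v, nil
}
```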
@@ -213,7 +241,6 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
 	if err != nil {
 		return nil, fmt.Errorf("Failed to query provider %q: %v", issuerURL, err)
 	}

-	// Returns the scopes the provider supports
 	// See: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
 	var s struct {
@@ -223,24 +250,27 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
 		return nil, fmt.Errorf("Failed to parse provider scopes_supported: %v", err)
 	}
 	log.Infof("OpenID supported scopes: %v", s.ScopesSupported)
+	offlineAsScope := false
 	if len(s.ScopesSupported) == 0 {
 		// scopes_supported is a "RECOMMENDED" discovery claim, not a required
 		// one. If missing, assume that the provider follows the spec and has
 		// an "offline_access" scope.
-		mgr.offlineAsScope = true
+		offlineAsScope = true
 	} else {
 		// See if scopes_supported has the "offline_access" scope.
 		for _, scope := range s.ScopesSupported {
 			if scope == oidc.ScopeOfflineAccess {
-				mgr.offlineAsScope = true
+				offlineAsScope = true
 				break
 			}
 		}
 	}
 	mgr.provider = provider
+	mgr.offlineAsScope = offlineAsScope
 	return mgr.provider, nil
 }

 // OfflineAsScope returns whether or not the OIDC provider supports offline as a scope
 func (mgr *SessionManager) OfflineAsScope() bool {
+	_, _ = mgr.OIDCProvider() // forces offlineAsScope to be determined
 	return mgr.offlineAsScope
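The last hunk computes `offlineAsScope` into a local variable and commits it to the manager only after discovery succeeds, so a failed initialization never leaves the manager half-updated. The discovery decision itself, reproduced as a pure function:

```go
package session

// offlineAsScope mirrors the diff's logic: when scopes_supported (a
// RECOMMENDED discovery claim) is absent, assume a spec-compliant provider
// that accepts offline_access; otherwise require it explicitly.
func offlineAsScope(scopesSupported []string) bool {
	if len(scopesSupported) == 0 {
		return true
	}
	for _, scope := range scopesSupported {
		if scope == "offline_access" {
			return true
		}
	}
	return false
}
```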