Compare commits

...

15 Commits

Author SHA1 Message Date
Jesse Suen 89ece31762 Update version to v0.10.4 2018-11-07 17:25:16 -08:00
Jesse Suen d565a0a119 Health check is not discerning apiVersion when assessing CRDs (issue #753) (#754) 2018-11-07 17:24:20 -08:00
Mario Duarte 1e2b554f01 Fix nil pointer dereference in util/health (#723) 2018-11-07 17:24:14 -08:00
Alessandro Marrella 8cb2f5d7e4 Updated helm (#749) 2018-11-07 16:35:16 -08:00
Alexander Matyushentsev c5814d5946 Update version to v0.10.3 2018-10-28 23:31:08 -07:00
Alexander Matyushentsev a4a81d1de9 Fix applying TLS version settings (#731) 2018-10-28 23:30:00 -07:00
Tom Wieczorek cb27cec021 Update to kustomize 1.0.10 (#728; see also kubernetes-sigs/kustomize#514) 2018-10-28 22:47:42 -07:00
Alexander Matyushentsev e13e13e7ae Update manifests to v0.10.2 2018-10-25 11:52:17 -07:00
Tom Wieczorek 88d41f8efa Update to kustomize 1.0.9 (#722) 2018-10-25 11:51:22 -07:00
dthomson25 dbe09104a1 Fix app refresh err when k8s patch is too slow (#724) 2018-10-25 11:51:18 -07:00
Alexander Matyushentsev 6a18870ec1 Update manifests to v0.10.1 2018-10-24 12:57:02 -07:00
Jesse Suen ca9f992fc2 Update version to v0.10.1 2018-10-24 11:40:52 -07:00
Jesse Suen 063ff34f00 Handle case where OIDC settings become invalid after dex server restart (issue #710) (#715) 2018-10-24 11:39:53 -07:00
Jesse Suen a9980c3025 git clean also needs to clean files under gitignore (issue #711) (#712) 2018-10-19 22:11:25 -07:00
Alexander Matyushentsev 3f5967c83e Update manifests to v0.10.0 2018-10-19 14:29:37 -07:00
20 changed files with 274 additions and 118 deletions

View File

@@ -55,13 +55,13 @@ RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks
# Install helm
-ENV HELM_VERSION=2.9.1
+ENV HELM_VERSION=2.11.0
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /usr/local/bin/helm
# Install kustomize
-ENV KUSTOMIZE_VERSION=1.0.8
+ENV KUSTOMIZE_VERSION=1.0.10
RUN curl -L -o /usr/local/bin/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/kustomize

View File

@@ -1 +1 @@
-0.10.0
+0.10.4

View File

@@ -24,10 +24,10 @@ resources:
imageTags:
- name: argoproj/argocd-server
-  newTag: latest
+  newTag: v0.10.3
- name: argoproj/argocd-ui
-  newTag: latest
+  newTag: v0.10.3
- name: argoproj/argocd-repo-server
-  newTag: latest
+  newTag: v0.10.3
- name: argoproj/argocd-application-controller
-  newTag: latest
+  newTag: v0.10.3

View File

@@ -321,7 +321,7 @@ spec:
- /argocd-application-controller
- --repo-server
- argocd-repo-server:8081
-image: argoproj/argocd-application-controller:latest
+image: argoproj/argocd-application-controller:v0.10.4
name: application-controller
serviceAccountName: application-controller
---
@@ -342,7 +342,7 @@ spec:
containers:
- command:
- /argocd-repo-server
-image: argoproj/argocd-repo-server:latest
+image: argoproj/argocd-repo-server:v0.10.4
name: argocd-repo-server
ports:
- containerPort: 8081
@@ -372,7 +372,7 @@ spec:
- /shared/app
- --repo-server
- argocd-repo-server:8081
-image: argoproj/argocd-server:latest
+image: argoproj/argocd-server:v0.10.4
name: argocd-server
readinessProbe:
httpGet:
@@ -389,7 +389,7 @@ spec:
- -r
- /app
- /shared
-image: argoproj/argocd-ui:latest
+image: argoproj/argocd-ui:v0.10.4
name: ui
volumeMounts:
- mountPath: /shared
@@ -429,7 +429,7 @@ spec:
- cp
- /argocd-util
- /shared
-image: argoproj/argocd-server:latest
+image: argoproj/argocd-server:v0.10.4
name: copyutil
volumeMounts:
- mountPath: /shared

View File

@@ -261,7 +261,7 @@ spec:
- /argocd-application-controller
- --repo-server
- argocd-repo-server:8081
-image: argoproj/argocd-application-controller:latest
+image: argoproj/argocd-application-controller:v0.10.4
name: application-controller
serviceAccountName: application-controller
---
@@ -282,7 +282,7 @@ spec:
containers:
- command:
- /argocd-repo-server
-image: argoproj/argocd-repo-server:latest
+image: argoproj/argocd-repo-server:v0.10.4
name: argocd-repo-server
ports:
- containerPort: 8081
@@ -312,7 +312,7 @@ spec:
- /shared/app
- --repo-server
- argocd-repo-server:8081
-image: argoproj/argocd-server:latest
+image: argoproj/argocd-server:v0.10.4
name: argocd-server
readinessProbe:
httpGet:
@@ -329,7 +329,7 @@ spec:
- -r
- /app
- /shared
-image: argoproj/argocd-ui:latest
+image: argoproj/argocd-ui:v0.10.4
name: ui
volumeMounts:
- mountPath: /shared
@@ -369,7 +369,7 @@ spec:
- cp
- /argocd-util
- /shared
-image: argoproj/argocd-server:latest
+image: argoproj/argocd-server:v0.10.4
name: copyutil
volumeMounts:
- mountPath: /shared

View File

@@ -243,6 +243,7 @@ func (a *ArgoCDServer) Run(ctx context.Context, port int) {
tlsConfig := tls.Config{
Certificates: []tls.Certificate{*a.settings.Certificate},
}
+a.TLSConfigCustomizer(&tlsConfig)
tlsl = tls.NewListener(tlsl, &tlsConfig)
// Now, we build another mux recursively to match HTTPS and gRPC.

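The one-line fix above wires user-supplied TLS settings (the subject of #731) into the listener's tls.Config, which the server previously never applied. A minimal sketch of the pattern, assuming TLSConfigCustomizer is a plain func(*tls.Config) (the exact ArgoCD type may differ):

package main

import (
	"crypto/tls"
	"fmt"
)

// TLSConfigCustomizer mutates a tls.Config in place (assumed shape, for illustration).
type TLSConfigCustomizer func(*tls.Config)

func main() {
	// e.g. built from a --tlsminversion=1.2 style flag
	customizer := TLSConfigCustomizer(func(c *tls.Config) {
		c.MinVersion = tls.VersionTLS12
	})

	tlsConfig := tls.Config{}
	customizer(&tlsConfig) // the call the diff adds; without it the settings were dropped
	fmt.Printf("MinVersion: %#x\n", tlsConfig.MinVersion)
}
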
View File

@@ -122,6 +122,7 @@ func RefreshApp(appIf v1alpha1.ApplicationInterface, name string) (*argoappv1.Ap
}
// WaitForRefresh watches an application until its comparison timestamp is after the refresh timestamp
+// If refresh timestamp is not present, will use current timestamp at time of call
func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *time.Duration) (*argoappv1.Application, error) {
ctx := context.Background()
var cancel context.CancelFunc
@@ -136,6 +137,7 @@ func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *t
return nil, err
}
defer watchIf.Stop()
+now := time.Now().UTC()
for {
select {
@@ -161,6 +163,9 @@ func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *t
return nil, fmt.Errorf("Application event object failed conversion: %v", next)
}
refreshTimestampStr := app.ObjectMeta.Annotations[common.AnnotationKeyRefresh]
+if refreshTimestampStr == "" {
+refreshTimestampStr = now.String()
+}
refreshTimestamp, err := time.Parse(time.RFC3339, refreshTimestampStr)
if err != nil {
return nil, fmt.Errorf("Unable to parse '%s': %v", common.AnnotationKeyRefresh, err)

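The guard added in the last hunk tolerates applications that do not yet carry a refresh annotation by substituting the call-time timestamp (#724). A standalone sketch of that fallback, with a hypothetical helper name and an assumed annotation key (the real logic is inline in WaitForRefresh):

package main

import (
	"fmt"
	"time"
)

// resolveRefreshTime is a hypothetical helper: when the refresh annotation is
// missing, fall back to "now" so the caller parses a real timestamp instead of
// an empty string.
func resolveRefreshTime(annotations map[string]string, key string, now time.Time) (time.Time, error) {
	s := annotations[key]
	if s == "" {
		s = now.Format(time.RFC3339)
	}
	return time.Parse(time.RFC3339, s)
}

func main() {
	// "argocd.argoproj.io/refresh" is an assumed value for common.AnnotationKeyRefresh
	ts, err := resolveRefreshTime(map[string]string{}, "argocd.argoproj.io/refresh", time.Now().UTC())
	fmt.Println(ts, err)
}
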
View File

@@ -20,7 +20,6 @@ import (
"github.com/coreos/dex/api"
oidc "github.com/coreos/go-oidc"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"golang.org/x/oauth2"
"google.golang.org/grpc"
@@ -325,15 +324,9 @@ func (a *ClientApp) HandleCallback(w http.ResponseWriter, r *http.Request) {
return
}
-idToken, err := a.verify(rawIDToken)
+claims, err := a.sessionMgr.VerifyToken(rawIDToken)
if err != nil {
-http.Error(w, fmt.Sprintf("Failed to verify ID token: %v", err), http.StatusInternalServerError)
-return
-}
-var claims jwt.MapClaims
-err = idToken.Claims(&claims)
-if err != nil {
-http.Error(w, fmt.Sprintf("Failed to unmarshal claims: %v", err), http.StatusInternalServerError)
+http.Error(w, fmt.Sprintf("invalid session token: %v", err), http.StatusInternalServerError)
return
}
flags := []string{"path=/"}

View File

@@ -149,7 +149,7 @@ func (m *nativeGitClient) Checkout(revision string) error {
if _, err := m.runCmd("git", "checkout", "--force", revision); err != nil {
return err
}
if _, err := m.runCmd("git", "clean", "-fd"); err != nil {
if _, err := m.runCmd("git", "clean", "-fdx"); err != nil {
return err
}
return nil

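The only change here is the added -x flag: git clean -fd removes untracked files but skips anything matched by .gitignore, so files generated during a previous checkout could survive into the next one (issue #711). A sketch of the same invocation outside ArgoCD's runCmd wrapper (illustrative helper, not the repo's API):

package main

import (
	"fmt"
	"os/exec"
)

// cleanWorktree removes untracked files and directories, including ignored
// ones (-x), mirroring the fixed command above.
func cleanWorktree(repoPath string) error {
	cmd := exec.Command("git", "clean", "-fdx")
	cmd.Dir = repoPath
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("git clean failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	// Point this at a scratch clone: -x also deletes ignored files.
	fmt.Println(cleanWorktree("/tmp/scratch-repo"))
}
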
View File

@@ -9,40 +9,47 @@ import (
extv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/apis/apps"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
"k8s.io/kubernetes/pkg/apis/apps"
)
// GetAppHealth returns the health of a k8s resource
func GetAppHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
var err error
var health *appv1.HealthStatus
-switch obj.GetKind() {
-case kube.DeploymentKind:
-health, err = getDeploymentHealth(kubectl, obj)
-case kube.ServiceKind:
-health, err = getServiceHealth(kubectl, obj)
-case kube.IngressKind:
-health, err = getIngressHealth(kubectl, obj)
-case kube.StatefulSetKind:
-health, err = getStatefulSetHealth(kubectl, obj)
-case kube.ReplicaSetKind:
-health, err = getReplicaSetHealth(kubectl, obj)
-case kube.DaemonSetKind:
-health, err = getDaemonSetHealth(kubectl, obj)
-case kube.PersistentVolumeClaimKind:
-health, err = getPvcHealth(kubectl, obj)
-default:
-health = &appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
+gvk := obj.GroupVersionKind()
+switch gvk.Group {
+case "apps", "extensions":
+switch gvk.Kind {
+case kube.DeploymentKind:
+health, err = getDeploymentHealth(kubectl, obj)
+case kube.IngressKind:
+health, err = getIngressHealth(kubectl, obj)
+case kube.StatefulSetKind:
+health, err = getStatefulSetHealth(kubectl, obj)
+case kube.ReplicaSetKind:
+health, err = getReplicaSetHealth(kubectl, obj)
+case kube.DaemonSetKind:
+health, err = getDaemonSetHealth(kubectl, obj)
+}
+case "":
+switch gvk.Kind {
+case kube.ServiceKind:
+health, err = getServiceHealth(kubectl, obj)
+case kube.PersistentVolumeClaimKind:
+health, err = getPVCHealth(kubectl, obj)
+}
}
if err != nil {
-health.Status = appv1.HealthStatusUnknown
-health.StatusDetails = err.Error()
+health = &appv1.HealthStatus{
+Status: appv1.HealthStatusUnknown,
+StatusDetails: err.Error(),
+}
+} else if health == nil {
+health = &appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
}
return health, err
}
@@ -71,7 +78,7 @@ func IsWorse(current, new appv1.HealthStatusCode) bool {
return newIndex > currentIndex
}
-func getPvcHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
+func getPVCHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
obj, err := kubectl.ConvertToVersion(obj, "", "v1")
if err != nil {
return nil, err

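The net effect of the GetAppHealth rewrite above: dispatch is keyed on group and kind instead of kind alone, so a CRD whose Kind collides with a builtin (e.g. a Knative Service) falls through to the default-healthy path rather than being run through the core Service checker (#753); the error path now also allocates a fresh HealthStatus instead of writing through a possibly nil pointer (#723). A reduced sketch of the dispatch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// checkerFor models the dispatch above: the group narrows the switch before
// the kind does, so same-named CRDs are not mistaken for builtins.
func checkerFor(gvk schema.GroupVersionKind) string {
	switch gvk.Group {
	case "apps", "extensions":
		if gvk.Kind == "Deployment" {
			return "deployment health check"
		}
	case "": // core API group
		if gvk.Kind == "Service" {
			return "service health check"
		}
	}
	return "assumed healthy" // unknown groups/kinds, e.g. CRDs
}

func main() {
	knative := schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1alpha1", Kind: "Service"}
	core := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"}
	fmt.Println(checkerFor(knative)) // assumed healthy
	fmt.Println(checkerFor(core))    // service health check
}
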
View File

@@ -12,74 +12,45 @@ import (
"github.com/argoproj/argo-cd/util/kube"
)
+func assertAppHealth(t *testing.T, yamlPath string, expectedStatus appv1.HealthStatusCode) {
+yamlBytes, err := ioutil.ReadFile(yamlPath)
+assert.Nil(t, err)
+var obj unstructured.Unstructured
+err = yaml.Unmarshal(yamlBytes, &obj)
+assert.Nil(t, err)
+health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
+assert.Nil(t, err)
+assert.NotNil(t, health)
+assert.Equal(t, expectedStatus, health.Status)
+}
func TestDeploymentHealth(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("../kube/testdata/nginx.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
}
func TestDeploymentProgressing(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("./testdata/progressing.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusProgressing, health.Status)
}
func TestDeploymentDegraded(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("./testdata/degraded.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusDegraded, health.Status)
assertAppHealth(t, "../kube/testdata/nginx.yaml", appv1.HealthStatusHealthy)
assertAppHealth(t, "./testdata/deployment-progressing.yaml", appv1.HealthStatusProgressing)
assertAppHealth(t, "./testdata/deployment-degraded.yaml", appv1.HealthStatusDegraded)
}
func TestStatefulSetHealth(t *testing.T) {
-yamlBytes, err := ioutil.ReadFile("./testdata/statefulset.yaml")
-assert.Nil(t, err)
-var obj unstructured.Unstructured
-err = yaml.Unmarshal(yamlBytes, &obj)
-assert.Nil(t, err)
-health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-assert.Nil(t, err)
-assert.NotNil(t, health)
-assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
+assertAppHealth(t, "./testdata/statefulset.yaml", appv1.HealthStatusHealthy)
}
-func TestPvcHealthy(t *testing.T) {
-yamlBytes, err := ioutil.ReadFile("./testdata/pvc-bound.yaml")
-assert.Nil(t, err)
-var obj unstructured.Unstructured
-err = yaml.Unmarshal(yamlBytes, &obj)
-assert.Nil(t, err)
-health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-assert.Nil(t, err)
-assert.NotNil(t, health)
-assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
+func TestPVCHealth(t *testing.T) {
+assertAppHealth(t, "./testdata/pvc-bound.yaml", appv1.HealthStatusHealthy)
+assertAppHealth(t, "./testdata/pvc-pending.yaml", appv1.HealthStatusProgressing)
}
-func TestPvcPending(t *testing.T) {
-yamlBytes, err := ioutil.ReadFile("./testdata/pvc-pending.yaml")
-assert.Nil(t, err)
-var obj unstructured.Unstructured
-err = yaml.Unmarshal(yamlBytes, &obj)
-assert.Nil(t, err)
-health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
-assert.Nil(t, err)
-assert.NotNil(t, health)
-assert.Equal(t, appv1.HealthStatusProgressing, health.Status)
+func TestServiceHealth(t *testing.T) {
+assertAppHealth(t, "./testdata/svc-clusterip.yaml", appv1.HealthStatusHealthy)
+assertAppHealth(t, "./testdata/svc-loadbalancer.yaml", appv1.HealthStatusHealthy)
+assertAppHealth(t, "./testdata/svc-loadbalancer-unassigned.yaml", appv1.HealthStatusProgressing)
}
+func TestIngressHealth(t *testing.T) {
+assertAppHealth(t, "./testdata/ingress.yaml", appv1.HealthStatusHealthy)
+assertAppHealth(t, "./testdata/ingress-unassigned.yaml", appv1.HealthStatusProgressing)
+}
+func TestCRD(t *testing.T) {
+// This ensures we do not try to compare only based on "Kind"
+assertAppHealth(t, "./testdata/knative-service.yaml", appv1.HealthStatusHealthy)
}

View File

@@ -0,0 +1,24 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
creationTimestamp: 2018-09-20T06:47:27Z
generation: 9
name: argocd-server-ingress
namespace: argocd
resourceVersion: "23207680"
selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
spec:
rules:
- host: example.argoproj.io
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
status:
loadBalancer: {}

util/health/testdata/ingress.yaml

@@ -0,0 +1,26 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
creationTimestamp: 2018-09-20T06:47:27Z
generation: 9
name: argocd-server-ingress
namespace: argocd
resourceVersion: "23207680"
selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
spec:
rules:
- host: example.argoproj.io
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
status:
loadBalancer:
ingress:
- ip: 1.2.3.4

View File

@@ -0,0 +1,14 @@
apiVersion: serving.knative.dev/v1alpha1
kind: Service
metadata:
name: helloworld
spec:
runLatest:
configuration:
revisionTemplate:
spec:
container:
env:
- name: TARGET
value: world
image: helloworld:latest

util/health/testdata/svc-clusterip.yaml

@@ -0,0 +1,25 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"argocd-metrics","namespace":"argocd"},"spec":{"ports":[{"name":"http","port":8082,"protocol":"TCP","targetPort":8082}],"selector":{"app":"argocd-server"}}}
creationTimestamp: 2018-10-27T06:36:27Z
name: argocd-metrics
namespace: argocd
resourceVersion: "1131"
selfLink: /api/v1/namespaces/argocd/services/argocd-metrics
uid: a1f65069-d9b2-11e8-b3c1-9ae2f452bd03
spec:
clusterIP: 10.96.199.2
ports:
- name: http
port: 8082
protocol: TCP
targetPort: 8082
selector:
app: argocd-server
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}

View File

@@ -0,0 +1,25 @@
apiVersion: v1
kind: Service
metadata:
creationTimestamp: 2018-11-06T01:07:35Z
name: argo-artifacts
namespace: argo
resourceVersion: "346792"
selfLink: /api/v1/namespaces/argo/services/argo-artifacts
uid: 586f5e57-e160-11e8-b3c1-9ae2f452bd03
spec:
clusterIP: 10.105.70.181
externalTrafficPolicy: Cluster
ports:
- name: service
nodePort: 32667
port: 9000
protocol: TCP
targetPort: 9000
selector:
app: minio
release: argo-artifacts
sessionAffinity: None
type: LoadBalancer
status:
loadBalancer: {}

View File

@@ -0,0 +1,35 @@
apiVersion: v1
kind: Service
metadata:
annotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "600"
creationTimestamp: 2018-06-05T23:34:58Z
labels:
applications.argoproj.io/app-name: argocd-cdp
name: argocd-server
namespace: argocd
resourceVersion: "32559487"
selfLink: /api/v1/namespaces/argocd/services/argocd-server
uid: 0f5885a9-6919-11e8-ad29-020124679688
spec:
clusterIP: 100.69.46.185
externalTrafficPolicy: Cluster
ports:
- name: http
nodePort: 30354
port: 80
protocol: TCP
targetPort: 8080
- name: https
nodePort: 31866
port: 443
protocol: TCP
targetPort: 8080
selector:
app: argocd-server
sessionAffinity: None
type: LoadBalancer
status:
loadBalancer:
ingress:
- hostname: abc123.us-west-2.elb.amazonaws.com

View File

@@ -160,7 +160,29 @@ func (mgr *SessionManager) VerifyToken(tokenString string) (jwt.Claims, error) {
verifier := provider.Verifier(&oidc.Config{ClientID: claims.Audience})
idToken, err := verifier.Verify(context.Background(), tokenString)
if err != nil {
-return nil, err
+// HACK: if we failed token verification, it's possible the reason was because dex
+// restarted and has new JWKS signing keys (we do not back dex with persistent storage
+// so keys might be regenerated). Detect this by:
+// 1. looking for the specific error message
+// 2. re-initializing the OIDC provider
+// 3. re-attempting token verification
+// NOTE: the error message is sensitive to implementation of verifier.Verify()
+if !strings.Contains(err.Error(), "failed to verify signature") {
+return nil, err
+}
+provider, retryErr := mgr.initializeOIDCProvider()
+if retryErr != nil {
+// return original error if we fail to re-initialize OIDC
+return nil, err
+}
+verifier = provider.Verifier(&oidc.Config{ClientID: claims.Audience})
+idToken, err = verifier.Verify(context.Background(), tokenString)
+if err != nil {
+return nil, err
+}
+// If we get here, we successfully re-initialized OIDC and after re-initialization,
+// the token is now valid.
+log.Info("New OIDC settings detected")
}
var claims jwt.MapClaims
err = idToken.Claims(&claims)
@@ -168,6 +190,7 @@ func (mgr *SessionManager) VerifyToken(tokenString string) (jwt.Claims, error) {
}
}
// Username is a helper to extract a human readable username from a context
func Username(ctx context.Context) string {
claims, ok := ctx.Value("claims").(jwt.Claims)
if !ok {
@@ -194,8 +217,7 @@ func MakeCookieMetadata(key, value string, flags ...string) string {
return strings.Join(components, "; ")
}
-// OIDCProvider lazily initializes and returns the OIDC provider, querying the well known oidc
-// configuration path (http://example-argocd.com/api/dex/.well-known/openid-configuration).
+// OIDCProvider lazily initializes, memoizes, and returns the OIDC provider.
// We have to initialize the provider lazily since ArgoCD is an OIDC client to itself, which
// presents a chicken-and-egg problem of (1) serving dex over HTTP, and (2) querying the OIDC
// provider (ourselves) to initialize the app.
@@ -203,6 +225,12 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
if mgr.provider != nil {
return mgr.provider, nil
}
+return mgr.initializeOIDCProvider()
+}
+// initializeOIDCProvider re-initializes the OIDC provider, querying the well known oidc
+// configuration path (http://example-argocd.com/api/dex/.well-known/openid-configuration)
+func (mgr *SessionManager) initializeOIDCProvider() (*oidc.Provider, error) {
if !mgr.settings.IsSSOConfigured() {
return nil, fmt.Errorf("SSO is not configured")
}
@@ -213,7 +241,6 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
if err != nil {
return nil, fmt.Errorf("Failed to query provider %q: %v", issuerURL, err)
}
// Returns the scopes the provider supports
// See: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
var s struct {
@@ -223,24 +250,27 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
return nil, fmt.Errorf("Failed to parse provider scopes_supported: %v", err)
}
log.Infof("OpenID supported scopes: %v", s.ScopesSupported)
+offlineAsScope := false
if len(s.ScopesSupported) == 0 {
// scopes_supported is a "RECOMMENDED" discovery claim, not a required
// one. If missing, assume that the provider follows the spec and has
// an "offline_access" scope.
-mgr.offlineAsScope = true
+offlineAsScope = true
} else {
// See if scopes_supported has the "offline_access" scope.
for _, scope := range s.ScopesSupported {
if scope == oidc.ScopeOfflineAccess {
-mgr.offlineAsScope = true
+offlineAsScope = true
break
}
}
}
mgr.provider = provider
+mgr.offlineAsScope = offlineAsScope
return mgr.provider, nil
}
// OfflineAsScope returns whether or not the OIDC provider supports offline as a scope
func (mgr *SessionManager) OfflineAsScope() bool {
_, _ = mgr.OIDCProvider() // forces offlineAsScope to be determined
return mgr.offlineAsScope
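
The retry logic added to VerifyToken boils down to: on a signature failure, rebuild the OIDC provider once and re-verify, since a dex restart without persistent storage regenerates the JWKS signing keys (issue #710). The shape of that pattern in isolation, all names hypothetical:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// verifyWithReinit retries verification once after re-initializing the
// provider, but only for signature failures; any other error, or a failed
// re-initialization, surfaces the original error.
func verifyWithReinit(verify func(token string) error, reinit func() error, token string) error {
	err := verify(token)
	if err == nil || !strings.Contains(err.Error(), "failed to verify signature") {
		return err
	}
	if retryErr := reinit(); retryErr != nil {
		return err // keep the original verification error
	}
	// in the real fix the verifier is rebuilt from the new provider before retrying
	return verify(token)
}

func main() {
	calls := 0
	verify := func(string) error { // stub: fails once, as if signed with rotated keys
		calls++
		if calls == 1 {
			return errors.New("failed to verify signature")
		}
		return nil
	}
	fmt.Println(verifyWithReinit(verify, func() error { return nil }, "some.jwt.token"))
}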