feat(health): Add more health checks for Keycloak, Postgres, Grafana, SolrCloud (#20294)

* feat(health): Add healthcheck for acid.zalan.do/postgresql

Signed-off-by: Remo Zellmer <rze@vizrt.com>

* feat(health): Add healthcheck for grafana.integreatly.org/Grafana and GrafanaDatasource

Signed-off-by: Remo Zellmer <rze@vizrt.com>

* feat(health): Add healthcheck for k8s.keycloak.org/Keycloak

Signed-off-by: Remo Zellmer <rze@vizrt.com>

* feat(health): Add healthcheck for solr.apache.org/SolrCloud

Signed-off-by: Remo Zellmer <rze@vizrt.com>

---------

Signed-off-by: Remo Zellmer <rze@vizrt.com>
commit 19d85aa9fb (parent e1472f309a)
Author: rezellme
Date: 2024-10-09 17:12:09 +02:00
Committed by: GitHub
28 changed files with 1230 additions and 0 deletions
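
For context: Argo CD ships these scripts as built-in checks under resource_customizations/, and the same logic can also be supplied at runtime through the argocd-cm ConfigMap. A minimal sketch for the postgresql check, assuming the documented resource.customizations.health.<group>_<kind> key convention (Lua body mirrors the first hunk below):

apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
data:
  resource.customizations.health.acid.zalan.do_postgresql: |
    if obj.status == nil or obj.status.PostgresClusterStatus == nil then
      return { status = "Progressing", message = "Waiting for postgres cluster status..." }
    end
    if obj.status.PostgresClusterStatus == "Running" then
      return { status = "Healthy", message = obj.status.PostgresClusterStatus }
    end
    if obj.status.PostgresClusterStatus == "Creating" or obj.status.PostgresClusterStatus == "Updating" then
      return { status = "Progressing", message = obj.status.PostgresClusterStatus }
    end
    return { status = "Degraded", message = obj.status.PostgresClusterStatus }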

@@ -0,0 +1,30 @@
-- Waiting for status info => Progressing
if obj.status == nil or obj.status.PostgresClusterStatus == nil then
return {
status = "Progressing",
message = "Waiting for postgres cluster status...",
}
end
-- Running => Healthy
if obj.status.PostgresClusterStatus == "Running" then
return {
status = "Healthy",
message = obj.status.PostgresClusterStatus,
}
end
-- Creating/Updating => Progressing
if obj.status.PostgresClusterStatus == "Creating" or obj.status.PostgresClusterStatus == "Updating" then
return {
status = "Progressing",
message = obj.status.PostgresClusterStatus,
}
end
-- CreateFailed/UpdateFailed/SyncFailed/Invalid/etc => Degraded
-- See https://github.com/zalando/postgres-operator/blob/0745ce7c/pkg/apis/acid.zalan.do/v1/const.go#L4-L13
return {
status = "Degraded",
message = obj.status.PostgresClusterStatus,
}

@@ -0,0 +1,17 @@
tests:
- healthStatus:
status: Progressing
message: "Waiting for postgres cluster status..."
inputPath: testdata/provisioning.yaml
- healthStatus:
status: Progressing
message: "Updating"
inputPath: testdata/progressing.yaml
- healthStatus:
status: Healthy
message: "Running"
inputPath: testdata/healthy.yaml
- healthStatus:
status: Degraded
message: "UpdateFailed"
inputPath: testdata/degraded.yaml
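
These cases run through Argo CD's Go test harness, but because each script only reads the global obj and ends in return {...}, it can also be exercised with a stock Lua interpreter. A hypothetical local check, stubbing obj in the shape of testdata/healthy.yaml (the health.lua path is an assumption):

-- Stub the resource that Argo CD would normally inject as the global `obj`.
obj = {
  status = { PostgresClusterStatus = "Running" },
}
-- dofile() executes the chunk and hands back its `return {...}` table.
local health = dofile("health.lua")
print(health.status, health.message) --> Healthy   Running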

@@ -0,0 +1,27 @@
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
annotations:
argocd.argoproj.io/sync-wave: '1'
argocd.argoproj.io/tracking-id: foobar-db:acid.zalan.do/postgresql:foo/foobar-db
creationTimestamp: '2024-10-07T09:06:07Z'
generation: 4
name: foobar-db
namespace: foo
resourceVersion: '242244'
uid: 741b63d5-8deb-45ef-af80-09d558d355a7
spec:
databases:
foobar: root
enableLogicalBackup: false
numberOfInstances: 1
postgresql:
parameters:
password_encryption: scram-sha-256
version: '15'
teamId: foobar
users: {}
volume:
size: 1Gi
status:
PostgresClusterStatus: UpdateFailed

@@ -0,0 +1,27 @@
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
annotations:
argocd.argoproj.io/sync-wave: '1'
argocd.argoproj.io/tracking-id: foobar-db:acid.zalan.do/postgresql:foo/foobar-db
creationTimestamp: '2024-10-07T09:06:07Z'
generation: 4
name: foobar-db
namespace: foo
resourceVersion: '242244'
uid: 741b63d5-8deb-45ef-af80-09d558d355a7
spec:
databases:
foobar: root
enableLogicalBackup: false
numberOfInstances: 1
postgresql:
parameters:
password_encryption: scram-sha-256
version: '15'
teamId: foobar
users: {}
volume:
size: 1Gi
status:
PostgresClusterStatus: Running

@@ -0,0 +1,27 @@
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
annotations:
argocd.argoproj.io/sync-wave: '1'
argocd.argoproj.io/tracking-id: foobar-db:acid.zalan.do/postgresql:foo/foobar-db
creationTimestamp: '2024-10-07T09:06:07Z'
generation: 4
name: foobar-db
namespace: foo
resourceVersion: '242244'
uid: 741b63d5-8deb-45ef-af80-09d558d355a7
spec:
databases:
foobar: root
enableLogicalBackup: false
numberOfInstances: 1
postgresql:
parameters:
password_encryption: scram-sha-256
version: '15'
teamId: foobar
users: {}
volume:
size: 1Gi
status:
PostgresClusterStatus: Updating

@@ -0,0 +1,21 @@
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
annotations:
argocd.argoproj.io/sync-wave: '1'
argocd.argoproj.io/tracking-id: foobar-db:acid.zalan.do/postgresql:foo/foobar-db
name: foobar-db
namespace: foo
spec:
databases:
foobar: root
enableLogicalBackup: false
numberOfInstances: 1
postgresql:
parameters:
password_encryption: scram-sha-256
version: '15'
teamId: foobar
users: {}
volume:
size: 1Gi

@@ -0,0 +1,30 @@
-- if no status info is available yet, assume progressing
if obj.status == nil or obj.status.stageStatus == nil then
return {
status = "Progressing",
message = "Waiting for Grafana status info",
}
end
-- if the last stage failed, we are stuck here
if obj.status.stageStatus == "failed" then
return {
status = "Degraded",
message = "Failed at stage " .. obj.status.stage,
}
end
-- Grafana can only be considered healthy once the "complete" stage has succeeded
if obj.status.stage == "complete" and obj.status.stageStatus == "success" then
return {
status = "Healthy",
message = "",
}
end
-- no final status yet, assume progressing
return {
status = "Progressing",
message = obj.status.stage,
}

@@ -0,0 +1,17 @@
tests:
- healthStatus:
status: Progressing
message: "Waiting for Grafana status info"
inputPath: testdata/provisioning.yaml
- healthStatus:
status: Progressing
message: "deployment"
inputPath: testdata/progressing.yaml
- healthStatus:
status: Healthy
message: ""
inputPath: testdata/healthy.yaml
- healthStatus:
status: Degraded
message: "Failed at stage ingress"
inputPath: testdata/degraded.yaml

@@ -0,0 +1,47 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: Grafana
metadata:
annotations:
argocd.argoproj.io/sync-wave: '1'
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/Grafana:foo/grafana
creationTimestamp: '2024-10-07T08:46:00Z'
generation: 3
labels:
dashboards: grafana
folders: grafana
name: grafana
namespace: foo
resourceVersion: '343511'
uid: d2f0496d-cd5c-46bf-8630-de827b6d59b0
spec:
deployment:
metadata: {}
spec:
template:
metadata: {}
spec:
containers:
- image: docker.io/grafana/grafana:11.1.4
name: grafana
volumeMounts:
- mountPath: /etc/ssl/certs/ca-certificates.crt
name: tls-ca-bundle
readOnly: true
subPath: tls-ca-bundle.pem
volumes:
- name: tls-ca-bundle
secret:
items:
- key: tls-ca-bundle.pem
path: tls-ca-bundle.pem
secretName: tls-ca-bundle-secret
version: 10.4.3
status:
adminUrl: http://grafana-service.foo:3000
dashboards:
- foo/dashboard-argocd/qPkgGHg7k
datasources:
- foo/cluster-local/927b3c23-e25f-4cbe-a82f-effbb0bbbf40
stage: ingress
stageStatus: failed
version: 11.1.4

@@ -0,0 +1,47 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: Grafana
metadata:
annotations:
argocd.argoproj.io/sync-wave: '1'
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/Grafana:foo/grafana
creationTimestamp: '2024-10-07T08:46:00Z'
generation: 3
labels:
dashboards: grafana
folders: grafana
name: grafana
namespace: foo
resourceVersion: '343511'
uid: d2f0496d-cd5c-46bf-8630-de827b6d59b0
spec:
deployment:
metadata: {}
spec:
template:
metadata: {}
spec:
containers:
- image: docker.io/grafana/grafana:11.1.4
name: grafana
volumeMounts:
- mountPath: /etc/ssl/certs/ca-certificates.crt
name: tls-ca-bundle
readOnly: true
subPath: tls-ca-bundle.pem
volumes:
- name: tls-ca-bundle
secret:
items:
- key: tls-ca-bundle.pem
path: tls-ca-bundle.pem
secretName: tls-ca-bundle-secret
version: 10.4.3
status:
adminUrl: http://grafana-service.foo:3000
dashboards:
- foo/dashboard-argocd/qPkgGHg7k
datasources:
- foo/cluster-local/927b3c23-e25f-4cbe-a82f-effbb0bbbf40
stage: complete
stageStatus: success
version: 11.1.4

@@ -0,0 +1,47 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: Grafana
metadata:
annotations:
argocd.argoproj.io/sync-wave: '1'
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/Grafana:foo/grafana
creationTimestamp: '2024-10-07T08:46:00Z'
generation: 3
labels:
dashboards: grafana
folders: grafana
name: grafana
namespace: foo
resourceVersion: '343511'
uid: d2f0496d-cd5c-46bf-8630-de827b6d59b0
spec:
deployment:
metadata: {}
spec:
template:
metadata: {}
spec:
containers:
- image: docker.io/grafana/grafana:11.1.4
name: grafana
volumeMounts:
- mountPath: /etc/ssl/certs/ca-certificates.crt
name: tls-ca-bundle
readOnly: true
subPath: tls-ca-bundle.pem
volumes:
- name: tls-ca-bundle
secret:
items:
- key: tls-ca-bundle.pem
path: tls-ca-bundle.pem
secretName: tls-ca-bundle-secret
version: 10.4.3
status:
adminUrl: http://grafana-service.foo:3000
dashboards:
- foo/dashboard-argocd/qPkgGHg7k
datasources:
- foo/cluster-local/927b3c23-e25f-4cbe-a82f-effbb0bbbf40
stage: deployment
stageStatus: success
version: 11.1.4

@@ -0,0 +1,39 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: Grafana
metadata:
annotations:
argocd.argoproj.io/sync-wave: '1'
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/Grafana:foo/grafana
creationTimestamp: '2024-10-07T08:46:00Z'
generation: 3
labels:
dashboards: grafana
folders: grafana
name: grafana
namespace: foo
resourceVersion: '343511'
uid: d2f0496d-cd5c-46bf-8630-de827b6d59b0
spec:
deployment:
metadata: {}
spec:
template:
metadata: {}
spec:
containers:
- image: docker.io/grafana/grafana:11.1.4
name: grafana
volumeMounts:
- mountPath: /etc/ssl/certs/ca-certificates.crt
name: tls-ca-bundle
readOnly: true
subPath: tls-ca-bundle.pem
volumes:
- name: tls-ca-bundle
secret:
items:
- key: tls-ca-bundle.pem
path: tls-ca-bundle.pem
secretName: tls-ca-bundle-secret
version: 10.4.3
status:

@@ -0,0 +1,20 @@
-- if there is no status yet or the UID has not been set, we are progressing
if obj.status == nil or obj.status.uid == "" then
return {
status = "Progressing",
message = "",
}
end
-- NoMatchingInstances determines whether we are healthy or degraded
if obj.status.NoMatchingInstances then
return {
status = "Degraded",
message = "can't find matching grafana instance",
}
end
return {
status = "Healthy",
message = "",
}

@@ -0,0 +1,13 @@
tests:
- healthStatus:
status: Progressing
message: ""
inputPath: testdata/progressing.yaml
- healthStatus:
status: Healthy
message: ""
inputPath: testdata/healthy.yaml
- healthStatus:
status: Degraded
message: "can't find matching grafana instance"
inputPath: testdata/degraded.yaml

@@ -0,0 +1,42 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDatasource
metadata:
annotations:
argocd.argoproj.io/sync-wave: '3'
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/GrafanaDatasource:foo/cluster-local
creationTimestamp: '2024-10-07T09:37:21Z'
generation: 1
name: cluster-local
namespace: foo
resourceVersion: '356565'
uid: 927b3c23-e25f-4cbe-a82f-effbb0bbbf40
spec:
allowCrossNamespaceImport: true
datasource:
access: proxy
editable: true
isDefault: true
jsonData:
httpHeaderName1: Authorization
timeInterval: 5s
tlsSkipVerify: true
name: cluster-local
secureJsonData:
httpHeaderValue1: Bearer ${token}
type: prometheus
url: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091
instanceSelector:
matchLabels:
dashboards: invalid-selector
resyncPeriod: 5m
valuesFrom:
- targetPath: secureJsonData.httpHeaderValue1
valueFrom:
secretKeyRef:
key: token
name: grafana-token
status:
NoMatchingInstances: true
hash: 56e40622b6a72563637b7c5f33c26d1ce87839dd5897a4a263fbd3d947f951cb
lastResync: '2024-10-09T10:30:40Z'
uid: 927b3c23-e25f-4cbe-a82f-effbb0bbbf40

@@ -0,0 +1,41 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDatasource
metadata:
annotations:
argocd.argoproj.io/sync-wave: '3'
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/GrafanaDatasource:foo/cluster-local
creationTimestamp: '2024-10-07T09:37:21Z'
generation: 1
name: cluster-local
namespace: foo
resourceVersion: '356565'
uid: 927b3c23-e25f-4cbe-a82f-effbb0bbbf40
spec:
allowCrossNamespaceImport: true
datasource:
access: proxy
editable: true
isDefault: true
jsonData:
httpHeaderName1: Authorization
timeInterval: 5s
tlsSkipVerify: true
name: cluster-local
secureJsonData:
httpHeaderValue1: Bearer ${token}
type: prometheus
url: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091
instanceSelector:
matchLabels:
dashboards: grafana
resyncPeriod: 5m
valuesFrom:
- targetPath: secureJsonData.httpHeaderValue1
valueFrom:
secretKeyRef:
key: token
name: grafana-token
status:
hash: 56e40622b6a72563637b7c5f33c26d1ce87839dd5897a4a263fbd3d947f951cb
lastResync: '2024-10-09T10:30:40Z'
uid: 927b3c23-e25f-4cbe-a82f-effbb0bbbf40

@@ -0,0 +1,35 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDatasource
metadata:
annotations:
argocd.argoproj.io/sync-wave: '3'
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/GrafanaDatasource:foo/cluster-local
name: cluster-local
namespace: foo
spec:
allowCrossNamespaceImport: true
datasource:
access: proxy
editable: true
isDefault: true
jsonData:
httpHeaderName1: Authorization
timeInterval: 5s
tlsSkipVerify: true
name: cluster-local
secureJsonData:
httpHeaderValue1: Bearer ${token}
type: prometheus
url: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091
instanceSelector:
matchLabels:
dashboards: grafana
resyncPeriod: 5m
valuesFrom:
- targetPath: secureJsonData.httpHeaderValue1
valueFrom:
secretKeyRef:
key: token
name: grafana-token
status:
uid: ""

@@ -0,0 +1,32 @@
if obj.status == nil or obj.status.conditions == nil then
-- no status info available yet
return {
status = "Progressing",
message = "Waiting for Keycloak status conditions to exist",
}
end
-- Sort conditions by lastTransitionTime, from old to new.
table.sort(obj.status.conditions, function(a, b)
return a.lastTransitionTime < b.lastTransitionTime
end)
for _, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" and condition.status == "True" then
return {
status = "Healthy",
message = "",
}
elseif condition.type == "HasErrors" and condition.status == "True" then
return {
status = "Degraded",
message = "Has Errors: " .. condition.message,
}
end
end
-- We couldn't find matching conditions yet, so assume progressing
return {
status = "Progressing",
message = "",
}
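
Because the loop returns on the first match after the ascending sort, the condition that transitioned earliest in time decides the result; on the degraded fixture in this commit that is HasErrors, since Ready=False never matches. A hypothetical walk-through with a stock Lua interpreter (the health.lua path and trimmed timestamps are assumptions):

-- Stub modeled on testdata/degraded.yaml, deliberately listed out of order.
obj = {
  status = {
    conditions = {
      { type = "HasErrors", status = "True", lastTransitionTime = "2024-10-09T10:14:12Z",
        message = "CrashLoopBackOff" },
      { type = "Ready", status = "False", lastTransitionTime = "2024-10-09T10:13:00Z" },
      { type = "RollingUpdate", status = "True", lastTransitionTime = "2024-10-09T10:12:59Z" },
    },
  },
}
local health = dofile("health.lua")
print(health.status, health.message) --> Degraded   Has Errors: CrashLoopBackOff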

@@ -0,0 +1,17 @@
tests:
- healthStatus:
status: Progressing
message: "Waiting for Keycloak status conditions to exist"
inputPath: testdata/provisioning.yaml
- healthStatus:
status: Progressing
message: ""
inputPath: testdata/progressing.yaml
- healthStatus:
status: Healthy
message: ""
inputPath: testdata/healthy.yaml
- healthStatus:
status: Degraded
message: "Has Errors: Waiting for foo/keycloak-1 due to CrashLoopBackOff: back-off 10s"
inputPath: testdata/degraded.yaml

@@ -0,0 +1,73 @@
apiVersion: k8s.keycloak.org/v2alpha1
kind: Keycloak
metadata:
annotations:
argocd.argoproj.io/sync-wave: '2'
argocd.argoproj.io/tracking-id: foobar-keycloak:k8s.keycloak.org/Keycloak:foo/keycloak
creationTimestamp: '2024-10-07T09:06:33Z'
generation: 4
name: keycloak
namespace: foo
resourceVersion: '343382'
uid: 4e08e59c-1b6b-4b13-8a1a-bbce3f91bd68
spec:
db:
host: keycloak-db
passwordSecret:
key: password
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
usernameSecret:
key: username
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
vendor: postgres
hostname:
admin: https://keycloak.apps-crc.testing
hostname: keycloak.apps-crc.testing
http:
httpEnabled: false
tlsSecret: keycloak-tls
ingress:
enabled: false
instances: 2
unsupported:
podTemplate:
spec:
containers:
- env:
- name: KC_HTTPS_TRUST_STORE_FILE
value: /truststore/openshiftca.jks
- name: KC_HTTPS_TRUST_STORE_PASSWORD
value: OpenshiftCA
- name: KC_HTTPS_TRUST_STORE_TYPE
value: JKS
- name: KC_LOG_LEVEL
value: INFO
volumeMounts:
- mountPath: /truststore
name: truststore-volume
volumes:
- name: truststore-volume
secret:
secretName: keycloak-truststore
status:
conditions:
- lastTransitionTime: '2024-10-09T10:13:00.097073410Z'
message: Waiting for more replicas
observedGeneration: 5
status: 'False'
type: Ready
- lastTransitionTime: '2024-10-09T10:14:12.070548569Z'
message: >-
Waiting for foo/keycloak-1 due to CrashLoopBackOff: back-off 10s
observedGeneration: 5
status: 'True'
type: HasErrors
- lastTransitionTime: '2024-10-09T10:12:59.087234931Z'
message: Rolling out deployment update
observedGeneration: 5
status: 'True'
type: RollingUpdate
instances: 1
observedGeneration: 5
selector: >-
app=keycloak,app.kubernetes.io/managed-by=keycloak-operator,app.kubernetes.io/instance=keycloak

@@ -0,0 +1,77 @@
apiVersion: k8s.keycloak.org/v2alpha1
kind: Keycloak
metadata:
annotations:
argocd.argoproj.io/sync-wave: '2'
argocd.argoproj.io/tracking-id: foobar-keycloak:k8s.keycloak.org/Keycloak:foo/keycloak
creationTimestamp: '2024-10-07T09:06:33Z'
generation: 4
name: keycloak
namespace: foo
resourceVersion: '343382'
uid: 4e08e59c-1b6b-4b13-8a1a-bbce3f91bd68
spec:
additionalOptions:
- name: proxy-headers
value: xforwarded
db:
host: keycloak-db
passwordSecret:
key: password
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
usernameSecret:
key: username
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
vendor: postgres
hostname:
admin: https://keycloak.apps-crc.testing
hostname: keycloak.apps-crc.testing
http:
httpEnabled: false
tlsSecret: keycloak-tls
ingress:
enabled: false
instances: 2
unsupported:
podTemplate:
spec:
containers:
- env:
- name: KC_HTTPS_TRUST_STORE_FILE
value: /truststore/openshiftca.jks
- name: KC_HTTPS_TRUST_STORE_PASSWORD
value: OpenshiftCA
- name: KC_HTTPS_TRUST_STORE_TYPE
value: JKS
- name: KC_LOG_LEVEL
value: INFO
volumeMounts:
- mountPath: /truststore
name: truststore-volume
volumes:
- name: truststore-volume
secret:
secretName: keycloak-truststore
status:
conditions:
- lastTransitionTime: '2024-10-09T09:55:28.695748046Z'
message: ''
observedGeneration: 4
status: 'True'
type: Ready
- lastTransitionTime: '2024-10-08T11:11:08.814752530Z'
message: >-
warning: You need to specify these fields as the first-class citizen of
the CR: proxy-headers
observedGeneration: 4
status: 'False'
type: HasErrors
- lastTransitionTime: '2024-10-09T09:47:33.600863636Z'
message: ''
observedGeneration: 4
status: 'False'
type: RollingUpdate
instances: 2
observedGeneration: 4
selector: >-
app=keycloak,app.kubernetes.io/managed-by=keycloak-operator,app.kubernetes.io/instance=keycloak

@@ -0,0 +1,77 @@
apiVersion: k8s.keycloak.org/v2alpha1
kind: Keycloak
metadata:
annotations:
argocd.argoproj.io/sync-wave: '2'
argocd.argoproj.io/tracking-id: foobar-keycloak:k8s.keycloak.org/Keycloak:foo/keycloak
creationTimestamp: '2024-10-07T09:06:33Z'
generation: 4
name: keycloak
namespace: foo
resourceVersion: '343382'
uid: 4e08e59c-1b6b-4b13-8a1a-bbce3f91bd68
spec:
additionalOptions:
- name: proxy-headers
value: xforwarded
db:
host: keycloak-db
passwordSecret:
key: password
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
usernameSecret:
key: username
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
vendor: postgres
hostname:
admin: https://keycloak.apps-crc.testing
hostname: keycloak.apps-crc.testing
http:
httpEnabled: false
tlsSecret: keycloak-tls
ingress:
enabled: false
instances: 2
unsupported:
podTemplate:
spec:
containers:
- env:
- name: KC_HTTPS_TRUST_STORE_FILE
value: /truststore/openshiftca.jks
- name: KC_HTTPS_TRUST_STORE_PASSWORD
value: OpenshiftCA
- name: KC_HTTPS_TRUST_STORE_TYPE
value: JKS
- name: KC_LOG_LEVEL
value: INFO
volumeMounts:
- mountPath: /truststore
name: truststore-volume
volumes:
- name: truststore-volume
secret:
secretName: keycloak-truststore
status:
conditions:
- lastTransitionTime: '2024-10-09T10:13:00.097073410Z'
message: Waiting for more replicas
observedGeneration: 5
status: 'False'
type: Ready
- lastTransitionTime: '2024-10-08T11:11:08.814752530Z'
message: >-
warning: You need to specify these fields as the first-class citizen of
the CR: proxy-headers
observedGeneration: 5
status: 'False'
type: HasErrors
- lastTransitionTime: '2024-10-09T10:12:59.087234931Z'
message: Rolling out deployment update
observedGeneration: 5
status: 'True'
type: RollingUpdate
instances: 1
observedGeneration: 5
selector: >-
app=keycloak,app.kubernetes.io/managed-by=keycloak-operator,app.kubernetes.io/instance=keycloak

@@ -0,0 +1,52 @@
apiVersion: k8s.keycloak.org/v2alpha1
kind: Keycloak
metadata:
annotations:
argocd.argoproj.io/sync-wave: '2'
argocd.argoproj.io/tracking-id: foobar-keycloak:k8s.keycloak.org/Keycloak:foo/keycloak
creationTimestamp: '2024-10-07T09:06:33Z'
generation: 4
name: keycloak
namespace: foo
resourceVersion: '343382'
uid: 4e08e59c-1b6b-4b13-8a1a-bbce3f91bd68
spec:
db:
host: keycloak-db
passwordSecret:
key: password
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
usernameSecret:
key: username
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
vendor: postgres
hostname:
admin: https://keycloak.apps-crc.testing
hostname: keycloak.apps-crc.testing
http:
httpEnabled: false
tlsSecret: keycloak-tls
ingress:
enabled: false
instances: 2
unsupported:
podTemplate:
spec:
containers:
- env:
- name: KC_HTTPS_TRUST_STORE_FILE
value: /truststore/openshiftca.jks
- name: KC_HTTPS_TRUST_STORE_PASSWORD
value: OpenshiftCA
- name: KC_HTTPS_TRUST_STORE_TYPE
value: JKS
- name: KC_LOG_LEVEL
value: INFO
volumeMounts:
- mountPath: /truststore
name: truststore-volume
volumes:
- name: truststore-volume
secret:
secretName: keycloak-truststore
status:

@@ -0,0 +1,24 @@
-- There is no value in the manifest from which we can conclude that this
-- resource is in a "Degraded" state. Update this if such a value is added
-- in the future.
if obj.status == nil or obj.status.solrNodes == nil then
return {
status = "Progressing",
message = "Waiting for solr to exist",
}
end
for _, solrNode in ipairs(obj.status.solrNodes) do
if not solrNode.ready then
return {
status = "Progressing",
message = "Not all replicas are ready",
}
end
end
return {
status = "Healthy",
message = "Solr is ready",
}

@@ -0,0 +1,13 @@
tests:
- healthStatus:
status: Progressing
message: "Waiting for solr to exist"
inputPath: testdata/provisioning.yaml
- healthStatus:
status: Progressing
message: "Not all replicas are ready"
inputPath: testdata/progressing.yaml
- healthStatus:
status: Healthy
message: "Solr is ready"
inputPath: testdata/healthy.yaml

@@ -0,0 +1,118 @@
apiVersion: solr.apache.org/v1beta1
kind: SolrCloud
metadata:
annotations:
argocd.argoproj.io/tracking-id: foobar-solr:solr.apache.org/SolrCloud:foo/solr
creationTimestamp: '2024-10-07T09:30:03Z'
finalizers:
- storage.finalizers.solr.apache.org
generation: 2
labels:
app.kubernetes.io/instance: foobar-solr
app.kubernetes.io/name: solr
app.kubernetes.io/version: 8.11.1
helm.sh/chart: solr-0.8.1
name: solr
namespace: foo
resourceVersion: '339148'
uid: 42f073e1-bf7c-4d2f-923a-66886898e6a2
spec:
availability:
podDisruptionBudget:
enabled: true
method: ClusterWide
busyBoxImage:
repository: library/busybox
tag: 1.28.0-glibc
customSolrKubeOptions:
podOptions:
defaultInitContainerResources: {}
nodeSelector:
node-role.kubernetes.io/worker: ''
podSecurityContext:
runAsGroup: 8983
runAsNonRoot: true
runAsUser: 8983
seccompProfile:
type: RuntimeDefault
resources: {}
serviceAccountName: solr-sa
startupProbe:
periodSeconds: 10
timeoutSeconds: 30
dataStorage:
persistent:
pvcTemplate:
metadata:
annotations:
foobar: solr-data
labels:
foobar: solr-data
name: solr-data
spec:
resources:
requests:
storage: 20Gi
reclaimPolicy: Delete
replicas: 1
scaling:
populatePodsOnScaleUp: true
vacatePodsOnScaleDown: true
solrAddressability:
commonServicePort: 80
podPort: 8983
solrImage:
repository: solr
tag: '8.11'
solrJavaMem: '-Xms1g -Xmx2g'
solrLogLevel: DEBUG
solrOpts: '-Dsolr.disable.shardsWhitelist=true'
updateStrategy:
managed: {}
method: Managed
zookeeperRef:
provided:
adminServerService: {}
chroot: /
clientService: {}
config: {}
headlessService: {}
image:
pullPolicy: IfNotPresent
repository: pravega/zookeeper
maxUnavailableReplicas: 1
persistence:
reclaimPolicy: Delete
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
replicas: 1
zookeeperPodPolicy:
resources: {}
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
status:
internalCommonAddress: http://solr-solrcloud-common.foo
podSelector: solr-cloud=solr,technology=solr-cloud
readyReplicas: 1
replicas: 1
solrNodes:
- internalAddress: http://solr-solrcloud-0.solr-solrcloud-headless.foo:8983
name: solr-solrcloud-0
nodeName: crc-j5m2n-master-0
ready: true
scheduledForDeletion: false
specUpToDate: true
version: '8.11'
upToDateNodes: 1
version: '8.11'
zookeeperConnectionInfo:
chroot: /
externalConnectionString: N/A
internalConnectionString: >-
solr-solrcloud-zookeeper-0.solr-solrcloud-zookeeper-headless.foo.svc.cluster.local:2181

@@ -0,0 +1,125 @@
apiVersion: solr.apache.org/v1beta1
kind: SolrCloud
metadata:
annotations:
argocd.argoproj.io/tracking-id: foobar-solr:solr.apache.org/SolrCloud:foo/solr
creationTimestamp: '2024-10-07T09:30:03Z'
finalizers:
- storage.finalizers.solr.apache.org
generation: 2
labels:
app.kubernetes.io/instance: foobar-solr
app.kubernetes.io/name: solr
app.kubernetes.io/version: 8.11.1
helm.sh/chart: solr-0.8.1
name: solr
namespace: foo
resourceVersion: '339148'
uid: 42f073e1-bf7c-4d2f-923a-66886898e6a2
spec:
availability:
podDisruptionBudget:
enabled: true
method: ClusterWide
busyBoxImage:
repository: library/busybox
tag: 1.28.0-glibc
customSolrKubeOptions:
podOptions:
defaultInitContainerResources: {}
nodeSelector:
node-role.kubernetes.io/worker: ''
podSecurityContext:
runAsGroup: 8983
runAsNonRoot: true
runAsUser: 8983
seccompProfile:
type: RuntimeDefault
resources: {}
serviceAccountName: solr-sa
startupProbe:
periodSeconds: 10
timeoutSeconds: 30
dataStorage:
persistent:
pvcTemplate:
metadata:
annotations:
foobar: solr-data
labels:
foobar: solr-data
name: solr-data
spec:
resources:
requests:
storage: 20Gi
reclaimPolicy: Delete
replicas: 2
scaling:
populatePodsOnScaleUp: true
vacatePodsOnScaleDown: true
solrAddressability:
commonServicePort: 80
podPort: 8983
solrImage:
repository: solr
tag: '8.11'
solrJavaMem: '-Xms1g -Xmx2g'
solrLogLevel: DEBUG
solrOpts: '-Dsolr.disable.shardsWhitelist=true'
updateStrategy:
managed: {}
method: Managed
zookeeperRef:
provided:
adminServerService: {}
chroot: /
clientService: {}
config: {}
headlessService: {}
image:
pullPolicy: IfNotPresent
repository: pravega/zookeeper
maxUnavailableReplicas: 1
persistence:
reclaimPolicy: Delete
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
replicas: 1
zookeeperPodPolicy:
resources: {}
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
status:
internalCommonAddress: http://solr-solrcloud-common.foo
podSelector: solr-cloud=solr,technology=solr-cloud
readyReplicas: 1
replicas: 2
solrNodes:
- internalAddress: http://solr-solrcloud-0.solr-solrcloud-headless.foo:8983
name: solr-solrcloud-0
nodeName: crc-j5m2n-master-0
ready: true
scheduledForDeletion: false
specUpToDate: true
version: '8.11'
- internalAddress: http://solr-solrcloud-1.solr-solrcloud-headless.foo:8983
name: solr-solrcloud-1
nodeName: ''
ready: false
scheduledForDeletion: false
specUpToDate: true
version: ''
upToDateNodes: 2
version: '8.11'
zookeeperConnectionInfo:
chroot: /
externalConnectionString: N/A
internalConnectionString: >-
solr-solrcloud-zookeeper-0.solr-solrcloud-zookeeper-headless.foo.svc.cluster.local:2181

@@ -0,0 +1,95 @@
apiVersion: solr.apache.org/v1beta1
kind: SolrCloud
metadata:
annotations:
argocd.argoproj.io/tracking-id: foobar-solr:solr.apache.org/SolrCloud:foo/solr
finalizers:
- storage.finalizers.solr.apache.org
labels:
app.kubernetes.io/instance: foobar-solr
app.kubernetes.io/name: solr
app.kubernetes.io/version: 8.11.1
helm.sh/chart: solr-0.8.1
name: solr
namespace: foo
spec:
availability:
podDisruptionBudget:
enabled: true
method: ClusterWide
busyBoxImage:
repository: library/busybox
tag: 1.28.0-glibc
customSolrKubeOptions:
podOptions:
defaultInitContainerResources: {}
nodeSelector:
node-role.kubernetes.io/worker: ''
podSecurityContext:
runAsGroup: 8983
runAsNonRoot: true
runAsUser: 8983
seccompProfile:
type: RuntimeDefault
resources: {}
serviceAccountName: solr-sa
startupProbe:
periodSeconds: 10
timeoutSeconds: 30
dataStorage:
persistent:
pvcTemplate:
metadata:
annotations:
foobar: solr-data
labels:
foobar: solr-data
name: solr-data
spec:
resources:
requests:
storage: 20Gi
reclaimPolicy: Delete
replicas: 1
scaling:
populatePodsOnScaleUp: true
vacatePodsOnScaleDown: true
solrAddressability:
commonServicePort: 80
podPort: 8983
solrImage:
repository: solr
tag: '8.11'
solrJavaMem: '-Xms1g -Xmx2g'
solrLogLevel: DEBUG
solrOpts: '-Dsolr.disable.shardsWhitelist=true'
updateStrategy:
managed: {}
method: Managed
zookeeperRef:
provided:
adminServerService: {}
chroot: /
clientService: {}
config: {}
headlessService: {}
image:
pullPolicy: IfNotPresent
repository: pravega/zookeeper
maxUnavailableReplicas: 1
persistence:
reclaimPolicy: Delete
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
replicas: 1
zookeeperPodPolicy:
resources: {}
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
status: