feat(actions): Add cloudnativepg reload, restart, promote, suspend and resume actions (#24192)

Signed-off-by: Rouke Broersma <mobrockers@gmail.com>
Signed-off-by: Rouke Broersma <rouke.broersma@infosupport.com>
Rouke Broersma
2025-12-10 12:15:08 +01:00
committed by GitHub
parent 2c6edd819f
commit e50dd008fd
17 changed files with 574 additions and 42 deletions
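
The new actions surface in the Argo CD UI on CloudNativePG Cluster resources and can also be driven from the CLI. A minimal sketch (the application name my-app and the resource coordinates below are placeholder assumptions, not part of this commit):

# List the actions discovered for a Cluster resource
argocd app actions list my-app --kind Cluster --group postgresql.cnpg.io
# Run one of the new actions, e.g. a rollout restart of the cluster
argocd app actions run my-app restart --kind Cluster --group postgresql.cnpg.io --resource-name cluster-example --namespace default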

View File

@@ -64,6 +64,11 @@
- [numaplane.numaproj.io/PipelineRollout/pause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/PipelineRollout/actions/pause/action.lua)
- [numaplane.numaproj.io/PipelineRollout/unpause-fast](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/PipelineRollout/actions/unpause-fast/action.lua)
- [numaplane.numaproj.io/PipelineRollout/unpause-gradual](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/PipelineRollout/actions/unpause-gradual/action.lua)
- [postgresql.cnpg.io/Cluster/promote](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/postgresql.cnpg.io/Cluster/actions/promote/action.lua)
- [postgresql.cnpg.io/Cluster/reload](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/postgresql.cnpg.io/Cluster/actions/reload/action.lua)
- [postgresql.cnpg.io/Cluster/restart](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/postgresql.cnpg.io/Cluster/actions/restart/action.lua)
- [postgresql.cnpg.io/Cluster/resume](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/postgresql.cnpg.io/Cluster/actions/resume/action.lua)
- [postgresql.cnpg.io/Cluster/suspend](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/postgresql.cnpg.io/Cluster/actions/suspend/action.lua)
- [promoter.argoproj.io/PullRequest/merge](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/promoter.argoproj.io/PullRequest/actions/merge/action.lua)
- [source.toolkit.fluxcd.io/Bucket/reconcile](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/source.toolkit.fluxcd.io/Bucket/actions/reconcile/action.lua)
- [source.toolkit.fluxcd.io/Bucket/resume](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/source.toolkit.fluxcd.io/Bucket/actions/resume/action.lua)

View File

@@ -0,0 +1,33 @@
actionTests:
- action: promote
  inputPath: testdata/cluster_healthy.yaml
  expectedOutputPath: testdata/cluster_promoting.yaml
  parameters:
    instance: 'any'
- action: promote
  inputPath: testdata/cluster_healthy.yaml
  expectedOutputPath: testdata/cluster_promoting.yaml
  parameters:
    instance: '2'
- action: promote
  inputPath: testdata/cluster_healthy.yaml
  expectedOutputPath: testdata/cluster_promoting.yaml
  parameters:
    instance: 'cluster-example-2'
- action: promote
  inputPath: testdata/cluster_healthy.yaml
  expectedErrorMessage: 'Could not find a healthy instance matching the criteria: nonexistent-instance'
  parameters:
    instance: 'nonexistent-instance'
- action: reload
  inputPath: testdata/cluster_healthy.yaml
  expectedOutputPath: testdata/cluster_reload.yaml
- action: restart
  inputPath: testdata/cluster_healthy.yaml
  expectedOutputPath: testdata/cluster_restart.yaml
- action: suspend
  inputPath: testdata/cluster_healthy.yaml
  expectedOutputPath: testdata/cluster_reconcile_suspended.yaml
- action: resume
  inputPath: testdata/cluster_reconcile_suspended.yaml
  expectedOutputPath: testdata/cluster_healthy.yaml

View File

@@ -0,0 +1,41 @@
local actions = {}
actions["restart"] = {
  ["iconClass"] = "fa fa-fw fa-recycle",
  ["displayName"] = "Rollout restart Cluster"
}
actions["reload"] = {
  ["iconClass"] = "fa fa-fw fa-rotate-right",
  ["displayName"] = "Reload all Configuration"
}
actions["promote"] = {
  ["iconClass"] = "fa fa-fw fa-angles-up",
  ["displayName"] = "Promote Replica to Primary",
  ["disabled"] = (not obj.status.instancesStatus or not obj.status.instancesStatus.healthy or #obj.status.instancesStatus.healthy < 2),
  ["params"] = {
    {
      ["name"] = "instance",
      ["default"] = "any"
    }
  }
}

-- Check if reconciliation is currently suspended
local isSuspended = false
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliation"] == "disabled" then
  isSuspended = true
end

-- Add suspend/resume actions based on current state
if isSuspended then
  actions["resume"] = {
    ["iconClass"] = "fa fa-fw fa-play",
    ["displayName"] = "Resume Reconciliation"
  }
else
  actions["suspend"] = {
    ["iconClass"] = "fa fa-fw fa-pause",
    ["displayName"] = "Suspend Reconciliation"
  }
end

return actions
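
The discovery script above runs against the live Cluster object, so the action menu adapts to state: promote is disabled unless at least two healthy instances exist, and exactly one of suspend/resume is offered depending on the cnpg.io/reconciliation annotation. A quick way to check which branch the toggle will take (cluster and namespace names are assumptions taken from the testdata below):

# Inspect the annotations that drive the suspend/resume toggle
kubectl get clusters.postgresql.cnpg.io cluster-example -n default -o jsonpath='{.metadata.annotations}'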

View File

@@ -0,0 +1,50 @@
local os = require("os")
local instance = actionParams["instance"]
local healthy = obj.status.instancesStatus.healthy
local selected = nil

if instance == "any" then
  -- Select next healthy instance after currentPrimary
  local nextIndex = 0
  for index, node in ipairs(healthy) do
    if node == obj.status.currentPrimary then
      nextIndex = index + 1
      if nextIndex > #healthy then
        nextIndex = 1
      end
      break
    end
  end
  if nextIndex > 0 then
    selected = healthy[nextIndex]
  elseif #healthy > 0 then
    selected = healthy[1] -- fallback to first healthy if current primary not healthy
  end
elseif type(instance) == "string" and tonumber(instance) then
  -- Select by instance number
  local wanted = (obj.metadata and obj.metadata.name or "") .. "-" .. instance
  for _, node in ipairs(healthy or {}) do
    if node == wanted then
      selected = node
      break
    end
  end
elseif type(instance) == "string" then
  -- Select by full name
  for _, node in ipairs(healthy) do
    if node == instance then
      selected = node
      break
    end
  end
end

if selected then
  obj.status.targetPrimary = selected
  obj.status.targetPrimaryTimestamp = os.date("!%Y-%m-%dT%XZ")
  obj.status.phase = "Switchover in progress"
  obj.status.phaseReason = "Switching over to " .. selected
else
  error("Could not find a healthy instance matching the criteria: " .. instance, 0)
end
return obj
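
The instance parameter accepts three forms: any (rotate to the next healthy instance after the current primary), a bare ordinal such as 2 (expanded to <cluster-name>-2), or a full instance name. Setting status.targetPrimary mirrors what the CNPG kubectl plugin's promote command does; a rough imperative equivalent, assuming the plugin is installed and using names from the testdata:

# Imperative equivalent: request a switchover to instance 2
kubectl cnpg promote cluster-example cluster-example-2 -n default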

View File

@@ -0,0 +1,10 @@
local os = require("os")

if obj.metadata == nil then
  obj.metadata = {}
end
if obj.metadata.annotations == nil then
  obj.metadata.annotations = {}
end
obj.metadata.annotations["cnpg.io/reloadedAt"] = os.date("!%Y-%m-%dT%XZ")
return obj
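
The reload action only stamps an annotation; the CloudNativePG operator reacts to the change by reloading configuration. The same effect can be produced imperatively with plain kubectl (cluster and namespace names are assumptions):

# Equivalent of the reload action: bump the reload annotation
kubectl annotate clusters.postgresql.cnpg.io cluster-example -n default cnpg.io/reloadedAt="$(date -u +%Y-%m-%dT%H:%M:%SZ)" --overwrite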

View File

@@ -0,0 +1,10 @@
local os = require("os")

if obj.metadata == nil then
  obj.metadata = {}
end
if obj.metadata.annotations == nil then
  obj.metadata.annotations = {}
end
obj.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ")
return obj
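
The restart action reuses the kubectl.kubernetes.io/restartedAt convention that kubectl rollout restart applies to workloads, which CNPG honors with a rolling restart of the cluster. A sketch of the imperative equivalent, assuming the CNPG kubectl plugin is installed:

# Imperative equivalent via the CNPG plugin
kubectl cnpg restart cluster-example -n default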

View File

@@ -0,0 +1,10 @@
if obj.metadata == nil then
  obj.metadata = {}
end
if obj.metadata.annotations == nil then
  obj.metadata.annotations = {}
end

obj.metadata.annotations["cnpg.io/reconciliation"] = nil

return obj

View File

@@ -0,0 +1,10 @@
if obj.metadata == nil then
  obj.metadata = {}
end
if obj.metadata.annotations == nil then
  obj.metadata.annotations = {}
end

obj.metadata.annotations["cnpg.io/reconciliation"] = "disabled"

return obj
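
Together, the suspend and resume actions above just toggle the cnpg.io/reconciliation annotation that the operator honors. The imperative equivalents with plain kubectl (names are assumptions):

# Suspend: stop the operator from reconciling the cluster
kubectl annotate clusters.postgresql.cnpg.io cluster-example -n default cnpg.io/reconciliation=disabled --overwrite
# Resume: remove the annotation again (the trailing dash deletes it)
kubectl annotate clusters.postgresql.cnpg.io cluster-example -n default cnpg.io/reconciliation-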

View File

@@ -0,0 +1,23 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  creationTimestamp: "2025-04-25T20:44:24Z"
  generation: 1
  name: cluster-example
  namespace: default
  resourceVersion: "20230"
  uid: 987fe1ba-bba7-4021-9d25-f06ca9a8c0d2
spec:
  imageName: ghcr.io/cloudnative-pg/postgresql:13
  instances: 3
status:
  currentPrimary: cluster-example-1
  currentPrimaryTimestamp: "2025-04-25T20:44:38.190232Z"
  instancesStatus:
    healthy:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  phase: Cluster in healthy state
  targetPrimary: cluster-example-1
  targetPrimaryTimestamp: "2025-04-25T20:44:26.214164Z"

View File

@@ -0,0 +1,24 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  creationTimestamp: "2025-04-25T20:44:24Z"
  generation: 1
  name: cluster-example
  namespace: default
  resourceVersion: "20230"
  uid: 987fe1ba-bba7-4021-9d25-f06ca9a8c0d2
spec:
  imageName: ghcr.io/cloudnative-pg/postgresql:13
  instances: 3
status:
  currentPrimary: cluster-example-1
  currentPrimaryTimestamp: "2025-04-25T20:44:38.190232Z"
  instancesStatus:
    healthy:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  phase: Switchover in progress
  phaseReason: Switching over to cluster-example-2
  targetPrimary: cluster-example-2
  targetPrimaryTimestamp: "0001-01-01T00:00:00Z"

View File

@@ -0,0 +1,23 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  creationTimestamp: "2025-04-25T20:44:24Z"
  generation: 1
  name: cluster-example
  namespace: default
  resourceVersion: "20230"
  uid: 987fe1ba-bba7-4021-9d25-f06ca9a8c0d2
spec:
  imageName: ghcr.io/cloudnative-pg/postgresql:13
  instances: 3
status:
  currentPrimary: cluster-example-1
  currentPrimaryTimestamp: "2025-04-25T20:44:38.190232Z"
  instancesStatus:
    healthy:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  phase: Cluster in healthy state
  targetPrimary: cluster-example-1
  targetPrimaryTimestamp: "2025-04-25T20:44:26.214164Z"

View File

@@ -0,0 +1,25 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  annotations:
    cnpg.io/reloadedAt: "0001-01-01T00:00:00Z"
  creationTimestamp: "2025-04-25T20:44:24Z"
  generation: 1
  name: cluster-example
  namespace: default
  resourceVersion: "20230"
  uid: 987fe1ba-bba7-4021-9d25-f06ca9a8c0d2
spec:
  imageName: ghcr.io/cloudnative-pg/postgresql:13
  instances: 3
status:
  currentPrimary: cluster-example-1
  currentPrimaryTimestamp: "2025-04-25T20:44:38.190232Z"
  instancesStatus:
    healthy:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  phase: Cluster in healthy state
  targetPrimary: cluster-example-1
  targetPrimaryTimestamp: "2025-04-25T20:44:26.214164Z"

View File

@@ -0,0 +1,25 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  annotations:
    kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z"
  creationTimestamp: "2025-04-25T20:44:24Z"
  generation: 1
  name: cluster-example
  namespace: default
  resourceVersion: "20230"
  uid: 987fe1ba-bba7-4021-9d25-f06ca9a8c0d2
spec:
  imageName: ghcr.io/cloudnative-pg/postgresql:13
  instances: 3
status:
  currentPrimary: cluster-example-1
  currentPrimaryTimestamp: "2025-04-25T20:44:38.190232Z"
  instancesStatus:
    healthy:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  phase: Cluster in healthy state
  targetPrimary: cluster-example-1
  targetPrimaryTimestamp: "2025-04-25T20:44:26.214164Z"

View File

@@ -3,7 +3,6 @@ local hs = {}
local cnpgStatus = {
  ["Cluster in healthy state"] = "Healthy",
  ["Setting up primary"] = "Progressing",
  ["Creating a new replica"] = "Progressing",
  ["Upgrading cluster"] = "Progressing",
  ["Waiting for the instances to become active"] = "Progressing",
@@ -33,6 +32,13 @@ function hibernating(obj)
  return nil
end

-- Check if reconciliation is suspended; since this is an explicit user action, we return the "Suspended" status immediately
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliation"] == "disabled" then
  hs.status = "Suspended"
  hs.message = "Cluster reconciliation is suspended"
  return hs
end

if obj.status ~= nil and obj.status.conditions ~= nil then
  local hibernation = hibernating(obj)
  if hibernation ~= nil then

View File

@@ -15,3 +15,7 @@ tests:
    status: Degraded
    message: "Initiating a failover from cluster-example-2"
  inputPath: testdata/cluster_degraded.yaml
- healthStatus:
    status: Suspended
    message: "Cluster reconciliation is suspended"
  inputPath: testdata/cluster_reconcile_suspended.yaml

View File

@@ -0,0 +1,178 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  annotations:
    cnpg.io/reconciliation: "disabled"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"postgresql.cnpg.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"imageName":"ghcr.io/cloudnative-pg/postgresql:13","instances":3,"storage":{"size":"1Gi"}}}
  creationTimestamp: "2025-04-25T20:44:24Z"
  generation: 1
  name: cluster-example
  namespace: default
  resourceVersion: "20230"
  uid: 987fe1ba-bba7-4021-9d25-f06ca9a8c0d2
spec:
  affinity:
    podAntiAffinityType: preferred
  bootstrap:
    initdb:
      database: app
      encoding: UTF8
      localeCType: C
      localeCollate: C
      owner: app
  enablePDB: true
  enableSuperuserAccess: false
  failoverDelay: 0
  imageName: ghcr.io/cloudnative-pg/postgresql:13
  instances: 3
  logLevel: info
  maxSyncReplicas: 0
  minSyncReplicas: 0
  monitoring:
    customQueriesConfigMap:
    - key: queries
      name: cnpg-default-monitoring
    disableDefaultQueries: false
    enablePodMonitor: false
  postgresGID: 26
  postgresUID: 26
  postgresql:
    parameters:
      archive_mode: "on"
      archive_timeout: 5min
      dynamic_shared_memory_type: posix
      full_page_writes: "on"
      log_destination: csvlog
      log_directory: /controller/log
      log_filename: postgres
      log_rotation_age: "0"
      log_rotation_size: "0"
      log_truncate_on_rotation: "false"
      logging_collector: "on"
      max_parallel_workers: "32"
      max_replication_slots: "32"
      max_worker_processes: "32"
      shared_memory_type: mmap
      shared_preload_libraries: ""
      ssl_max_protocol_version: TLSv1.3
      ssl_min_protocol_version: TLSv1.3
      wal_keep_size: 512MB
      wal_level: logical
      wal_log_hints: "on"
      wal_receiver_timeout: 5s
      wal_sender_timeout: 5s
    syncReplicaElectionConstraint:
      enabled: false
  primaryUpdateMethod: restart
  primaryUpdateStrategy: unsupervised
  replicationSlots:
    highAvailability:
      enabled: true
      slotPrefix: _cnpg_
    synchronizeReplicas:
      enabled: true
    updateInterval: 30
  resources: {}
  smartShutdownTimeout: 180
  startDelay: 3600
  stopDelay: 1800
  storage:
    resizeInUseVolumes: true
    size: 1Gi
  switchoverDelay: 3600
status:
  availableArchitectures:
  - goArch: amd64
    hash: d54839c128b2b38034c6f73006b2a979d916c9715cda5d59a1241018cc44904e
  certificates:
    clientCASecret: cluster-example-ca
    expirations:
      cluster-example-ca: 2025-07-24 20:39:24 +0000 UTC
      cluster-example-replication: 2025-07-24 20:39:25 +0000 UTC
      cluster-example-server: 2025-07-24 20:39:25 +0000 UTC
    replicationTLSSecret: cluster-example-replication
    serverAltDNSNames:
    - cluster-example-rw
    - cluster-example-rw.default
    - cluster-example-rw.default.svc
    - cluster-example-rw.default.svc.cluster.local
    - cluster-example-r
    - cluster-example-r.default
    - cluster-example-r.default.svc
    - cluster-example-r.default.svc.cluster.local
    - cluster-example-ro
    - cluster-example-ro.default
    - cluster-example-ro.default.svc
    - cluster-example-ro.default.svc.cluster.local
    serverCASecret: cluster-example-ca
    serverTLSSecret: cluster-example-server
  cloudNativePGCommitHash: 005e82a17
  cloudNativePGOperatorHash: d54839c128b2b38034c6f73006b2a979d916c9715cda5d59a1241018cc44904e
  conditions:
  - lastTransitionTime: "2025-04-25T20:45:33Z"
    message: Cluster is Ready
    reason: ClusterIsReady
    status: "True"
    type: Ready
  - lastTransitionTime: "2025-04-25T20:44:53Z"
    message: Continuous archiving is working
    reason: ContinuousArchivingSuccess
    status: "True"
    type: ContinuousArchiving
  configMapResourceVersion:
    metrics:
      cnpg-default-monitoring: "19786"
  currentPrimary: cluster-example-1
  currentPrimaryTimestamp: "2025-04-25T20:44:38.190232Z"
  healthyPVC:
  - cluster-example-1
  - cluster-example-2
  - cluster-example-3
  image: ghcr.io/cloudnative-pg/postgresql:13
  instanceNames:
  - cluster-example-1
  - cluster-example-2
  - cluster-example-3
  instances: 3
  instancesReportedState:
    cluster-example-1:
      isPrimary: true
      timeLineID: 1
    cluster-example-2:
      isPrimary: false
      timeLineID: 1
    cluster-example-3:
      isPrimary: false
      timeLineID: 1
  instancesStatus:
    healthy:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  latestGeneratedNode: 3
  managedRolesStatus: {}
  phase: Cluster in healthy state
  poolerIntegrations:
    pgBouncerIntegration: {}
  pvcCount: 3
  readService: cluster-example-r
  readyInstances: 3
  secretsResourceVersion:
    applicationSecretVersion: "19755"
    clientCaSecretVersion: "19752"
    replicationSecretVersion: "19754"
    serverCaSecretVersion: "19752"
    serverSecretVersion: "19753"
  switchReplicaClusterStatus: {}
  targetPrimary: cluster-example-1
  targetPrimaryTimestamp: "2025-04-25T20:44:26.214164Z"
  timelineID: 1
  topology:
    instances:
      cluster-example-1: {}
      cluster-example-2: {}
      cluster-example-3: {}
    nodesUsed: 3
    successfullyExtracted: true
  writeService: cluster-example-rw

View File

@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/yaml" "sigs.k8s.io/yaml"
"github.com/argoproj/gitops-engine/pkg/diff" "github.com/argoproj/gitops-engine/pkg/diff"
@@ -26,61 +27,115 @@ func (t testNormalizer) Normalize(un *unstructured.Unstructured) error {
    if un == nil {
        return nil
    }
    // Disambiguate resources by apiVersion group to avoid collisions on Kind names
    gv, err := schema.ParseGroupVersion(un.GetAPIVersion())
    if err != nil {
        return fmt.Errorf("failed to parse apiVersion for %s: %w", un.GetKind(), err)
    }
    group := gv.Group
    // First, group-specific, then kind-specific normalization
    switch group {
    case "batch":
        if un.GetKind() == "Job" {
            return t.normalizeJob(un)
        }
    case "apps":
        switch un.GetKind() {
        case "DaemonSet", "Deployment", "StatefulSet":
            if err := setRestartedAtAnnotationOnPodTemplate(un); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
        if un.GetKind() == "Deployment" {
            if err := unstructured.SetNestedField(un.Object, nil, "status"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
            if err := unstructured.SetNestedField(un.Object, nil, "metadata", "creationTimestamp"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
            if err := unstructured.SetNestedField(un.Object, nil, "metadata", "generation"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    case "argoproj.io":
        switch un.GetKind() {
        case "Rollout":
            if err := unstructured.SetNestedField(un.Object, nil, "spec", "restartAt"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        case "Workflow":
            if err := unstructured.SetNestedField(un.Object, nil, "metadata", "resourceVersion"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
            if err := unstructured.SetNestedField(un.Object, nil, "metadata", "uid"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
            if err := unstructured.SetNestedField(un.Object, nil, "metadata", "annotations", "workflows.argoproj.io/scheduled-time"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    case "external-secrets.io":
        switch un.GetKind() {
        case "ExternalSecret", "PushSecret":
            if err := unstructured.SetNestedStringMap(un.Object, map[string]string{"force-sync": "0001-01-01T00:00:00Z"}, "metadata", "annotations"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    case "postgresql.cnpg.io":
        if un.GetKind() == "Cluster" {
            if err := unstructured.SetNestedStringMap(un.Object, map[string]string{"cnpg.io/reloadedAt": "0001-01-01T00:00:00Z", "kubectl.kubernetes.io/restartedAt": "0001-01-01T00:00:00Z"}, "metadata", "annotations"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
            if err := unstructured.SetNestedField(un.Object, nil, "status", "targetPrimaryTimestamp"); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    case "helm.toolkit.fluxcd.io":
        if un.GetKind() == "HelmRelease" {
            if err := setFluxRequestedAtAnnotation(un); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    case "source.toolkit.fluxcd.io":
        switch un.GetKind() {
        case "Bucket", "GitRepository", "HelmChart", "HelmRepository", "OCIRepository":
            if err := setFluxRequestedAtAnnotation(un); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    case "image.toolkit.fluxcd.io":
        switch un.GetKind() {
        case "ImageRepository", "ImageUpdateAutomation":
            if err := setFluxRequestedAtAnnotation(un); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    case "kustomize.toolkit.fluxcd.io":
        if un.GetKind() == "Kustomization" {
            if err := setFluxRequestedAtAnnotation(un); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    case "notification.toolkit.fluxcd.io":
        if un.GetKind() == "Receiver" {
            if err := setFluxRequestedAtAnnotation(un); err != nil {
                return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
            }
        }
    }
    return nil
}

// Helper: normalize restart annotation on pod template used by apps workloads
func setRestartedAtAnnotationOnPodTemplate(un *unstructured.Unstructured) error {
    return unstructured.SetNestedStringMap(un.Object, map[string]string{"kubectl.kubernetes.io/restartedAt": "0001-01-01T00:00:00Z"}, "spec", "template", "metadata", "annotations")
}

// Helper: normalize Flux requestedAt annotation across FluxCD kinds
func setFluxRequestedAtAnnotation(un *unstructured.Unstructured) error {
    return unstructured.SetNestedStringMap(un.Object, map[string]string{"reconcile.fluxcd.io/requestedAt": "By Argo CD at: 0001-01-01T00:00:00"}, "metadata", "annotations")
}

func (t testNormalizer) normalizeJob(un *unstructured.Unstructured) error {
    if conditions, exist, err := unstructured.NestedSlice(un.Object, "status", "conditions"); err != nil {
        return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)