mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-03-05 16:08:49 +01:00
Compare commits
15 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2b6251dfed | ||
|
|
8f903c3a11 | ||
|
|
8d0dde1388 | ||
|
|
784f62ca6d | ||
|
|
33b5043405 | ||
|
|
88fe638aff | ||
|
|
a29703877e | ||
|
|
95e7cdb16f | ||
|
|
122f4db3db | ||
|
|
2d65b26420 | ||
|
|
0ace9bb9a3 | ||
|
|
6398ec3dcb | ||
|
|
732b16fb2a | ||
|
|
024c7e6020 | ||
|
|
26b7fb2c61 |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -20,6 +20,7 @@ node_modules/
|
||||
.kube/
|
||||
./test/cmp/*.sock
|
||||
.envrc.remote
|
||||
.mirrord/
|
||||
.*.swp
|
||||
rerunreport.txt
|
||||
|
||||
|
||||
@@ -75,6 +75,7 @@ const (
|
||||
var defaultPreservedAnnotations = []string{
|
||||
NotifiedAnnotationKey,
|
||||
argov1alpha1.AnnotationKeyRefresh,
|
||||
argov1alpha1.AnnotationKeyHydrate,
|
||||
}
|
||||
|
||||
type deleteInOrder struct {
|
||||
|
||||
@@ -589,6 +589,72 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ensure that hydrate annotation is preserved from an existing app",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
Annotations: map[string]string{
|
||||
"annot-key": "annot-value",
|
||||
v1alpha1.AnnotationKeyHydrate: string(v1alpha1.RefreshTypeNormal),
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "3",
|
||||
Annotations: map[string]string{
|
||||
v1alpha1.AnnotationKeyHydrate: string(v1alpha1.RefreshTypeNormal),
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ensure that configured preserved annotations are preserved from an existing app",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
|
||||
@@ -18,9 +18,11 @@ These are the upcoming releases dates:
|
||||
| v2.13 | Monday, Sep. 16, 2024 | Monday, Nov. 4, 2024 | [Regina Voloshin](https://github.com/reggie-k) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/19513) |
|
||||
| v2.14 | Monday, Dec. 16, 2024 | Monday, Feb. 3, 2025 | [Ryan Umstead](https://github.com/rumstead) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/20869) |
|
||||
| v3.0 | Monday, Mar. 17, 2025 | Tuesday, May 6, 2025 | [Regina Voloshin](https://github.com/reggie-k) | | [checklist](https://github.com/argoproj/argo-cd/issues/21735) |
|
||||
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](#) |
|
||||
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | | [checklist](#) |
|
||||
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | | |
|
||||
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](https://github.com/argoproj/argo-cd/issues/23347) |
|
||||
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | [Michael Crenshaw](https://github.com/crenshaw-dev) | [checklist](https://github.com/argoproj/argo-cd/issues/24539) |
|
||||
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | [Peter Jiang](https://github.com/pjiang-dev) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/25211) |
|
||||
| v3.4 | Monday, Mar. 16, 2026 | Monday, May 4, 2026 | | |
|
||||
| v3.5 | Monday, Jun. 15, 2026 | Monday, Aug. 3, 2026 | | |
|
||||
|
||||
Actual release dates might differ from the plan by a few days.
|
||||
|
||||
|
||||
@@ -55,4 +55,16 @@ spec:
|
||||
+ protocol: UDP
|
||||
+ - port: 53
|
||||
+ protocol: TCP
|
||||
```
|
||||
```
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [route53.aws.crossplane.io/ResourceRecordSet](https://github.com/argoproj/argo-cd/commit/666499f6108124ef7bfa0c6cc616770c6dc4f42c)
|
||||
* [cloudfront.aws.crossplane.io/Distribution](https://github.com/argoproj/argo-cd/commit/21c384f42354ada2b94c18773104527eb27f86bc)
|
||||
* [beat.k8s.elastic.co/Beat](https://github.com/argoproj/argo-cd/commit/5100726fd61617a0001a27233cfe8ac4354bdbed)
|
||||
* [apps.kruise.io/AdvancedCronjob](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/BroadcastJob](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/CloneSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/DaemonSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/StatefulSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [rollouts.kruise.io/Rollout](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
@@ -57,3 +57,24 @@ The affected ApplicationSet fields are the following (jq selector syntax):
|
||||
* `.spec.generators[].clusterDecisionResource.labelSelector`
|
||||
* `.spec.generators[].matrix.generators[].selector`
|
||||
* `.spec.generators[].merge.generators[].selector`
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [core.humio.com/HumioAction](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioAlert](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioCluster](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioIngestToken](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioParser](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioRepository](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioView](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [k8s.mariadb.com/Backup](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/Database](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/Grant](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/MariaDB](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/SqlJob](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/User](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [kafka.strimzi.io/KafkaBridge](https://github.com/argoproj/argo-cd/commit/f13861740c17be1ab261f986532706cdda638b24)
|
||||
* [kafka.strimzi.io/KafkaConnector](https://github.com/argoproj/argo-cd/commit/f13861740c17be1ab261f986532706cdda638b24)
|
||||
* [keda.sh/ScaledObject](https://github.com/argoproj/argo-cd/commit/9bc9ff9c7a3573742a767c38679cbefb4f07c1c0)
|
||||
* [openfaas.com/Function](https://github.com/argoproj/argo-cd/commit/2a05ae02ab90ae06fefa97ed6b9310590d317783)
|
||||
* [camel.apache.org/Integration](https://github.com/argoproj/argo-cd/commit/1e2f5987d25307581cd56b8fe9d329633e0f704f)
|
||||
@@ -68,6 +68,41 @@ The default extension for log files generated by Argo CD when using the "Downloa
|
||||
- Consistency with standard log file conventions.
|
||||
|
||||
If you have any custom scripts or tools that depend on the `.txt` extension, please update them accordingly.
|
||||
|
||||
## Added proxy to kustomize
|
||||
|
||||
Proxy config set on repository credentials / repository templates is now passed down to the `kustomize build` command.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [controlplane.cluster.x-k8s.io/AWSManagedControlPlane](https://github.com/argoproj/argo-cd/commit/f1105705126153674c79f69b5d9c9647360d16f5)
|
||||
* [policy.open-cluster-management.io/CertificatePolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/ConfigurationPolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/OperatorPolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/Policy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [PodDisruptionBudget](https://github.com/argoproj/argo-cd/commit/e86258d8a5049260b841abc0ef1fd7f7a4b7cd45)
|
||||
* [cluster.x-k8s.io/MachinePool](https://github.com/argoproj/argo-cd/commit/59e00911304288b4f96889bf669b6ed2aecdf31b)
|
||||
* [lifecycle.keptn.sh/KeptnWorkloadVersion](https://github.com/argoproj/argo-cd/commit/ddc0b0fd3fa7e0b53170582846b20be23c301185)
|
||||
* [numaplane.numaproj.io/ISBServiceRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [numaplane.numaproj.io/NumaflowControllerRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [numaplane.numaproj.io/PipelineRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [rds.aws.crossplane.io/DBCluster](https://github.com/argoproj/argo-cd/commit/f26b76a7aa81637474cfb7992629ea1007124606)
|
||||
* [rds.aws.crossplane.io/DBInstance](https://github.com/argoproj/argo-cd/commit/f26b76a7aa81637474cfb7992629ea1007124606)
|
||||
* [iam.aws.crossplane.io/Policy](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [iam.aws.crossplane.io/RolePolicyAttachment](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [iam.aws.crossplane.io/Role](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [s3.aws.crossplane.io/Bucket](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [metrics.keptn.sh/KeptnMetric](https://github.com/argoproj/argo-cd/commit/326cc4a06b2cb5ac99797d3f04c2d4c48b8692e2)
|
||||
* [metrics.keptn.sh/Analysis](https://github.com/argoproj/argo-cd/commit/e26c105e527ed262cc5dc838a793841017ba316a)
|
||||
* [numaplane.numaproj.io/MonoVertexRollout](https://github.com/argoproj/argo-cd/commit/32ee00f1f494f69cc84d1881dda70ce514e1f737)
|
||||
* [helm.toolkit.fluxcd.io/HelmRelease](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImagePolicy](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImageRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImageUpdateAutomation](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [kustomize.toolkit.fluxcd.io/Kustomization](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [notification.toolkit.fluxcd.io/Receiver](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/Bucket](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/GitRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/HelmChart](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/HelmRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/OCIRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
|
||||
@@ -20,3 +20,27 @@ the [CLI and Application CR](https://argo-cd.readthedocs.io/en/latest/user-guide
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [platform.confluent.io/Connector](https://github.com/argoproj/argo-cd/commit/99efafb55a553a9ab962d56c20dab54ba65b7ae0)
|
||||
* [addons.cluster.x-k8s.io/ClusterResourceSet](https://github.com/argoproj/argo-cd/commit/fdf539dc6a027ef975fde23bf734f880570ccdc3)
|
||||
* [numaflow.numaproj.io/InterStepBufferService](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/MonoVertex](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/Pipeline](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/Vertex](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [acid.zalan.do/Postgresql](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [grafana.integreatly.org/Grafana](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [grafana.integreatly.org/GrafanaDatasource](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [k8s.keycloak.org/Keycloak](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [solr.apache.org/SolrCloud](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [gateway.solo.io/Gateway](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/MatchableHttpGateway](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/RouteOption](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/RouteTable](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/VirtualHostOption](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/VirtualService](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Proxy](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Settings](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Upstream](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/UpstreamGroup](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
@@ -501,3 +501,7 @@ More details for ignored resource updates in the [Diffing customization](../../u
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* No new added health checks
|
||||
@@ -63,3 +63,25 @@ to the [release notes](https://github.com/kubernetes-sigs/kustomize/releases/tag
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [core.spinkube.dev/SpinApp](https://github.com/argoproj/argo-cd/commit/7d6604404fd3b7d77124f9623a2d7a12cc24a0bb)
|
||||
* [opentelemetry.io/OpenTelemetryCollector](https://github.com/argoproj/argo-cd/commit/65464d8b77941c65499028bb14172fc40e62e38b)
|
||||
* [logstash.k8s.elastic.co/Logstash](https://github.com/argoproj/argo-cd/commit/8f1f5c7234e694a4830744f92e1b0f8d1e3cd43d)
|
||||
* [kyverno.io/Policy](https://github.com/argoproj/argo-cd/commit/e578b85410f748c6c7b4e10ff1a5fdbca09b3328)
|
||||
* [projectcontour.io/HTTPProxy](https://github.com/argoproj/argo-cd/commit/ce4b7a28cc77959fab5b6fedd14b1f9e9a4af4f7)
|
||||
* [grafana.integreatly.org/GrafanaDashboard](https://github.com/argoproj/argo-cd/commit/5a3a10479380eb39f1c145babdf94ed1a72d054c)
|
||||
* [grafana.integreatly.org/GrafanaFolder](https://github.com/argoproj/argo-cd/commit/5a3a10479380eb39f1c145babdf94ed1a72d054c)
|
||||
* [postgresql.cnpg.io/Cluster](https://github.com/argoproj/argo-cd/commit/f4edcf7717940e44a141dadb5ca8c5fc11951cb2)
|
||||
* [gateway.networking.k8s.io/GRPCRoute](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [gateway.networking.k8s.io/Gateway](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [gateway.networking.k8s.io/HTTPRoute](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [rabbitmq.com/Binding](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Exchange](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Permission](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Policy](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Queue](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Shovel](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/User](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Vhost](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
@@ -92,3 +92,30 @@ credentials of project-scoped repositories and clusters.
|
||||
The `resources` field of the `status` resource of an ApplicationSet is now limited to 5000 elements by default. This is
|
||||
to prevent status bloat and exceeding etcd limits. The limit can be configured by setting the `applicationsetcontroller.status.max.resources.count`
|
||||
field in the `argocd-cmd-params-cm` ConfigMap.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [datadoghq.com/DatadogMetric](https://github.com/argoproj/argo-cd/commit/5c9a5ef9a65f8e04e729fbae54a9310c0a42f6c2)
|
||||
* [CronJob](https://github.com/argoproj/argo-cd/commit/d3de4435ce86f3f85a4cc58978b2544af2ac4248)
|
||||
* [promoter.argoproj.io/ArgoCDCommitStatus](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/ChangeTransferPolicy](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/CommitStatus](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/PromotionStrategy](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/PullRequest](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [coralogix.com/Alert](https://github.com/argoproj/argo-cd/commit/dcf1965c529790855647f036e4e7ea0323fbf812)
|
||||
* [coralogix.com/RecordingRuleGroupSet](https://github.com/argoproj/argo-cd/commit/dcf1965c529790855647f036e4e7ea0323fbf812)
|
||||
* [projectcontour.io/ExtensionService](https://github.com/argoproj/argo-cd/commit/4e63bc756394d93c684b6b8e8b3856e0e6b3f199)
|
||||
* [clickhouse-keeper.altinity.com/ClickHouseKeeperInstallation](https://github.com/argoproj/argo-cd/commit/c447628913da1c0134bbb1d21a9ae366804b4a8e)
|
||||
* [clickhouse.altinity.com/ClickHouseInstallation](https://github.com/argoproj/argo-cd/commit/c447628913da1c0134bbb1d21a9ae366804b4a8e)
|
||||
* [apps.3scale.net/APIManager](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ActiveDoc](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ApplicationAuth](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Application](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Backend](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/CustomPolicyDefinition](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/DeveloperAccount](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/DeveloperUser](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/OpenAPI](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Product](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ProxyConfigPromote](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Tenant](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
@@ -17,11 +17,12 @@ Adding the argocd.argoproj.io/hook annotation to a resource will assign it to a
|
||||
|
||||
## How do phases work?
|
||||
|
||||
Argo CD will respect resources assigned to different phases, during a sync operation Argo CD will do the following.
|
||||
Argo CD will respect resources assigned to different phases, during a sync operation Argo CD will do the following:
|
||||
|
||||
1. Apply all the resources marked as PreSync hooks. If any of them fails, the whole sync process will stop and will be marked as failed.
|
||||
2. Apply all the resources marked as Sync hooks. If any of them fails, the whole sync process will be marked as failed. Hooks marked with SyncFail will also run.
|
||||
3. Apply all the resources marked as PostSync hooks. If any of them fails, the whole sync process will be marked as failed.
|
||||
|
||||
Apply all the resources marked as PreSync hooks. If any of them fails the whole sync process will stop and will be marked as failed
|
||||
Apply all the resources marked as Sync hooks. If any of them fails the whole sync process will be marked as failed. Hooks marked with SyncFail will also run
|
||||
Apply all the resources marked as PostSync hooks. If any of them fails the whole sync process will be marked as failed.
|
||||
Hooks marked with Skip will not be applied.
|
||||
|
||||
Here is a graphical overview of the sync process:
|
||||
@@ -54,8 +55,9 @@ Argo CD also offers an alternative method of changing the sync order of resource
|
||||
Hooks and resources are assigned to wave 0 by default. The wave can be negative, so you can create a wave that runs before all other resources.
|
||||
|
||||
When a sync operation takes place, Argo CD will:
|
||||
Order all resources according to their wave (lowest to highest)
|
||||
Apply the resources according to the resulting sequence
|
||||
|
||||
1. Order all resources according to their wave (lowest to highest)
|
||||
2. Apply the resources according to the resulting sequence
|
||||
|
||||
There is currently a delay between each sync wave in order to give other controllers a chance to react to the spec change that was just applied. This also prevents Argo CD from assessing resource health too quickly (against the stale object), causing hooks to fire prematurely. The current delay between each sync wave is 2 seconds and can be configured via the environment variable ARGOCD_SYNC_WAVE_DELAY.
|
||||
|
||||
@@ -67,16 +69,16 @@ While you can use sync waves on their own, for maximum flexibility you can combi
|
||||
|
||||
When Argo CD starts a sync, it orders the resources in the following precedence:
|
||||
|
||||
The phase
|
||||
The wave they are in (lower values first)
|
||||
By kind (e.g. namespaces first and then other Kubernetes resources, followed by custom resources)
|
||||
By name
|
||||
1. The phase
|
||||
2. The wave they are in (lower values first)
|
||||
3. By kind (e.g. namespaces first and then other Kubernetes resources, followed by custom resources)
|
||||
4. By name
|
||||
|
||||
Once the order is defined:
|
||||
|
||||
First Argo CD determines the number of the first wave to apply. This is the first number where any resource is out-of-sync or unhealthy.
|
||||
It applies resources in that wave.
|
||||
It repeats this process until all phases and waves are in-sync and healthy.
|
||||
1. First, Argo CD determines the number of the first wave to apply. This is the first number where any resource is out-of-sync or unhealthy.
|
||||
2. It applies resources in that wave.
|
||||
3. It repeats this process until all phases and waves are in-sync and healthy.
|
||||
|
||||
Because an application can have resources that are unhealthy in the first wave, it may be that the app can never get to healthy.
|
||||
|
||||
|
||||
20
go.mod
20
go.mod
@@ -12,7 +12,7 @@ require (
|
||||
github.com/Masterminds/sprig/v3 v3.3.0
|
||||
github.com/TomOnTime/utfutil v1.0.0
|
||||
github.com/alicebob/miniredis/v2 v2.35.0
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251108235403-13d5172d3ee2
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff
|
||||
github.com/argoproj/pkg v0.13.6
|
||||
github.com/argoproj/pkg/v2 v2.0.1
|
||||
@@ -92,11 +92,11 @@ require (
|
||||
go.opentelemetry.io/otel v1.38.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
|
||||
go.opentelemetry.io/otel/sdk v1.38.0
|
||||
golang.org/x/crypto v0.42.0
|
||||
golang.org/x/net v0.44.0
|
||||
golang.org/x/crypto v0.46.0
|
||||
golang.org/x/net v0.47.0
|
||||
golang.org/x/oauth2 v0.31.0
|
||||
golang.org/x/sync v0.17.0
|
||||
golang.org/x/term v0.35.0
|
||||
golang.org/x/sync v0.19.0
|
||||
golang.org/x/term v0.38.0
|
||||
golang.org/x/time v0.13.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5
|
||||
google.golang.org/grpc v1.75.1
|
||||
@@ -267,10 +267,10 @@ require (
|
||||
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
golang.org/x/mod v0.30.0 // indirect
|
||||
golang.org/x/sys v0.39.0 // indirect
|
||||
golang.org/x/text v0.32.0 // indirect
|
||||
golang.org/x/tools v0.39.0 // indirect
|
||||
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect
|
||||
gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
@@ -290,7 +290,7 @@ require (
|
||||
k8s.io/controller-manager v0.34.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kube-aggregator v0.34.0 // indirect
|
||||
k8s.io/kubernetes v1.34.0 // indirect
|
||||
k8s.io/kubernetes v1.34.2 // indirect
|
||||
nhooyr.io/websocket v1.8.7 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
|
||||
36
go.sum
36
go.sum
@@ -113,8 +113,8 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251108235403-13d5172d3ee2 h1:g9XclEd+1mYQtpLL3rMgYbMUdB3kEppw3+Jd1/H54VM=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251108235403-13d5172d3ee2/go.mod h1:2nqYZBhj8CfVZb3ATakZpi1KNb/yc7mpadIHslicTFI=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d h1:iUJYrbSvpV9n8vyl1sBt1GceM60HhHfnHxuzcm5apDg=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d/go.mod h1:PauXVUVcfiTgC+34lDdWzPS101g4NpsUtDAjFBnWf94=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff h1:pGGAeHIktPuYCRl1Z540XdxPFnedqyUhJK4VgpyJZfY=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
|
||||
github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM=
|
||||
@@ -974,8 +974,8 @@ golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -1018,8 +1018,8 @@ golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1085,8 +1085,8 @@ golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1111,8 +1111,8 @@ golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1195,8 +1195,8 @@ golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b/go.mod h1:4ZwOYna0/zsOKwuR5X/m0QFOJpSZvAxFfkQT+Erd9D4=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@@ -1223,8 +1223,8 @@ golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
|
||||
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1249,8 +1249,8 @@ golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1434,8 +1434,8 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs=
|
||||
k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4=
|
||||
k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w=
|
||||
k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
|
||||
k8s.io/kubernetes v1.34.2 h1:WQdDvYJazkmkwSncgNwGvVtaCt4TYXIU3wSMRgvp3MI=
|
||||
k8s.io/kubernetes v1.34.2/go.mod h1:m6pZk6a179pRo2wsTiCPORJ86iOEQmfIzUvtyEF8BwA=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.1
|
||||
newTag: v3.2.3
|
||||
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.1
|
||||
newTag: v3.2.3
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
12
manifests/core-install-with-hydrator.yaml
generated
12
manifests/core-install-with-hydrator.yaml
generated
@@ -24850,7 +24850,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -24985,7 +24985,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25113,7 +25113,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25410,7 +25410,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25462,7 +25462,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -25810,7 +25810,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
10
manifests/core-install.yaml
generated
10
manifests/core-install.yaml
generated
@@ -24818,7 +24818,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -24947,7 +24947,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25244,7 +25244,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25296,7 +25296,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -25644,7 +25644,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.1
|
||||
newTag: v3.2.3
|
||||
|
||||
@@ -12,7 +12,7 @@ patches:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.1
|
||||
newTag: v3.2.3
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/applicationset-controller
|
||||
|
||||
18
manifests/ha/install-with-hydrator.yaml
generated
18
manifests/ha/install-with-hydrator.yaml
generated
@@ -26216,7 +26216,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -26351,7 +26351,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26502,7 +26502,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -26598,7 +26598,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -26722,7 +26722,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -27045,7 +27045,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -27097,7 +27097,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -27471,7 +27471,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -27855,7 +27855,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/ha/install.yaml
generated
16
manifests/ha/install.yaml
generated
@@ -26186,7 +26186,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -26338,7 +26338,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -26434,7 +26434,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -26558,7 +26558,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -26881,7 +26881,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26933,7 +26933,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -27307,7 +27307,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -27691,7 +27691,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/ha/namespace-install-with-hydrator.yaml
generated
18
manifests/ha/namespace-install-with-hydrator.yaml
generated
@@ -1897,7 +1897,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -2032,7 +2032,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2183,7 +2183,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2279,7 +2279,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2403,7 +2403,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2726,7 +2726,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2778,7 +2778,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -3152,7 +3152,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3536,7 +3536,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/ha/namespace-install.yaml
generated
16
manifests/ha/namespace-install.yaml
generated
@@ -1867,7 +1867,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -2019,7 +2019,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2115,7 +2115,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2239,7 +2239,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2562,7 +2562,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2614,7 +2614,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2988,7 +2988,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3372,7 +3372,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/install-with-hydrator.yaml
generated
18
manifests/install-with-hydrator.yaml
generated
@@ -25294,7 +25294,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -25429,7 +25429,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25580,7 +25580,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -25676,7 +25676,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -25778,7 +25778,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -26075,7 +26075,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26127,7 +26127,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -26499,7 +26499,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -26883,7 +26883,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/install.yaml
generated
16
manifests/install.yaml
generated
@@ -25262,7 +25262,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -25414,7 +25414,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -25510,7 +25510,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -25612,7 +25612,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25909,7 +25909,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25961,7 +25961,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -26333,7 +26333,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -26717,7 +26717,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/namespace-install-with-hydrator.yaml
generated
18
manifests/namespace-install-with-hydrator.yaml
generated
@@ -975,7 +975,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1110,7 +1110,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1261,7 +1261,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1357,7 +1357,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1459,7 +1459,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1756,7 +1756,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1808,7 +1808,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2180,7 +2180,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2564,7 +2564,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/namespace-install.yaml
generated
16
manifests/namespace-install.yaml
generated
@@ -943,7 +943,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1095,7 +1095,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1191,7 +1191,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1293,7 +1293,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1590,7 +1590,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1642,7 +1642,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2014,7 +2014,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2398,7 +2398,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.3
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -2407,7 +2407,7 @@ func (s *Server) TerminateOperation(ctx context.Context, termOpReq *application.
|
||||
}
|
||||
log.Warnf("failed to set operation for app %q due to update conflict. retrying again...", *termOpReq.Name)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
_, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
|
||||
a, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting application by name: %w", err)
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
k8sbatchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -4030,3 +4031,75 @@ func TestServerSideDiff(t *testing.T) {
|
||||
assert.Contains(t, err.Error(), "application")
|
||||
})
|
||||
}
|
||||
|
||||
// TestTerminateOperationWithConflicts tests that TerminateOperation properly handles
|
||||
// concurrent update conflicts by retrying with the fresh application object.
|
||||
//
|
||||
// This test reproduces a bug where the retry loop discards the fresh app object
|
||||
// fetched from Get(), causing all retries to fail with stale resource versions.
|
||||
func TestTerminateOperationWithConflicts(t *testing.T) {
|
||||
testApp := newTestApp()
|
||||
testApp.ResourceVersion = "1"
|
||||
testApp.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
}
|
||||
testApp.Status.OperationState = &v1alpha1.OperationState{
|
||||
Operation: *testApp.Operation,
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
|
||||
appServer := newTestAppServer(t, testApp)
|
||||
ctx := context.Background()
|
||||
|
||||
// Get the fake clientset from the deepCopy wrapper
|
||||
fakeAppCs := appServer.appclientset.(*deepCopyAppClientset).GetUnderlyingClientSet().(*apps.Clientset)
|
||||
|
||||
getCallCount := 0
|
||||
updateCallCount := 0
|
||||
|
||||
// Remove default reactors and add our custom ones
|
||||
fakeAppCs.ReactionChain = nil
|
||||
|
||||
// Mock Get to return original version first, then fresh version
|
||||
fakeAppCs.AddReactor("get", "applications", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
getCallCount++
|
||||
freshApp := testApp.DeepCopy()
|
||||
if getCallCount == 1 {
|
||||
// First Get (for initialization) returns original version
|
||||
freshApp.ResourceVersion = "1"
|
||||
} else {
|
||||
// Subsequent Gets (during retry) return fresh version
|
||||
freshApp.ResourceVersion = "2"
|
||||
}
|
||||
return true, freshApp, nil
|
||||
})
|
||||
|
||||
// Mock Update to return conflict on first call, success on second
|
||||
fakeAppCs.AddReactor("update", "applications", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
updateCallCount++
|
||||
updateAction := action.(kubetesting.UpdateAction)
|
||||
app := updateAction.GetObject().(*v1alpha1.Application)
|
||||
|
||||
// First call (with original resource version): return conflict
|
||||
if app.ResourceVersion == "1" {
|
||||
return true, nil, apierrors.NewConflict(
|
||||
schema.GroupResource{Group: "argoproj.io", Resource: "applications"},
|
||||
app.Name,
|
||||
stderrors.New("the object has been modified"),
|
||||
)
|
||||
}
|
||||
|
||||
// Second call (with refreshed resource version from Get): return success
|
||||
updatedApp := app.DeepCopy()
|
||||
return true, updatedApp, nil
|
||||
})
|
||||
|
||||
// Attempt to terminate the operation
|
||||
_, err := appServer.TerminateOperation(ctx, &application.OperationTerminateRequest{
|
||||
Name: ptr.To(testApp.Name),
|
||||
})
|
||||
|
||||
// Should succeed after retrying with the fresh app
|
||||
require.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, updateCallCount, 2, "Update should be called at least twice (once with conflict, once with success)")
|
||||
}
|
||||
|
||||
@@ -1220,9 +1220,12 @@ func (server *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWeb
|
||||
|
||||
terminalOpts := application.TerminalOptions{DisableAuth: server.DisableAuth, Enf: server.enf}
|
||||
|
||||
// SSO ClientApp
|
||||
server.ssoClientApp, _ = oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
|
||||
|
||||
terminal := application.NewHandler(server.appLister, server.Namespace, server.ApplicationNamespaces, server.db, appResourceTreeFn, server.settings.ExecShells, server.sessionMgr, &terminalOpts).
|
||||
WithFeatureFlagMiddleware(server.settingsMgr.GetSettings)
|
||||
th := util_session.WithAuthMiddleware(server.DisableAuth, server.sessionMgr, terminal)
|
||||
th := util_session.WithAuthMiddleware(server.DisableAuth, server.settings.IsSSOConfigured(), server.ssoClientApp, server.sessionMgr, terminal)
|
||||
mux.Handle("/terminal", th)
|
||||
|
||||
// Proxy extension is currently an alpha feature and is disabled
|
||||
@@ -1252,7 +1255,7 @@ func (server *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWeb
|
||||
swagger.ServeSwaggerUI(mux, assets.SwaggerJSON, "/swagger-ui", server.RootPath)
|
||||
healthz.ServeHealthCheck(mux, server.healthCheck)
|
||||
|
||||
// Dex reverse proxy and client app and OAuth2 login/callback
|
||||
// Dex reverse proxy and OAuth2 login/callback
|
||||
server.registerDexHandlers(mux)
|
||||
|
||||
// Webhook handler for git events (Note: cache timeouts are hardcoded because API server does not write to cache and not really using them)
|
||||
@@ -1304,7 +1307,7 @@ func enforceContentTypes(handler http.Handler, types []string) http.Handler {
|
||||
func registerExtensions(mux *http.ServeMux, a *ArgoCDServer, metricsReg HTTPMetricsRegistry) {
|
||||
a.log.Info("Registering extensions...")
|
||||
extHandler := http.HandlerFunc(a.extensionManager.CallExtension())
|
||||
authMiddleware := a.sessionMgr.AuthMiddlewareFunc(a.DisableAuth)
|
||||
authMiddleware := a.sessionMgr.AuthMiddlewareFunc(a.DisableAuth, a.settings.IsSSOConfigured(), a.ssoClientApp)
|
||||
// auth middleware ensures that requests to all extensions are authenticated first
|
||||
mux.Handle(extension.URLPrefix+"/", authMiddleware(extHandler))
|
||||
|
||||
@@ -1358,7 +1361,7 @@ func (server *ArgoCDServer) serveExtensions(extensionsSharedPath string, w http.
|
||||
}
|
||||
}
|
||||
|
||||
// registerDexHandlers will register dex HTTP handlers, creating the OAuth client app
|
||||
// registerDexHandlers will register dex HTTP handlers
|
||||
func (server *ArgoCDServer) registerDexHandlers(mux *http.ServeMux) {
|
||||
if !server.settings.IsSSOConfigured() {
|
||||
return
|
||||
@@ -1366,7 +1369,6 @@ func (server *ArgoCDServer) registerDexHandlers(mux *http.ServeMux) {
|
||||
// Run dex OpenID Connect Identity Provider behind a reverse proxy (served at /api/dex)
|
||||
var err error
|
||||
mux.HandleFunc(common.DexAPIEndpoint+"/", dexutil.NewDexHTTPReverseProxy(server.DexServerAddr, server.BaseHRef, server.DexTLSConfig))
|
||||
server.ssoClientApp, err = oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
|
||||
errorsutil.CheckError(err)
|
||||
mux.HandleFunc(common.LoginEndpoint, server.ssoClientApp.HandleLogin)
|
||||
mux.HandleFunc(common.CallbackEndpoint, server.ssoClientApp.HandleCallback)
|
||||
@@ -1577,34 +1579,15 @@ func (server *ArgoCDServer) getClaims(ctx context.Context) (jwt.Claims, string,
|
||||
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session: %v", err)
|
||||
}
|
||||
|
||||
// Some SSO implementations (Okta) require a call to
|
||||
// the OIDC user info path to get attributes like groups
|
||||
// we assume that everywhere in argocd jwt.MapClaims is used as type for interface jwt.Claims
|
||||
// otherwise this would cause a panic
|
||||
var groupClaims jwt.MapClaims
|
||||
if groupClaims, ok = claims.(jwt.MapClaims); !ok {
|
||||
if tmpClaims, ok := claims.(*jwt.MapClaims); ok {
|
||||
groupClaims = *tmpClaims
|
||||
}
|
||||
}
|
||||
iss := jwtutil.StringField(groupClaims, "iss")
|
||||
if iss != util_session.SessionManagerClaimsIssuer && server.settings.UserInfoGroupsEnabled() && server.settings.UserInfoPath() != "" {
|
||||
userInfo, unauthorized, err := server.ssoClientApp.GetUserInfo(groupClaims, server.settings.IssuerURL(), server.settings.UserInfoPath())
|
||||
if unauthorized {
|
||||
log.Errorf("error while quering userinfo endpoint: %v", err)
|
||||
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session")
|
||||
}
|
||||
finalClaims := claims
|
||||
if server.settings.IsSSOConfigured() {
|
||||
finalClaims, err = server.ssoClientApp.SetGroupsFromUserInfo(claims, util_session.SessionManagerClaimsIssuer)
|
||||
if err != nil {
|
||||
log.Errorf("error fetching user info endpoint: %v", err)
|
||||
return claims, "", status.Errorf(codes.Internal, "invalid userinfo response")
|
||||
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session: %v", err)
|
||||
}
|
||||
if groupClaims["sub"] != userInfo["sub"] {
|
||||
return claims, "", status.Error(codes.Unknown, "subject of claims from user info endpoint didn't match subject of idToken, see https://openid.net/specs/openid-connect-core-1_0.html#UserInfo")
|
||||
}
|
||||
groupClaims["groups"] = userInfo["groups"]
|
||||
}
|
||||
|
||||
return groupClaims, newToken, nil
|
||||
return finalClaims, newToken, nil
|
||||
}
|
||||
|
||||
// getToken extracts the token from gRPC metadata or cookie headers
|
||||
|
||||
@@ -3078,6 +3078,12 @@ func TestDeletionConfirmation(t *testing.T) {
|
||||
Then().Expect(OperationPhaseIs(OperationRunning)).
|
||||
When().ConfirmDeletion().
|
||||
Then().Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
// Wait for controller caches to fully settle before deletion
|
||||
// This ensures both the informer and cluster watcher have the latest state
|
||||
When().Refresh(RefreshTypeNormal).
|
||||
Then().
|
||||
When().Delete(true).
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
|
||||
@@ -41,10 +41,10 @@ func TestSyncWithNoDestinationServiceAccountsInProject(t *testing.T) {
|
||||
Given(t).
|
||||
Path("guestbook").
|
||||
When().
|
||||
WithImpersonationEnabled("", nil).
|
||||
CreateFromFile(func(app *v1alpha1.Application) {
|
||||
app.Spec.SyncPolicy = &v1alpha1.SyncPolicy{Automated: &v1alpha1.SyncPolicyAutomated{}}
|
||||
}).
|
||||
WithImpersonationEnabled("", nil).
|
||||
Then().
|
||||
// With the impersonation feature enabled, Application sync must fail
|
||||
// when there are no destination service accounts configured in AppProject
|
||||
|
||||
@@ -3,9 +3,7 @@ package db
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
@@ -141,22 +139,6 @@ func NewDB(namespace string, settingsMgr *settings.SettingsManager, kubeclientse
|
||||
}
|
||||
}
|
||||
|
||||
func (db *db) getSecret(name string, cache map[string]*corev1.Secret) (*corev1.Secret, error) {
|
||||
if _, ok := cache[name]; !ok {
|
||||
secret, err := db.settingsMgr.GetSecretByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cache[name] = secret
|
||||
}
|
||||
return cache[name], nil
|
||||
}
|
||||
|
||||
// StripCRLFCharacter strips the trailing CRLF characters
|
||||
func StripCRLFCharacter(input string) string {
|
||||
return strings.TrimSpace(input)
|
||||
}
|
||||
|
||||
// GetApplicationControllerReplicas gets the replicas of application controller
|
||||
func (db *db) GetApplicationControllerReplicas() int {
|
||||
// get the replicas from application controller deployment, if the application controller deployment does not exist, check for environment variable
|
||||
|
||||
@@ -163,7 +163,7 @@ func TestCreateWriteRepoCredentials(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "https://github.com/argoproj/", creds.URL)
|
||||
|
||||
secret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), RepoURLToSecretName(credSecretPrefix, creds.URL, ""), metav1.GetOptions{})
|
||||
secret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), RepoURLToSecretName(credWriteSecretPrefix, creds.URL, ""), metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, common.AnnotationValueManagedByArgoCD, secret.Annotations[common.AnnotationKeyManagedBy])
|
||||
|
||||
@@ -19,8 +19,12 @@ import (
|
||||
const (
|
||||
// Prefix to use for naming repository secrets
|
||||
repoSecretPrefix = "repo"
|
||||
// Prefix to use for naming repository write secrets
|
||||
repoWriteSecretPrefix = "repo-write"
|
||||
// Prefix to use for naming credential template secrets
|
||||
credSecretPrefix = "creds"
|
||||
// Prefix to use for naming write credential template secrets
|
||||
credWriteSecretPrefix = "creds-write"
|
||||
// The name of the key storing the username in the secret
|
||||
username = "username"
|
||||
// The name of the key storing the password in the secret
|
||||
|
||||
@@ -26,7 +26,11 @@ type secretsRepositoryBackend struct {
|
||||
}
|
||||
|
||||
func (s *secretsRepositoryBackend) CreateRepository(ctx context.Context, repository *appsv1.Repository) (*appsv1.Repository, error) {
|
||||
secName := RepoURLToSecretName(repoSecretPrefix, repository.Repo, repository.Project)
|
||||
secretPrefix := repoSecretPrefix
|
||||
if s.writeCreds {
|
||||
secretPrefix = repoWriteSecretPrefix
|
||||
}
|
||||
secName := RepoURLToSecretName(secretPrefix, repository.Repo, repository.Project)
|
||||
|
||||
repositorySecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -60,12 +64,8 @@ func (s *secretsRepositoryBackend) CreateRepository(ctx context.Context, reposit
|
||||
// the label is found and false otherwise. Will return false if no secret is found with the given
|
||||
// name.
|
||||
func (s *secretsRepositoryBackend) hasRepoTypeLabel(secretName string) (bool, error) {
|
||||
noCache := make(map[string]*corev1.Secret)
|
||||
sec, err := s.db.getSecret(secretName, noCache)
|
||||
sec, err := s.db.settingsMgr.GetSecretByName(secretName)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
_, ok := sec.GetLabels()[common.LabelKeySecretType]
|
||||
@@ -76,7 +76,7 @@ func (s *secretsRepositoryBackend) hasRepoTypeLabel(secretName string) (bool, er
|
||||
}
|
||||
|
||||
func (s *secretsRepositoryBackend) GetRepoCredsBySecretName(_ context.Context, name string) (*appsv1.RepoCreds, error) {
|
||||
secret, err := s.db.getSecret(name, map[string]*corev1.Secret{})
|
||||
secret, err := s.db.settingsMgr.GetSecretByName(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get secret %s: %w", name, err)
|
||||
}
|
||||
@@ -179,7 +179,11 @@ func (s *secretsRepositoryBackend) RepositoryExists(_ context.Context, repoURL,
|
||||
}
|
||||
|
||||
func (s *secretsRepositoryBackend) CreateRepoCreds(ctx context.Context, repoCreds *appsv1.RepoCreds) (*appsv1.RepoCreds, error) {
|
||||
secName := RepoURLToSecretName(credSecretPrefix, repoCreds.URL, "")
|
||||
secretPrefix := credSecretPrefix
|
||||
if s.writeCreds {
|
||||
secretPrefix = credWriteSecretPrefix
|
||||
}
|
||||
secName := RepoURLToSecretName(secretPrefix, repoCreds.URL, "")
|
||||
|
||||
repoCredsSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -400,6 +404,8 @@ func secretToRepository(secret *corev1.Secret) (*appsv1.Repository, error) {
|
||||
return repository, nil
|
||||
}
|
||||
|
||||
// repositoryToSecret updates the given secret with the data from the repository object. It adds the appropriate
|
||||
// labels/annotations, but it does not add any name or namespace metadata.
|
||||
func (s *secretsRepositoryBackend) repositoryToSecret(repository *appsv1.Repository, secret *corev1.Secret) *corev1.Secret {
|
||||
secretCopy := secret.DeepCopy()
|
||||
|
||||
|
||||
@@ -11,13 +11,9 @@ import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
appsv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
@@ -39,8 +35,8 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
InsecureIgnoreHostKey: false,
|
||||
EnableLFS: true,
|
||||
}
|
||||
setupWithK8sObjects := func(objects ...runtime.Object) *fixture {
|
||||
clientset := getClientset(objects...)
|
||||
setup := func() *fixture {
|
||||
clientset := getClientset()
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
|
||||
repoBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
@@ -55,7 +51,7 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
t.Run("will create repository successfully", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
f := setupWithK8sObjects()
|
||||
f := setup()
|
||||
|
||||
// when
|
||||
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
@@ -85,21 +81,26 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
t.Run("will return proper error if secret does not have expected label", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
secret := &corev1.Secret{}
|
||||
s := secretsRepositoryBackend{}
|
||||
updatedSecret := s.repositoryToSecret(repo, secret)
|
||||
delete(updatedSecret.Labels, common.LabelKeySecretType)
|
||||
f := setupWithK8sObjects(updatedSecret)
|
||||
f.clientSet.ReactionChain = nil
|
||||
f.clientSet.AddReactor("create", "secrets", func(_ k8stesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
gr := schema.GroupResource{
|
||||
Group: "v1",
|
||||
Resource: "secrets",
|
||||
}
|
||||
return true, nil, apierrors.NewAlreadyExists(gr, "already exists")
|
||||
})
|
||||
f := setup()
|
||||
|
||||
// when
|
||||
_, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
|
||||
// given - remove the label from the secret
|
||||
secret, err := f.clientSet.CoreV1().Secrets(testNamespace).Get(
|
||||
t.Context(),
|
||||
RepoURLToSecretName(repoSecretPrefix, repo.Repo, ""),
|
||||
metav1.GetOptions{},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
delete(secret.Labels, common.LabelKeySecretType)
|
||||
_, err = f.clientSet.CoreV1().Secrets(testNamespace).Update(t.Context(), secret, metav1.UpdateOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// when - try to create the same repository again
|
||||
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
|
||||
// then
|
||||
@@ -107,41 +108,20 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
assert.Nil(t, output)
|
||||
status, ok := status.FromError(err)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, codes.InvalidArgument, status.Code())
|
||||
assert.Equal(t, codes.InvalidArgument, status.Code(), "got unexpected error: %v", err)
|
||||
})
|
||||
t.Run("will return proper error if secret already exists", func(t *testing.T) {
|
||||
t.Run("will return proper error if secret already exists and does have the proper label", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
secName := RepoURLToSecretName(repoSecretPrefix, repo.Repo, "")
|
||||
secret := &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secName,
|
||||
Namespace: "default",
|
||||
},
|
||||
}
|
||||
s := secretsRepositoryBackend{}
|
||||
updatedSecret := s.repositoryToSecret(repo, secret)
|
||||
f := setupWithK8sObjects(updatedSecret)
|
||||
f.clientSet.ReactionChain = nil
|
||||
f.clientSet.WatchReactionChain = nil
|
||||
f.clientSet.AddReactor("create", "secrets", func(_ k8stesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
gr := schema.GroupResource{
|
||||
Group: "v1",
|
||||
Resource: "secrets",
|
||||
}
|
||||
return true, nil, apierrors.NewAlreadyExists(gr, "already exists")
|
||||
})
|
||||
watcher := watch.NewFakeWithChanSize(1, true)
|
||||
watcher.Add(updatedSecret)
|
||||
f.clientSet.AddWatchReactor("secrets", func(_ k8stesting.Action) (handled bool, ret watch.Interface, err error) {
|
||||
return true, watcher, nil
|
||||
})
|
||||
f := setup()
|
||||
|
||||
// when
|
||||
_, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
|
||||
// when - try to create the same repository again
|
||||
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
|
||||
// then
|
||||
@@ -149,7 +129,7 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
assert.Nil(t, output)
|
||||
status, ok := status.FromError(err)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, codes.AlreadyExists, status.Code())
|
||||
assert.Equal(t, codes.AlreadyExists, status.Code(), "got unexpected error: %v", err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1180,3 +1160,86 @@ func TestRaceConditionInRepositoryOperations(t *testing.T) {
|
||||
assert.Equal(t, repo.Username, finalRepo.Username)
|
||||
assert.Equal(t, repo.Password, finalRepo.Password)
|
||||
}
|
||||
|
||||
func TestCreateReadAndWriteSecretForSameURL(t *testing.T) {
|
||||
clientset := getClientset()
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
|
||||
|
||||
repo := &appsv1.Repository{
|
||||
Name: "TestRepo",
|
||||
Repo: "git@github.com:argoproj/argo-cd.git",
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
}
|
||||
|
||||
// Create read secret
|
||||
readBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
kubeclientset: clientset,
|
||||
settingsMgr: settingsMgr,
|
||||
}, writeCreds: false}
|
||||
_, err := readBackend.CreateRepository(t.Context(), repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create write secret
|
||||
writeBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
kubeclientset: clientset,
|
||||
settingsMgr: settingsMgr,
|
||||
}, writeCreds: true}
|
||||
_, err = writeBackend.CreateRepository(t.Context(), repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert both secrets exist
|
||||
readSecretName := RepoURLToSecretName(repoSecretPrefix, repo.Repo, repo.Project)
|
||||
writeSecretName := RepoURLToSecretName(repoWriteSecretPrefix, repo.Repo, repo.Project)
|
||||
|
||||
readSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), readSecretName, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.LabelValueSecretTypeRepository, readSecret.Labels[common.LabelKeySecretType])
|
||||
|
||||
writeSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), writeSecretName, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.LabelValueSecretTypeRepositoryWrite, writeSecret.Labels[common.LabelKeySecretType])
|
||||
}
|
||||
|
||||
func TestCreateReadAndWriteRepoCredsSecretForSameURL(t *testing.T) {
|
||||
clientset := getClientset()
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
|
||||
|
||||
creds := &appsv1.RepoCreds{
|
||||
URL: "git@github.com:argoproj/argo-cd.git",
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
}
|
||||
|
||||
// Create read creds secret
|
||||
readBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
kubeclientset: clientset,
|
||||
settingsMgr: settingsMgr,
|
||||
}, writeCreds: false}
|
||||
_, err := readBackend.CreateRepoCreds(t.Context(), creds)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create write creds secret
|
||||
writeBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
kubeclientset: clientset,
|
||||
settingsMgr: settingsMgr,
|
||||
}, writeCreds: true}
|
||||
_, err = writeBackend.CreateRepoCreds(t.Context(), creds)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert both secrets exist
|
||||
readSecretName := RepoURLToSecretName(credSecretPrefix, creds.URL, "")
|
||||
writeSecretName := RepoURLToSecretName(credWriteSecretPrefix, creds.URL, "")
|
||||
|
||||
readSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), readSecretName, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.LabelValueSecretTypeRepoCreds, readSecret.Labels[common.LabelKeySecretType])
|
||||
|
||||
writeSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), writeSecretName, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.LabelValueSecretTypeRepoCredsWrite, writeSecret.Labels[common.LabelKeySecretType])
|
||||
}
|
||||
|
||||
@@ -493,7 +493,7 @@ func (a *ClientApp) HandleCallback(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
sub := jwtutil.StringField(claims, "sub")
|
||||
err = a.clientCache.Set(&cache.Item{
|
||||
Key: formatAccessTokenCacheKey(sub),
|
||||
Key: FormatAccessTokenCacheKey(sub),
|
||||
Object: encToken,
|
||||
CacheActionOpts: cache.CacheActionOpts{
|
||||
Expiration: getTokenExpiration(claims),
|
||||
@@ -640,6 +640,39 @@ func createClaimsAuthenticationRequestParameter(requestedClaims map[string]*oidc
|
||||
return oauth2.SetAuthURLParam("claims", string(claimsRequestRAW)), nil
|
||||
}
|
||||
|
||||
// SetGroupsFromUserInfo takes a claims object and adds groups claim from userinfo endpoint if available
|
||||
// This is required by some SSO implementations as they don't provide the groups claim in the ID token
|
||||
// If querying the UserInfo endpoint fails, we return an error to indicate the session is invalid
|
||||
// we assume that everywhere in argocd jwt.MapClaims is used as type for interface jwt.Claims
|
||||
// otherwise this would cause a panic
|
||||
func (a *ClientApp) SetGroupsFromUserInfo(claims jwt.Claims, sessionManagerClaimsIssuer string) (jwt.MapClaims, error) {
|
||||
var groupClaims jwt.MapClaims
|
||||
var ok bool
|
||||
if groupClaims, ok = claims.(jwt.MapClaims); !ok {
|
||||
if tmpClaims, ok := claims.(*jwt.MapClaims); ok {
|
||||
if tmpClaims != nil {
|
||||
groupClaims = *tmpClaims
|
||||
}
|
||||
}
|
||||
}
|
||||
iss := jwtutil.StringField(groupClaims, "iss")
|
||||
if iss != sessionManagerClaimsIssuer && a.settings.UserInfoGroupsEnabled() && a.settings.UserInfoPath() != "" {
|
||||
userInfo, unauthorized, err := a.GetUserInfo(groupClaims, a.settings.IssuerURL(), a.settings.UserInfoPath())
|
||||
if unauthorized {
|
||||
return groupClaims, fmt.Errorf("error while quering userinfo endpoint: %w", err)
|
||||
}
|
||||
if err != nil {
|
||||
return groupClaims, fmt.Errorf("error fetching user info endpoint: %w", err)
|
||||
}
|
||||
if groupClaims["sub"] != userInfo["sub"] {
|
||||
return groupClaims, errors.New("subject of claims from user info endpoint didn't match subject of idToken, see https://openid.net/specs/openid-connect-core-1_0.html#UserInfo")
|
||||
}
|
||||
groupClaims["groups"] = userInfo["groups"]
|
||||
}
|
||||
|
||||
return groupClaims, nil
|
||||
}
|
||||
|
||||
// GetUserInfo queries the IDP userinfo endpoint for claims
|
||||
func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoPath string) (jwt.MapClaims, bool, error) {
|
||||
sub := jwtutil.StringField(actualClaims, "sub")
|
||||
@@ -647,7 +680,7 @@ func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoP
|
||||
var encClaims []byte
|
||||
|
||||
// in case we got it in the cache, we just return the item
|
||||
clientCacheKey := formatUserInfoResponseCacheKey(sub)
|
||||
clientCacheKey := FormatUserInfoResponseCacheKey(sub)
|
||||
if err := a.clientCache.Get(clientCacheKey, &encClaims); err == nil {
|
||||
claimsRaw, err := crypto.Decrypt(encClaims, a.encryptionKey)
|
||||
if err != nil {
|
||||
@@ -664,7 +697,7 @@ func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoP
|
||||
|
||||
// check if the accessToken for the user is still present
|
||||
var encAccessToken []byte
|
||||
err := a.clientCache.Get(formatAccessTokenCacheKey(sub), &encAccessToken)
|
||||
err := a.clientCache.Get(FormatAccessTokenCacheKey(sub), &encAccessToken)
|
||||
// without an accessToken we can't query the user info endpoint
|
||||
// thus the user needs to reauthenticate for argocd to get a new accessToken
|
||||
if errors.Is(err, cache.ErrCacheMiss) {
|
||||
@@ -774,11 +807,11 @@ func getTokenExpiration(claims jwt.MapClaims) time.Duration {
|
||||
}
|
||||
|
||||
// formatUserInfoResponseCacheKey returns the key which is used to store userinfo of user in cache
|
||||
func formatUserInfoResponseCacheKey(sub string) string {
|
||||
func FormatUserInfoResponseCacheKey(sub string) string {
|
||||
return fmt.Sprintf("%s_%s", UserInfoResponseCachePrefix, sub)
|
||||
}
|
||||
|
||||
// formatAccessTokenCacheKey returns the key which is used to store the accessToken of a user in cache
|
||||
func formatAccessTokenCacheKey(sub string) string {
|
||||
func FormatAccessTokenCacheKey(sub string) string {
|
||||
return fmt.Sprintf("%s_%s", AccessTokenCachePrefix, sub)
|
||||
}
|
||||
|
||||
@@ -943,7 +943,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
expectError: true,
|
||||
},
|
||||
},
|
||||
@@ -958,7 +958,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
encrypt bool
|
||||
}{
|
||||
{
|
||||
key: formatAccessTokenCacheKey("randomUser"),
|
||||
key: FormatAccessTokenCacheKey("randomUser"),
|
||||
value: "FakeAccessToken",
|
||||
encrypt: true,
|
||||
},
|
||||
@@ -977,7 +977,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
expectError: true,
|
||||
},
|
||||
},
|
||||
@@ -992,7 +992,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
encrypt bool
|
||||
}{
|
||||
{
|
||||
key: formatAccessTokenCacheKey("randomUser"),
|
||||
key: FormatAccessTokenCacheKey("randomUser"),
|
||||
value: "FakeAccessToken",
|
||||
encrypt: true,
|
||||
},
|
||||
@@ -1011,7 +1011,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
expectError: true,
|
||||
},
|
||||
},
|
||||
@@ -1034,7 +1034,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
encrypt bool
|
||||
}{
|
||||
{
|
||||
key: formatAccessTokenCacheKey("randomUser"),
|
||||
key: FormatAccessTokenCacheKey("randomUser"),
|
||||
value: "FakeAccessToken",
|
||||
encrypt: true,
|
||||
},
|
||||
@@ -1053,7 +1053,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
expectError: true,
|
||||
},
|
||||
},
|
||||
@@ -1086,7 +1086,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
value: "{\"groups\":[\"githubOrg:engineers\"]}",
|
||||
expectEncrypted: true,
|
||||
expectError: false,
|
||||
@@ -1113,7 +1113,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
encrypt bool
|
||||
}{
|
||||
{
|
||||
key: formatAccessTokenCacheKey("randomUser"),
|
||||
key: FormatAccessTokenCacheKey("randomUser"),
|
||||
value: "FakeAccessToken",
|
||||
encrypt: true,
|
||||
},
|
||||
@@ -1172,3 +1172,94 @@ func TestGetUserInfo(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetGroupsFromUserInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inputClaims jwt.MapClaims // function input
|
||||
cacheClaims jwt.MapClaims // userinfo response
|
||||
expectedClaims jwt.MapClaims // function output
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "set correct groups from userinfo endpoint", // enriches the JWT claims with information from the userinfo endpoint, default case
|
||||
inputClaims: jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
cacheClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"githubOrg:example"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []any{"githubOrg:example"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())}, // the groups must be of type any since the response we get was parsed by GetUserInfo and we don't yet know the type of the groups claim
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "return error for wrong userinfo claims returned", // when there's an error in this feature, the claims should be untouched for the rest to still proceed
|
||||
inputClaims: jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
cacheClaims: jwt.MapClaims{"sub": "wrongUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectedClaims: jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "override groups already defined in input claims", // this is expected behavior since input claims might have been truncated (HTTP header 4K limit)
|
||||
inputClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
cacheClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"superusers", "usergroup", "support-group"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []any{"superusers", "usergroup", "support-group"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "empty cache and non-rechable userinfo endpoint", // this will try to reach the userinfo endpoint defined in the test and fail
|
||||
inputClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
cacheClaims: nil, // the test doesn't set the cache for an empty object
|
||||
expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// create the ClientApp
|
||||
userInfoCache := cache.NewInMemoryCache(24 * time.Hour)
|
||||
signature, err := util.MakeSignature(32)
|
||||
require.NoError(t, err, "failed creating signature for settings object")
|
||||
cdSettings := &settings.ArgoCDSettings{
|
||||
ServerSignature: signature,
|
||||
OIDCConfigRAW: `
|
||||
issuer: http://localhost:63231
|
||||
enableUserInfoGroups: true
|
||||
userInfoPath: /`,
|
||||
}
|
||||
a, err := NewClientApp(cdSettings, "", nil, "/argo-cd", userInfoCache)
|
||||
require.NoError(t, err, "failed creating clientapp")
|
||||
|
||||
// prepoluate cache to predict what the GetUserInfo function will return to the SetGroupsFromUserInfo function (without having to mock the userinfo response)
|
||||
encryptionKey, err := cdSettings.GetServerEncryptionKey()
|
||||
require.NoError(t, err, "failed obtaining encryption key from settings")
|
||||
|
||||
// set fake accessToken for function to not return early
|
||||
encAccessToken, err := crypto.Encrypt([]byte("123456"), encryptionKey)
|
||||
require.NoError(t, err, "failed encrypting dummy access token")
|
||||
err = a.clientCache.Set(&cache.Item{
|
||||
Key: FormatAccessTokenCacheKey("randomUser"),
|
||||
Object: encAccessToken,
|
||||
})
|
||||
require.NoError(t, err, "failed setting item to in-memory cache")
|
||||
|
||||
// set cacheClaims to in-memory cache to let GetUserInfo return early with this information (GetUserInfo has a separate test, here we focus on SetUserInfoGroups)
|
||||
if tt.cacheClaims != nil {
|
||||
cacheClaims, err := json.Marshal(tt.cacheClaims)
|
||||
require.NoError(t, err)
|
||||
encCacheClaims, err := crypto.Encrypt([]byte(cacheClaims), encryptionKey)
|
||||
require.NoError(t, err, "failed encrypting dummy access token")
|
||||
err = a.clientCache.Set(&cache.Item{
|
||||
Key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
Object: encCacheClaims,
|
||||
})
|
||||
require.NoError(t, err, "failed setting item to in-memory cache")
|
||||
}
|
||||
|
||||
receivedClaims, err := a.SetGroupsFromUserInfo(tt.inputClaims, "argocd")
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, tt.expectedClaims, receivedClaims) // check that the claims were successfully enriched with what we expect
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -480,9 +480,9 @@ func (mgr *SessionManager) VerifyUsernamePassword(username string, password stri
|
||||
|
||||
// AuthMiddlewareFunc returns a function that can be used as an
|
||||
// authentication middleware for HTTP requests.
|
||||
func (mgr *SessionManager) AuthMiddlewareFunc(disabled bool) func(http.Handler) http.Handler {
|
||||
func (mgr *SessionManager) AuthMiddlewareFunc(disabled bool, isSSOConfigured bool, ssoClientApp *oidcutil.ClientApp) func(http.Handler) http.Handler {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return WithAuthMiddleware(disabled, mgr, h)
|
||||
return WithAuthMiddleware(disabled, isSSOConfigured, ssoClientApp, mgr, h)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -495,26 +495,41 @@ type TokenVerifier interface {
|
||||
// WithAuthMiddleware is an HTTP middleware used to ensure incoming
|
||||
// requests are authenticated before invoking the target handler. If
|
||||
// disabled is true, it will just invoke the next handler in the chain.
|
||||
func WithAuthMiddleware(disabled bool, authn TokenVerifier, next http.Handler) http.Handler {
|
||||
func WithAuthMiddleware(disabled bool, isSSOConfigured bool, ssoClientApp *oidcutil.ClientApp, authn TokenVerifier, next http.Handler) http.Handler {
|
||||
if disabled {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if !disabled {
|
||||
cookies := r.Cookies()
|
||||
tokenString, err := httputil.JoinCookies(common.AuthCookieName, cookies)
|
||||
if err != nil {
|
||||
http.Error(w, "Auth cookie not found", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
claims, _, err := authn.VerifyToken(tokenString)
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid token", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
ctx := r.Context()
|
||||
// Add claims to the context to inspect for RBAC
|
||||
//nolint:staticcheck
|
||||
ctx = context.WithValue(ctx, "claims", claims)
|
||||
r = r.WithContext(ctx)
|
||||
cookies := r.Cookies()
|
||||
tokenString, err := httputil.JoinCookies(common.AuthCookieName, cookies)
|
||||
if err != nil {
|
||||
http.Error(w, "Auth cookie not found", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
claims, _, err := authn.VerifyToken(tokenString)
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid token", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
finalClaims := claims
|
||||
if isSSOConfigured {
|
||||
finalClaims, err = ssoClientApp.SetGroupsFromUserInfo(claims, SessionManagerClaimsIssuer)
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid session", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
// Add claims to the context to inspect for RBAC
|
||||
//nolint:staticcheck
|
||||
ctx = context.WithValue(ctx, "claims", finalClaims)
|
||||
r = r.WithContext(ctx)
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package session
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
@@ -29,7 +30,11 @@ import (
|
||||
apps "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned/fake"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/client/listers/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/test"
|
||||
"github.com/argoproj/argo-cd/v3/util"
|
||||
"github.com/argoproj/argo-cd/v3/util/cache"
|
||||
"github.com/argoproj/argo-cd/v3/util/crypto"
|
||||
jwtutil "github.com/argoproj/argo-cd/v3/util/jwt"
|
||||
"github.com/argoproj/argo-cd/v3/util/oidc"
|
||||
"github.com/argoproj/argo-cd/v3/util/password"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
utiltest "github.com/argoproj/argo-cd/v3/util/test"
|
||||
@@ -236,20 +241,39 @@ func strPointer(str string) *string {
|
||||
|
||||
func TestSessionManager_WithAuthMiddleware(t *testing.T) {
|
||||
handlerFunc := func() func(http.ResponseWriter, *http.Request) {
|
||||
return func(w http.ResponseWriter, _ *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
t.Helper()
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Header().Set("Content-Type", "application/text")
|
||||
_, err := w.Write([]byte("Ok"))
|
||||
require.NoError(t, err, "error writing response: %s", err)
|
||||
|
||||
contextClaims := r.Context().Value("claims")
|
||||
if contextClaims != nil {
|
||||
var gotClaims jwt.MapClaims
|
||||
var ok bool
|
||||
if gotClaims, ok = contextClaims.(jwt.MapClaims); !ok {
|
||||
if tmpClaims, ok := contextClaims.(*jwt.MapClaims); ok && tmpClaims != nil {
|
||||
gotClaims = *tmpClaims
|
||||
}
|
||||
}
|
||||
jsonClaims, err := json.Marshal(gotClaims)
|
||||
require.NoError(t, err, "erorr marshalling claims set by AuthMiddleware")
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, err = w.Write(jsonClaims)
|
||||
require.NoError(t, err, "error writing response: %s", err)
|
||||
} else {
|
||||
w.Header().Set("Content-Type", "application/text")
|
||||
_, err := w.Write([]byte("Ok"))
|
||||
require.NoError(t, err, "error writing response: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
type testCase struct {
|
||||
name string
|
||||
authDisabled bool
|
||||
ssoEnabled bool
|
||||
cookieHeader bool
|
||||
verifiedClaims *jwt.RegisteredClaims
|
||||
verifiedClaims *jwt.MapClaims
|
||||
verifyTokenErr error
|
||||
userInfoCacheClaims *jwt.MapClaims
|
||||
expectedStatusCode int
|
||||
expectedResponseBody *string
|
||||
}
|
||||
@@ -258,47 +282,79 @@ func TestSessionManager_WithAuthMiddleware(t *testing.T) {
|
||||
{
|
||||
name: "will authenticate successfully",
|
||||
authDisabled: false,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: &jwt.RegisteredClaims{},
|
||||
verifiedClaims: &jwt.MapClaims{},
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedResponseBody: strPointer("Ok"),
|
||||
expectedResponseBody: strPointer("{}"),
|
||||
},
|
||||
{
|
||||
name: "will be noop if auth is disabled",
|
||||
authDisabled: true,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: false,
|
||||
verifiedClaims: nil,
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedResponseBody: strPointer("Ok"),
|
||||
},
|
||||
{
|
||||
name: "will return 400 if no cookie header",
|
||||
authDisabled: false,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: false,
|
||||
verifiedClaims: &jwt.RegisteredClaims{},
|
||||
verifiedClaims: &jwt.MapClaims{},
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusBadRequest,
|
||||
expectedResponseBody: nil,
|
||||
},
|
||||
{
|
||||
name: "will return 401 verify token fails",
|
||||
authDisabled: false,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: &jwt.RegisteredClaims{},
|
||||
verifiedClaims: &jwt.MapClaims{},
|
||||
verifyTokenErr: stderrors.New("token error"),
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusUnauthorized,
|
||||
expectedResponseBody: nil,
|
||||
},
|
||||
{
|
||||
name: "will return 200 if claims are nil",
|
||||
authDisabled: false,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: nil,
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedResponseBody: strPointer("Ok"),
|
||||
expectedResponseBody: strPointer("null"),
|
||||
},
|
||||
{
|
||||
name: "will return 401 if sso is enabled but userinfo response not working",
|
||||
authDisabled: false,
|
||||
ssoEnabled: true,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: nil,
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil, // indicates that the userinfo response will not work since cache is empty and userinfo endpoint not rechable
|
||||
expectedStatusCode: http.StatusUnauthorized,
|
||||
expectedResponseBody: strPointer("Invalid session"),
|
||||
},
|
||||
{
|
||||
name: "will return 200 if sso is enabled and userinfo response from cache is valid",
|
||||
authDisabled: false,
|
||||
ssoEnabled: true,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: &jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: &jwt.MapClaims{"sub": "randomUser", "groups": []string{"superusers"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedResponseBody: strPointer("\"groups\":[\"superusers\"]"),
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
@@ -311,7 +367,47 @@ func TestSessionManager_WithAuthMiddleware(t *testing.T) {
|
||||
claims: tc.verifiedClaims,
|
||||
err: tc.verifyTokenErr,
|
||||
}
|
||||
ts := httptest.NewServer(WithAuthMiddleware(tc.authDisabled, tm, mux))
|
||||
clientApp := &oidc.ClientApp{} // all testcases need at least the empty struct for the function to work
|
||||
if tc.ssoEnabled {
|
||||
userInfoCache := cache.NewInMemoryCache(24 * time.Hour)
|
||||
signature, err := util.MakeSignature(32)
|
||||
require.NoError(t, err, "failed creating signature for settings object")
|
||||
cdSettings := &settings.ArgoCDSettings{
|
||||
ServerSignature: signature,
|
||||
OIDCConfigRAW: `
|
||||
issuer: http://localhost:63231
|
||||
enableUserInfoGroups: true
|
||||
userInfoPath: /`,
|
||||
}
|
||||
clientApp, err = oidc.NewClientApp(cdSettings, "", nil, "/argo-cd", userInfoCache)
|
||||
require.NoError(t, err, "failed creating clientapp")
|
||||
|
||||
// prepopulate the cache with claims to return for a userinfo call
|
||||
encryptionKey, err := cdSettings.GetServerEncryptionKey()
|
||||
require.NoError(t, err, "failed obtaining encryption key from settings")
|
||||
// set fake accessToken for GetUserInfo to not return early (can be the same for all cases)
|
||||
encAccessToken, err := crypto.Encrypt([]byte("123456"), encryptionKey)
|
||||
require.NoError(t, err, "failed encrypting dummy access token")
|
||||
err = userInfoCache.Set(&cache.Item{
|
||||
Key: oidc.FormatAccessTokenCacheKey("randomUser"),
|
||||
Object: encAccessToken,
|
||||
})
|
||||
require.NoError(t, err, "failed setting item to in-memory cache")
|
||||
|
||||
// set cacheClaims to in-memory cache to let GetUserInfo return early with this information
|
||||
if tc.userInfoCacheClaims != nil {
|
||||
cacheClaims, err := json.Marshal(tc.userInfoCacheClaims)
|
||||
require.NoError(t, err)
|
||||
encCacheClaims, err := crypto.Encrypt([]byte(cacheClaims), encryptionKey)
|
||||
require.NoError(t, err, "failed encrypting cache Claims")
|
||||
err = userInfoCache.Set(&cache.Item{
|
||||
Key: oidc.FormatUserInfoResponseCacheKey("randomUser"),
|
||||
Object: encCacheClaims,
|
||||
})
|
||||
require.NoError(t, err, "failed setting item to in-memory cache")
|
||||
}
|
||||
}
|
||||
ts := httptest.NewServer(WithAuthMiddleware(tc.authDisabled, tc.ssoEnabled, clientApp, tm, mux))
|
||||
defer ts.Close()
|
||||
req, err := http.NewRequest(http.MethodGet, ts.URL, http.NoBody)
|
||||
require.NoErrorf(t, err, "error creating request: %s", err)
|
||||
|
||||
Reference in New Issue
Block a user