Compare commits

...

48 Commits

Author SHA1 Message Date
github-actions[bot]
7a7cf076c2 Bump version to 3.0.18 on release-3.0 branch (#24703)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: alexmt <426437+alexmt@users.noreply.github.com>
2025-09-22 15:18:02 -07:00
Alexander Matyushentsev
36ce380906 fix: limit number of resources in appset status (#24690) (#24695)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2025-09-22 14:57:49 -07:00
Alexandre Gaudreault
531d96edef ci(release): only set latest release in github when latest (#24525) (#24687)
Signed-off-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
2025-09-22 11:47:55 -04:00
argo-cd-cherry-pick-bot[bot]
dcfb4db550 fix(server): validate new project on update (#23970) (cherry-pick #23973 for 3.0) (#24663)
Signed-off-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-09-19 11:20:56 -04:00
github-actions[bot]
d1dbf20c99 Bump version to 3.0.17 on release-3.0 branch (#24636)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: blakepettersson <1227954+blakepettersson@users.noreply.github.com>
2025-09-18 04:19:24 -10:00
Alexander Matyushentsev
97a87308ab fix: use informer in webhook handler to reduce memory usage (#24622) (#24627)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2025-09-18 08:13:12 +02:00
argo-cd-cherry-pick-bot[bot]
a85fa0947b fix: correct post-delete finalizer removal when cluster not found (cherry-pick #24415 for 3.0) (#24589)
Signed-off-by: Pavel Aborilov <aborilov@gmail.com>
Co-authored-by: Pavel <aborilov@gmail.com>
2025-09-16 16:14:27 -07:00
Fox Piacenti
b729cff932 docs: Update URL for HA manifests to stable. (#24455)
Signed-off-by: Fox Danger Piacenti <fox@opencraft.com>
2025-09-09 12:37:11 +03:00
Nitish Kumar
2a0282d668 fix(3.0): change the appset namespace to server namespace when generating appset (#24479)
Signed-off-by: nitishfy <justnitish06@gmail.com>
2025-09-09 10:45:53 +03:00
OpenGuidou
0af18331eb fix(cherry-pick-3.0): Do not block project update when a cluster referenced in an App doesn't exist (#24449)
Signed-off-by: OpenGuidou <guillaume.doussin@gmail.com>
2025-09-08 11:38:07 -04:00
github-actions[bot]
2798b54c96 Bump version to 3.0.16 on release-3.0 branch (#24426)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: agaudreault <47184027+agaudreault@users.noreply.github.com>
2025-09-05 16:30:36 -04:00
Alexandre Gaudreault
998260452c chore(deps): bump gitops-engine (#24419)
Signed-off-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
2025-09-05 16:23:48 -04:00
Alexandre Gaudreault
50befe995c fix(test): race condition in kubectl metrics (#23382) (#23383) (#24422)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-09-05 15:28:15 -04:00
github-actions[bot]
1a55610f80 Bump version to 3.0.15 on release-3.0 branch (#24402)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: crenshaw-dev <350466+crenshaw-dev@users.noreply.github.com>
2025-09-04 13:31:09 -04:00
github-actions[bot]
5a4ef23d96 Bump version to 3.0.14 on release-3.0 branch (#24396)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: crenshaw-dev <350466+crenshaw-dev@users.noreply.github.com>
2025-09-04 11:48:39 -04:00
Michael Crenshaw
5ebdd714d0 fix(security): repository.GetDetailedProject exposes repo secrets (#24390)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2025-09-04 11:32:52 -04:00
Adrian Berger
ef5b8ca167 fix(cherry-pick-3.0): custom resource health for flux helm repository of type oci (#24340)
Signed-off-by: Adrian Berger <adrian.berger@bedag.ch>
2025-09-02 15:19:13 -04:00
Nitish Kumar
775edda033 chore(cherry-pick-3.0): replace bitnami images (#24101) (#24287)
Signed-off-by: nitishfy <justnitish06@gmail.com>
2025-08-27 14:03:15 +02:00
Anand Francis Joseph
f4d409cf9b fix(appset): prevent idle connection buildup by cloning http.DefaultTransport in Bitbucket SCM/PR generator (#24266)
Signed-off-by: portly-halicore-76 <170707699+portly-halicore-76@users.noreply.github.com>
Signed-off-by: anandf <anjoseph@redhat.com>
Co-authored-by: portly-halicore-76 <170707699+portly-halicore-76@users.noreply.github.com>
2025-08-26 09:53:34 -04:00
github-actions[bot]
083ef929a5 Bump version to 3.0.13 on release-3.0 branch (#24261)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: crenshaw-dev <350466+crenshaw-dev@users.noreply.github.com>
2025-08-25 11:30:39 -04:00
Codey Jenkins
d6e0e5c852 chore: cherry pick #24235 to release-3.0 (#24237)
Signed-off-by: Codey Jenkins <FourFifthsCode@users.noreply.github.com>
Co-authored-by: Matthew Bennett <mtbennett@godaddy.com>
Co-authored-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
2025-08-22 16:55:13 -04:00
rumstead
46e7c008ce fix(server): Send Azure DevOps token via git extra headers (#23478) (#23631) (#24222)
Signed-off-by: Mike Bordon <mikebordon@gmail.com>
Signed-off-by: rumstead <37445536+rumstead@users.noreply.github.com>
Co-authored-by: mikebordon <31316193+mikebordon@users.noreply.github.com>
2025-08-22 09:42:21 -04:00
Anand Francis Joseph
274ab28f8f fix(util): Fix default key exchange algorithms used for SSH connection to be FIPS compliant (#24086) (cherry-pick 3.0) (#24165)
Signed-off-by: anandf <anjoseph@redhat.com>
2025-08-15 11:30:55 +02:00
Alexandre Gaudreault
b1df89bfce fix(lua): allow actions to add items to array (#24136)
Signed-off-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
2025-08-13 17:20:55 -04:00
Blake Pettersson
d76cedea57 fix: kustomize edit add component check (#24100) (cherry-pick 3.0) (#24103)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
2025-08-11 12:02:56 +02:00
Ville Vesilehto
3fb825c36c chore: update Go to 1.24.6 (release-3.0) (#24092)
Signed-off-by: Ville Vesilehto <ville@vesilehto.fi>
2025-08-11 10:38:43 +02:00
gcp-cherry-pick-bot[bot]
7822fbd43a docs: 3.0 migration - added remediation for explicitly syncing apps that use ApplyOutOfSyncOnly=true (cherry-pick #23918) (#23957)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
Co-authored-by: Regina Voloshin <regina.voloshin@codefresh.io>
2025-08-03 23:22:23 -04:00
gcp-cherry-pick-bot[bot]
d11bf1ac88 fix: helm GetTags cache writing (cherry-pick #23865) (#23953)
Signed-off-by: Matthew Clarke <mclarke@spotify.com>
Co-authored-by: Matthew Clarke <matthewclarke47@gmail.com>
Co-authored-by: Linghao Su <linghao.su@daocloud.io>
2025-07-28 23:42:32 +02:00
github-actions[bot]
ed1e2397ef Bump version to 3.0.12 on release-3.0 branch (#23936)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: crenshaw-dev <350466+crenshaw-dev@users.noreply.github.com>
2025-07-25 10:35:18 -07:00
pbhatnagar-oss
5e769f900f fix(metrics): Cherrypick grpc stats fix release 3.0 (#23889)
Signed-off-by: pbhatnagar-oss <pbhatifiwork@gmail.com>
2025-07-23 07:51:52 -07:00
rumstead
fc27c4d099 fix(appset): When Appset is deleted, the controller should reconcile applicationset #23723 (cherry-pick #23823) (#23834)
Signed-off-by: rumstead <37445536+rumstead@users.noreply.github.com>
Co-authored-by: sangeer <86688098+sangdammad@users.noreply.github.com>
2025-07-17 12:18:29 -04:00
github-actions[bot]
240a1833c0 Bump version to 3.0.11 on release-3.0 branch (#23745)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: crenshaw-dev <350466+crenshaw-dev@users.noreply.github.com>
2025-07-10 10:26:31 -04:00
github-actions[bot]
b19defcd40 Bump version to 3.0.10 on release-3.0 branch (#23744)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: crenshaw-dev <350466+crenshaw-dev@users.noreply.github.com>
2025-07-10 10:18:47 -04:00
gcp-cherry-pick-bot[bot]
bbf0582eb0 fix(health): CRD health check message (#23690) (cherry-pick #23691) (#23739)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-07-10 10:18:28 -04:00
gcp-cherry-pick-bot[bot]
ba3d90972c docs(images): add a note about missing images for 3.0 releases (cherry-pick #23741) (#23742)
Signed-off-by: rumstead <37445536+rumstead@users.noreply.github.com>
Co-authored-by: rumstead <37445536+rumstead@users.noreply.github.com>
2025-07-10 10:17:27 -04:00
gcp-cherry-pick-bot[bot]
0fe62852c7 fix(darwin): remove the need for cgo when building a darwin binary on linux (cherry-pick #23507) (#23735)
Signed-off-by: rumstead <37445536+rumstead@users.noreply.github.com>
Co-authored-by: rumstead <37445536+rumstead@users.noreply.github.com>
2025-07-10 09:58:01 -04:00
gcp-cherry-pick-bot[bot]
108dab6e16 docs(images): add a note about missing images for 3.0 releases (#23612) (cherry-pick #23712) (#23714)
Signed-off-by: rumstead <37445536+rumstead@users.noreply.github.com>
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: rumstead <37445536+rumstead@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-07-09 13:56:56 -04:00
Michael Crenshaw
873289c409 fix(server): infer resource status health for apps-in-any-ns (#22944) (#23706)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-07-09 11:20:22 -04:00
gcp-cherry-pick-bot[bot]
3ab0e9c1f5 fix: improves the ui message when an operation is terminated due to controller sync timeout (cherry-pick #23657) (#23671)
Signed-off-by: Patroklos Papapetrou <ppapapetrou76@gmail.com>
Co-authored-by: Papapetrou Patroklos <1743100+ppapapetrou76@users.noreply.github.com>
2025-07-07 10:49:04 +03:00
Alexandre Gaudreault
f7d0ebda31 fix(sync): auto-sync loop when FailOnSharedResource (#23357) (#23639)
Signed-off-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
2025-07-02 15:00:22 -04:00
gcp-cherry-pick-bot[bot]
ea7e2b7905 fix(controller): get commit server url from env (cherry-pick #23536) (#23542)
Signed-off-by: Alexej Disterhoft <alexej.disterhoft@redcare-pharmacy.com>
Co-authored-by: Alexej Disterhoft <alexej@disterhoft.de>
2025-06-24 14:40:49 -04:00
gcp-cherry-pick-bot[bot]
06fd5060a0 fix: kustomize components + monorepos (cherry-pick #23486) (#23539)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: Blake Pettersson <blake.pettersson@gmail.com>
2025-06-24 18:07:24 +02:00
gcp-cherry-pick-bot[bot]
f264eaa40f fix(controller): impersonation with destination name (#23309) (cherry-pick #23504) (#23517)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-06-23 09:52:45 -04:00
gcp-cherry-pick-bot[bot]
be3e9403e9 fix(appset): Add token to enterprise client (#23240) (cherry-pick #23455) (#23476)
Signed-off-by: Collin Walker <cwalker@ancestry.com>
Co-authored-by: Collin Walker <10523817+lets-call-n-walk@users.noreply.github.com>
Co-authored-by: Collin Walker <cwalker@ancestry.com>
2025-06-18 19:50:46 +05:30
github-actions[bot]
a1faf0265f Bump version to 3.0.9 on release-3.0 branch (#23462)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: rumstead <37445536+rumstead@users.noreply.github.com>
2025-06-17 16:45:12 -04:00
gcp-cherry-pick-bot[bot]
8868f91bbd fix(goreleaser): add darwin with CGO enabled and remove static to gorelease (cherry-pick #23457) (#23459)
Signed-off-by: rumstead <37445536+rumstead@users.noreply.github.com>
Co-authored-by: rumstead <37445536+rumstead@users.noreply.github.com>
2025-06-17 16:41:31 -04:00
github-actions[bot]
902826f8c7 Bump version to 3.0.8 on release-3.0 branch (#23451)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: rumstead <37445536+rumstead@users.noreply.github.com>
2025-06-17 11:53:16 -04:00
rumstead
95d8b48624 fix(goreleaser): add darwin with CGO enabled to gorelease (#23438)
Signed-off-by: rumstead <37445536+rumstead@users.noreply.github.com>
2025-06-16 23:01:14 -04:00
101 changed files with 1928 additions and 611 deletions

View File

@@ -14,7 +14,7 @@ on:
env:
# Golang version to use across CI steps
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.24.4'
GOLANG_VERSION: '1.24.6'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}

View File

@@ -53,7 +53,7 @@ jobs:
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.24.4
go-version: 1.24.6
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false
@@ -70,7 +70,7 @@ jobs:
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.24.4
go-version: 1.24.6
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:

View File

@@ -11,7 +11,7 @@ permissions: {}
env:
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.24.4' # Note: go-version must also be set in job argocd-image.with.go-version
GOLANG_VERSION: '1.24.6' # Note: go-version must also be set in job argocd-image.with.go-version
jobs:
argocd-image:
@@ -25,13 +25,49 @@ jobs:
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.24.4
go-version: 1.24.6
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
push: true
secrets:
quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }}
quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}
setup-variables:
name: Setup Release Variables
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
outputs:
is_pre_release: ${{ steps.var.outputs.is_pre_release }}
is_latest_release: ${{ steps.var.outputs.is_latest_release }}
steps:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup variables
id: var
run: |
set -xue
# Fetch all tag information
git fetch --prune --tags --force
LATEST_RELEASE_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | grep -v '-' | tail -n1)
PRE_RELEASE=false
# Check if latest tag is a pre-release
if echo ${{ github.ref_name }} | grep -E -- '-rc[0-9]+$';then
PRE_RELEASE=true
fi
IS_LATEST=false
# Ensure latest release tag matches github.ref_name
if [[ $LATEST_RELEASE_TAG == ${{ github.ref_name }} ]];then
IS_LATEST=true
fi
echo "is_pre_release=$PRE_RELEASE" >> $GITHUB_OUTPUT
echo "is_latest_release=$IS_LATEST" >> $GITHUB_OUTPUT
argocd-image-provenance:
needs: [argocd-image]
permissions:
@@ -50,15 +86,17 @@ jobs:
goreleaser:
needs:
- setup-variables
- argocd-image
- argocd-image-provenance
permissions:
contents: write # used for uploading assets
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
env:
GORELEASER_MAKE_LATEST: ${{ needs.setup-variables.outputs.is_latest_release }}
outputs:
hashes: ${{ steps.hash.outputs.hashes }}
steps:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
@@ -141,7 +179,7 @@ jobs:
permissions:
contents: write # Needed for release uploads
outputs:
hashes: ${{ steps.sbom-hash.outputs.hashes}}
hashes: ${{ steps.sbom-hash.outputs.hashes }}
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
steps:
@@ -219,6 +257,7 @@ jobs:
post-release:
needs:
- setup-variables
- argocd-image
- goreleaser
- generate-sbom
@@ -227,6 +266,8 @@ jobs:
pull-requests: write # Needed to create PR for VERSION update.
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
env:
TAG_STABLE: ${{ needs.setup-variables.outputs.is_latest_release }}
steps:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
@@ -240,27 +281,6 @@ jobs:
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
- name: Check if tag is the latest version and not a pre-release
run: |
set -xue
# Fetch all tag information
git fetch --prune --tags --force
LATEST_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n1)
PRE_RELEASE=false
# Check if latest tag is a pre-release
if echo $LATEST_TAG | grep -E -- '-rc[0-9]+$';then
PRE_RELEASE=true
fi
# Ensure latest tag matches github.ref_name & not a pre-release
if [[ $LATEST_TAG == ${{ github.ref_name }} ]] && [[ $PRE_RELEASE != 'true' ]];then
echo "TAG_STABLE=true" >> $GITHUB_ENV
else
echo "TAG_STABLE=false" >> $GITHUB_ENV
fi
- name: Update stable tag to latest version
run: |
git tag -f stable ${{ github.ref_name }}

View File

@@ -24,8 +24,8 @@ builds:
- -extldflags="-static"
goos:
- linux
- darwin
- windows
- darwin
goarch:
- amd64
- arm64
@@ -46,16 +46,17 @@ builds:
archives:
- id: argocd-archive
builds:
- argocd-cli
- argocd-cli
name_template: |-
{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}
format: binary
formats: [binary]
checksum:
name_template: 'cli_checksums.txt'
algorithm: sha256
release:
make_latest: '{{ .Env.GORELEASER_MAKE_LATEST }}'
prerelease: auto
draft: false
header: |
@@ -80,23 +81,21 @@ release:
All Argo CD container images are signed by cosign. A Provenance is generated for container images and CLI binaries which meet the SLSA Level 3 specifications. See the [documentation](https://argo-cd.readthedocs.io/en/stable/operator-manual/signed-release-assets) on how to verify.
## Release Notes Blog Post
For a detailed breakdown of the key changes and improvements in this release, check out the [official blog post](https://blog.argoproj.io/argo-cd-v2-14-release-candidate-57a664791e2a)
For a detailed breakdown of the key changes and improvements in this release, check out the [official blog post](https://blog.argoproj.io/argo-cd-v2-14-release-candidate-57a664791e2a)
## Upgrading
If upgrading from a different minor version, be sure to read the [upgrading](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) documentation.
footer: |
**Full Changelog**: https://github.com/argoproj/argo-cd/compare/{{ .PreviousTag }}...{{ .Tag }}
<a href="https://argoproj.github.io/cd/"><img src="https://raw.githubusercontent.com/argoproj/argo-site/master/content/pages/cd/gitops-cd.png" width="25%" ></a>
snapshot: #### To be removed for PR
name_template: "2.6.0"
name_template: '2.6.0'
changelog:
use:
github
use: github
sort: asc
abbrev: 0
groups: # Regex use RE2 syntax as defined here: https://github.com/google/re2/wiki/Syntax.
@@ -119,7 +118,4 @@ changelog:
- '^test:'
- '^.*?Bump(\([[:word:]]+\))?.+$'
- '^.*?\[Bot\](\([[:word:]]+\))?.+$'
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json

View File

@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:80dd3c3b9c6cecb9f1667e9290b
# Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS builder
FROM docker.io/library/golang:1.24.6@sha256:2c89c41fb9efc3807029b59af69645867cfe978d2b877d475be0d72f6c6ce6f6 AS builder
WORKDIR /tmp
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.24.6@sha256:2c89c41fb9efc3807029b59af69645867cfe978d2b877d475be0d72f6c6ce6f6 AS argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd

View File

@@ -1 +1 @@
3.0.7
3.0.18

View File

@@ -91,6 +91,7 @@ type ApplicationSetReconciler struct {
GlobalPreservedAnnotations []string
GlobalPreservedLabels []string
Metrics *metrics.ApplicationsetMetrics
MaxResourcesStatusCount int
}
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
@@ -1303,6 +1304,11 @@ func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, lo
sort.Slice(statuses, func(i, j int) bool {
return statuses[i].Name < statuses[j].Name
})
if r.MaxResourcesStatusCount > 0 && len(statuses) > r.MaxResourcesStatusCount {
logCtx.Warnf("Truncating ApplicationSet %s resource status from %d to max allowed %d entries", appset.Name, len(statuses), r.MaxResourcesStatusCount)
statuses = statuses[:r.MaxResourcesStatusCount]
}
appset.Status.Resources = statuses
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
@@ -1603,14 +1609,15 @@ func shouldRequeueForApplicationSet(appSetOld, appSetNew *argov1alpha1.Applicati
}
}
// only compare the applicationset spec, annotations, labels and finalizers, specifically avoiding
// only compare the applicationset spec, annotations, labels, finalizers and deletionTimestamp, specifically avoiding
// the status field. status is owned by the applicationset controller,
// and we do not need to requeue when it does bookkeeping
// NB: the ApplicationDestination comes from the ApplicationSpec being embedded
// in the ApplicationSetTemplate from the generators
if !cmp.Equal(appSetOld.Spec, appSetNew.Spec, cmpopts.EquateEmpty(), cmpopts.EquateComparable(argov1alpha1.ApplicationDestination{})) ||
!cmp.Equal(appSetOld.ObjectMeta.GetLabels(), appSetNew.ObjectMeta.GetLabels(), cmpopts.EquateEmpty()) ||
!cmp.Equal(appSetOld.ObjectMeta.GetFinalizers(), appSetNew.ObjectMeta.GetFinalizers(), cmpopts.EquateEmpty()) {
!cmp.Equal(appSetOld.GetLabels(), appSetNew.GetLabels(), cmpopts.EquateEmpty()) ||
!cmp.Equal(appSetOld.GetFinalizers(), appSetNew.GetFinalizers(), cmpopts.EquateEmpty()) ||
!cmp.Equal(appSetOld.DeletionTimestamp, appSetNew.DeletionTimestamp, cmpopts.EquateEmpty()) {
return true
}

View File

@@ -6116,10 +6116,11 @@ func TestUpdateResourceStatus(t *testing.T) {
require.NoError(t, err)
for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet
apps []v1alpha1.Application
expectedResources []v1alpha1.ResourceStatus
name string
appSet v1alpha1.ApplicationSet
apps []v1alpha1.Application
expectedResources []v1alpha1.ResourceStatus
maxResourcesStatusCount int
}{
{
name: "handles an empty application list",
@@ -6290,6 +6291,73 @@ func TestUpdateResourceStatus(t *testing.T) {
apps: []v1alpha1.Application{},
expectedResources: nil,
},
{
name: "truncates resources status list to",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Status: v1alpha1.ApplicationSetStatus{
Resources: []v1alpha1.ResourceStatus{
{
Name: "app1",
Status: v1alpha1.SyncStatusCodeOutOfSync,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusProgressing,
Message: "this is progressing",
},
},
{
Name: "app2",
Status: v1alpha1.SyncStatusCodeOutOfSync,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusProgressing,
Message: "this is progressing",
},
},
},
},
},
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Status: v1alpha1.ApplicationStatus{
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
},
Health: v1alpha1.HealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "app2",
},
Status: v1alpha1.ApplicationStatus{
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
},
Health: v1alpha1.HealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
},
expectedResources: []v1alpha1.ResourceStatus{
{
Name: "app1",
Status: v1alpha1.SyncStatusCodeSynced,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
maxResourcesStatusCount: 1,
},
} {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
@@ -6300,13 +6368,14 @@ func TestUpdateResourceStatus(t *testing.T) {
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: argodb,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: argodb,
KubeClientset: kubeclientset,
Metrics: metrics,
MaxResourcesStatusCount: cc.maxResourcesStatusCount,
}
err := r.updateResourcesStatus(t.Context(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps)
@@ -6821,6 +6890,28 @@ func TestApplicationSetOwnsHandlerUpdate(t *testing.T) {
enableProgressiveSyncs: false,
want: false,
},
{
name: "deletionTimestamp present when progressive sync enabled",
appSetOld: buildAppSet(map[string]string{}),
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
},
enableProgressiveSyncs: true,
want: true,
},
{
name: "deletionTimestamp present when progressive sync disabled",
appSetOld: buildAppSet(map[string]string{}),
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
},
enableProgressiveSyncs: false,
want: true,
},
}
for _, tt := range tests {
@@ -6969,6 +7060,36 @@ func TestShouldRequeueForApplicationSet(t *testing.T) {
},
want: true,
},
{
name: "ApplicationSetWithDeletionTimestamp",
args: args{
appSetOld: &v1alpha1.ApplicationSet{
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Status: "Healthy",
},
},
},
},
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Status: "Waiting",
},
},
},
},
enableProgressiveSyncs: false,
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@@ -28,10 +28,11 @@ type GitGenerator struct {
namespace string
}
func NewGitGenerator(repos services.Repos, namespace string) Generator {
// NewGitGenerator creates a new instance of Git Generator
func NewGitGenerator(repos services.Repos, controllerNamespace string) Generator {
g := &GitGenerator{
repos: repos,
namespace: namespace,
namespace: controllerNamespace,
}
return g
@@ -70,11 +71,11 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
if !strings.Contains(appSet.Spec.Template.Spec.Project, "{{") {
project := appSet.Spec.Template.Spec.Project
appProject := &argoprojiov1alpha1.AppProject{}
namespace := g.namespace
if namespace == "" {
namespace = appSet.Namespace
controllerNamespace := g.namespace
if controllerNamespace == "" {
controllerNamespace = appSet.Namespace
}
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: namespace}, appProject); err != nil {
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: controllerNamespace}, appProject); err != nil {
return nil, fmt.Errorf("error getting project %s: %w", project, err)
}
// we need to verify the signature on the Git revision if GPG is enabled

View File

@@ -10,15 +10,15 @@ import (
"github.com/argoproj/argo-cd/v3/applicationset/services"
)
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, namespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
terminalGenerators := map[string]Generator{
"List": NewListGenerator(),
"Clusters": NewClusterGenerator(ctx, c, k8sClient, namespace),
"Git": NewGitGenerator(argoCDService, namespace),
"Clusters": NewClusterGenerator(ctx, c, k8sClient, controllerNamespace),
"Git": NewGitGenerator(argoCDService, controllerNamespace),
"SCMProvider": NewSCMProviderGenerator(c, scmConfig),
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace),
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace),
"PullRequest": NewPullRequestGenerator(c, scmConfig),
"Plugin": NewPluginGenerator(ctx, c, k8sClient, namespace),
"Plugin": NewPluginGenerator(ctx, c, k8sClient, controllerNamespace),
}
nestedGenerators := map[string]Generator{

View File

@@ -58,8 +58,7 @@ func NewApplicationsetMetrics(appsetLister applisters.ApplicationSetLister, apps
metrics.Registry.MustRegister(reconcileHistogram)
metrics.Registry.MustRegister(appsetCollector)
kubectlMetricsServer := kubectl.NewKubectlMetrics()
kubectlMetricsServer.RegisterWithClientGo()
kubectl.RegisterWithClientGo()
kubectl.RegisterWithPrometheus(metrics.Registry)
return ApplicationsetMetrics{

View File

@@ -3,12 +3,11 @@ package pull_request
import (
"context"
"fmt"
"net/http"
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v3/applicationset/utils"
"github.com/argoproj/argo-cd/v3/applicationset/services"
)
type BitbucketService struct {
@@ -49,15 +48,10 @@ func NewBitbucketServiceNoAuth(ctx context.Context, url, projectKey, repositoryS
}
func newBitbucketService(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey, repositorySlug string, scmRootCAPath string, insecure bool, caCerts []byte) (PullRequestService, error) {
bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath)
tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
bitbucketConfig.HTTPClient = &http.Client{Transport: &http.Transport{
TLSClientConfig: tlsConfig,
}}
bitbucketClient := bitbucketv1.NewAPIClient(ctx, bitbucketConfig)
bbClient := services.SetupBitbucketClient(ctx, bitbucketConfig, scmRootCAPath, insecure, caCerts)
return &BitbucketService{
client: bitbucketClient,
client: bbClient,
projectKey: projectKey,
repositorySlug: repositorySlug,
}, nil

View File

@@ -33,7 +33,11 @@ func NewGithubService(token, url, owner, repo string, labels []string) (PullRequ
}
} else {
var err error
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
if token == "" {
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
} else {
client, err = github.NewClient(httpClient).WithAuthToken(token).WithEnterpriseURLs(url, url)
}
if err != nil {
return nil, err
}
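
For context on the change above: go-github's WithAuthToken returns a copy of the client with an auth transport, and WithEnterpriseURLs returns a copy pointed at the enterprise base and upload URLs, so the two chain cleanly; the fix simply threads the token through when one is supplied. A minimal sketch, assuming a recent go-github major version (the import path and host below are illustrative, not from the diff):

package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/v66/github" // assumption: any recent version exposes both methods
)

func newEnterpriseClient(url, token string) (*github.Client, error) {
	client := github.NewClient(nil)
	if token != "" {
		// Apply auth first; WithEnterpriseURLs copies the client, auth transport included.
		client = client.WithAuthToken(token)
	}
	return client.WithEnterpriseURLs(url, url) // base URL and upload URL, as in the diff
}

func main() {
	client, err := newEnterpriseClient("https://ghe.example.com/", "") // hypothetical host, anonymous client
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(client.BaseURL) // https://ghe.example.com/api/v3/
}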

View File

@@ -10,7 +10,7 @@ import (
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v3/applicationset/utils"
"github.com/argoproj/argo-cd/v3/applicationset/services"
)
type BitbucketServerProvider struct {
@@ -49,15 +49,10 @@ func NewBitbucketServerProviderNoAuth(ctx context.Context, url, projectKey strin
}
func newBitbucketServerProvider(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey string, allBranches bool, scmRootCAPath string, insecure bool, caCerts []byte) (*BitbucketServerProvider, error) {
bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath)
tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
bitbucketConfig.HTTPClient = &http.Client{Transport: &http.Transport{
TLSClientConfig: tlsConfig,
}}
bitbucketClient := bitbucketv1.NewAPIClient(ctx, bitbucketConfig)
bbClient := services.SetupBitbucketClient(ctx, bitbucketConfig, scmRootCAPath, insecure, caCerts)
return &BitbucketServerProvider{
client: bitbucketClient,
client: bbClient,
projectKey: projectKey,
allBranches: allBranches,
}, nil

View File

@@ -32,7 +32,11 @@ func NewGithubProvider(organization string, token string, url string, allBranche
}
} else {
var err error
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
if token == "" {
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
} else {
client, err = github.NewClient(httpClient).WithAuthToken(token).WithEnterpriseURLs(url, url)
}
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,22 @@
package services
import (
"context"
"net/http"
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
"github.com/argoproj/argo-cd/v3/applicationset/utils"
)
// SetupBitbucketClient configures and creates a Bitbucket API client with TLS settings
func SetupBitbucketClient(ctx context.Context, config *bitbucketv1.Configuration, scmRootCAPath string, insecure bool, caCerts []byte) *bitbucketv1.APIClient {
config.BasePath = utils.NormalizeBitbucketBasePath(config.BasePath)
tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
transport := http.DefaultTransport.(*http.Transport).Clone()
transport.TLSClientConfig = tlsConfig
config.HTTPClient = &http.Client{Transport: transport}
return bitbucketv1.NewAPIClient(ctx, config)
}
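
The clone-versus-fresh-transport distinction is the substance of the idle-connection fix: a bare &http.Transport{} has zero-valued pool settings, so idle connections are never capped or reaped. A standalone sketch using only the standard library:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// The old code built a fresh transport, losing all pooling defaults.
	bare := &http.Transport{TLSClientConfig: &tls.Config{}}
	fmt.Println(bare.MaxIdleConns, bare.IdleConnTimeout) // 0 0s -- no cap, no reaping

	// Cloning http.DefaultTransport preserves MaxIdleConns=100, IdleConnTimeout=90s,
	// TLSHandshakeTimeout=10s, etc., overriding only the TLS config -- exactly what
	// SetupBitbucketClient does above.
	cloned := http.DefaultTransport.(*http.Transport).Clone()
	cloned.TLSClientConfig = &tls.Config{}
	fmt.Println(cloned.MaxIdleConns, cloned.IdleConnTimeout) // 100 1m30s
}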

View File

@@ -0,0 +1,36 @@
package services
import (
"crypto/tls"
"net/http"
"testing"
"time"
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
"github.com/stretchr/testify/require"
)
func TestSetupBitbucketClient(t *testing.T) {
ctx := t.Context()
cfg := &bitbucketv1.Configuration{}
// Act
client := SetupBitbucketClient(ctx, cfg, "", false, nil)
// Assert
require.NotNil(t, client, "expected client to be created")
require.NotNil(t, cfg.HTTPClient, "expected HTTPClient to be set")
// The transport should be a clone of DefaultTransport
tr, ok := cfg.HTTPClient.Transport.(*http.Transport)
require.True(t, ok, "expected HTTPClient.Transport to be *http.Transport")
require.NotSame(t, http.DefaultTransport, tr, "transport should be a clone, not the global DefaultTransport")
// Ensure TLSClientConfig is set
require.IsType(t, &tls.Config{}, tr.TLSClientConfig)
// Defaults from http.DefaultTransport.Clone() should be preserved
require.Greater(t, tr.IdleConnTimeout, time.Duration(0), "IdleConnTimeout should be non-zero")
require.Positive(t, tr.MaxIdleConns, "MaxIdleConns should be non-zero")
require.Greater(t, tr.TLSHandshakeTimeout, time.Duration(0), "TLSHandshakeTimeout should be non-zero")
}

View File

@@ -74,6 +74,7 @@ func NewCommand() *cobra.Command {
enableScmProviders bool
webhookParallelism int
tokenRefStrictMode bool
maxResourcesStatusCount int
)
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
@@ -225,6 +226,7 @@ func NewCommand() *cobra.Command {
GlobalPreservedAnnotations: globalPreservedAnnotations,
GlobalPreservedLabels: globalPreservedLabels,
Metrics: &metrics,
MaxResourcesStatusCount: maxResourcesStatusCount,
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
os.Exit(1)
@@ -268,6 +270,7 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&globalPreservedLabels, "preserved-labels", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS", []string{}, ","), "Sets global preserved field values for labels")
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
command.Flags().StringSliceVar(&metricsAplicationsetLabels, "metrics-applicationset-labels", []string{}, "List of Application labels that will be added to the argocd_applicationset_labels metric")
command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 0, 0, math.MaxInt), "Max number of resources stored in appset status.")
return &command
}
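
The new flag feeds directly into the truncation guard added to updateResourcesStatus earlier in this diff. A tiny sketch of the guard's semantics, where 0 (the flag's default) means unlimited:

package main

import "fmt"

// truncate mirrors the reconciler's check: truncation only happens when a
// positive max is configured and the status list exceeds it.
func truncate(statuses []string, max int) []string {
	if max > 0 && len(statuses) > max {
		return statuses[:max]
	}
	return statuses
}

func main() {
	apps := []string{"app1", "app2", "app3"}
	fmt.Println(truncate(apps, 0)) // [app1 app2 app3] -- default, unlimited
	fmt.Println(truncate(apps, 1)) // [app1]
}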

View File

@@ -1,3 +1,5 @@
//go:build !darwin || (cgo && darwin)
package commands
import (

View File

@@ -0,0 +1,25 @@
//go:build darwin && !cgo
// Package commands
// This file is used when the GOOS is darwin and CGO is not enabled.
// It provides a no-op implementation of newAzureCommand to allow goreleaser to build
// a darwin binary on a linux machine.
package commands
import (
"log"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/v3/util/workloadidentity"
)
func newAzureCommand() *cobra.Command {
command := &cobra.Command{
Use: "azure",
Run: func(c *cobra.Command, _ []string) {
log.Fatalf(workloadidentity.CGOError)
},
}
return command
}
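
The build expression on this stub is the exact complement of the one added to the original file (!darwin || (cgo && darwin)), so exactly one definition of newAzureCommand is compiled for every GOOS/CGO combination. A quick truth-table check:

package main

import "fmt"

func main() {
	for _, darwin := range []bool{false, true} {
		for _, cgo := range []bool{false, true} {
			realImpl := !darwin || (cgo && darwin) // constraint on the real implementation
			stubImpl := darwin && !cgo             // constraint on the no-op stub above
			// realImpl and stubImpl are never both true and never both false.
			fmt.Printf("darwin=%v cgo=%v -> real=%v stub=%v\n", darwin, cgo, realImpl, stubImpl)
		}
	}
}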

View File

@@ -1201,7 +1201,7 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
if err != nil {
logCtx.Warnf("Unable to get destination cluster: %v", err)
app.UnSetCascadedDeletion()
app.UnSetPostDeleteFinalizer()
app.UnSetPostDeleteFinalizerAll()
if err := ctrl.updateFinalizers(app); err != nil {
return err
}
@@ -1481,7 +1481,7 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
} else {
state.Phase = synccommon.OperationRunning
state.RetryCount++
state.Message = fmt.Sprintf("%s. Retrying attempt #%d at %s.", state.Message, state.RetryCount, retryAt.Format(time.Kitchen))
state.Message = fmt.Sprintf("%s due to application controller sync timeout. Retrying attempt #%d at %s.", state.Message, state.RetryCount, retryAt.Format(time.Kitchen))
}
} else if state.RetryCount > 0 {
state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)

View File

@@ -2051,7 +2051,7 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) {
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
assert.Equal(t, string(synccommon.OperationRunning), phase)
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Contains(t, message, "Retrying attempt #1")
assert.Contains(t, message, "due to application controller sync timeout. Retrying attempt #1")
retryCount, _, _ := unstructured.NestedFloat64(receivedPatch, "status", "operationState", "retryCount")
assert.InEpsilon(t, float64(1), retryCount, 0.0001)
}

View File

@@ -199,8 +199,7 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil
registry.MustRegister(resourceEventsProcessingHistogram)
registry.MustRegister(resourceEventsNumberGauge)
kubectlMetricsServer := kubectl.NewKubectlMetrics()
kubectlMetricsServer.RegisterWithClientGo()
kubectl.RegisterWithClientGo()
kubectl.RegisterWithPrometheus(registry)
metricsServer := &MetricsServer{

View File

@@ -109,15 +109,6 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
}
syncOp = *state.Operation.Sync
// validates if it should fail the sync if it finds shared resources
hasSharedResource, sharedResourceMessage := hasSharedResourceCondition(app)
if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") &&
hasSharedResource {
state.Phase = common.OperationFailed
state.Message = "Shared resource found: " + sharedResourceMessage
return
}
isMultiSourceRevision := app.Spec.HasMultipleSources()
rollback := len(syncOp.Sources) > 0 || syncOp.Source != nil
if rollback {
@@ -208,6 +199,15 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
syncRes.Revision = compareResult.syncStatus.Revision
syncRes.Revisions = compareResult.syncStatus.Revisions
// validates if it should fail the sync if it finds shared resources
hasSharedResource, sharedResourceMessage := hasSharedResourceCondition(app)
if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") &&
hasSharedResource {
state.Phase = common.OperationFailed
state.Message = "Shared resource found: %s" + sharedResourceMessage
return
}
// If there are any comparison or spec errors error conditions do not perform the operation
if errConditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{
v1alpha1.ApplicationConditionComparisonError: true,
@@ -321,7 +321,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
return
}
if impersonationEnabled {
serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(proj, app)
serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(proj, app, destCluster)
if err != nil {
state.Phase = common.OperationError
state.Message = fmt.Sprintf("failed to find a matching service account to impersonate: %v", err)
@@ -598,7 +598,7 @@ func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject
// deriveServiceAccountToImpersonate determines the service account to be used for impersonation for the sync operation.
// The returned service account will be fully qualified including namespace and the service account name in the format system:serviceaccount:<namespace>:<service_account>
func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application) (string, error) {
func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application, destCluster *v1alpha1.Cluster) (string, error) {
// spec.Destination.Namespace is optional. If not specified, use the Application's
// namespace
serviceAccountNamespace := application.Spec.Destination.Namespace
@@ -608,7 +608,7 @@ func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application
// Loop through the destinationServiceAccounts and see if there is any destination that is a candidate.
// if so, return the service account specified for that destination.
for _, item := range project.Spec.DestinationServiceAccounts {
dstServerMatched, err := glob.MatchWithError(item.Server, application.Spec.Destination.Server)
dstServerMatched, err := glob.MatchWithError(item.Server, destCluster.Server)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
}
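
The substance of this change: the glob match now runs against the resolved cluster URL (destCluster.Server) instead of the raw app.Spec.Destination.Server, which is empty when the destination is given by name. An illustrative reduction of the lookup (gobwas/glob stands in for Argo CD's glob helper, and namespace matching is omitted for brevity):

package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

type destinationServiceAccount struct {
	Server, Namespace, DefaultServiceAccount string
}

func deriveSA(items []destinationServiceAccount, clusterServer, fallbackNamespace string) (string, error) {
	for _, item := range items {
		// Match the resolved cluster URL, not the raw destination field.
		if glob.MustCompile(item.Server).Match(clusterServer) {
			ns := item.Namespace
			if ns == "" {
				ns = fallbackNamespace // simplified; the real code derives this from the destination
			}
			return fmt.Sprintf("system:serviceaccount:%s:%s", ns, item.DefaultServiceAccount), nil
		}
	}
	return "", fmt.Errorf("no matching destination service account for %s", clusterServer)
}

func main() {
	sa, _ := deriveSA([]destinationServiceAccount{
		{Server: "https://kubernetes.svc.local", Namespace: "testns", DefaultServiceAccount: "test-sa"},
	}, "https://kubernetes.svc.local", "argocd-ns")
	fmt.Println(sa) // system:serviceaccount:testns:test-sa
}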

View File

@@ -5,7 +5,7 @@ import (
"testing"
"github.com/argoproj/gitops-engine/pkg/sync"
"github.com/argoproj/gitops-engine/pkg/sync/common"
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -14,6 +14,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/controller/testdata"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
@@ -189,17 +190,23 @@ func TestSyncComparisonError(t *testing.T) {
}
func TestAppStateManager_SyncAppState(t *testing.T) {
t.Parallel()
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
project *v1alpha1.AppProject
controller *ApplicationController
}
setup := func() *fixture {
setup := func(liveObjects map[kube.ResourceKey]*unstructured.Unstructured) *fixture {
app := newFakeApp()
app.Status.OperationState = nil
app.Status.History = nil
if liveObjects == nil {
liveObjects = make(map[kube.ResourceKey]*unstructured.Unstructured)
}
project := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Namespace: test.FakeArgoCDNamespace,
@@ -207,6 +214,12 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
},
Spec: v1alpha1.AppProjectSpec{
SignatureKeys: []v1alpha1.SignatureKey{{KeyID: "test"}},
Destinations: []v1alpha1.ApplicationDestination{
{
Namespace: "*",
Server: "*",
},
},
},
}
data := fakeData{
@@ -217,13 +230,13 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
managedLiveObjs: liveObjects,
}
ctrl := newFakeController(&data, nil)
return &fixture{
project: project,
application: app,
project: project,
controller: ctrl,
}
}
@@ -231,13 +244,23 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
t.Run("will fail the sync if finds shared resources", func(t *testing.T) {
// given
t.Parallel()
f := setup()
syncErrorMsg := "deployment already applied by another application"
condition := v1alpha1.ApplicationCondition{
Type: v1alpha1.ApplicationConditionSharedResourceWarning,
Message: syncErrorMsg,
}
f.application.Status.Conditions = append(f.application.Status.Conditions, condition)
sharedObject := kube.MustToUnstructured(&corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: "configmap1",
Namespace: "default",
Annotations: map[string]string{
common.AnnotationKeyAppInstance: "guestbook:/ConfigMap:default/configmap1",
},
},
})
liveObjects := make(map[kube.ResourceKey]*unstructured.Unstructured)
liveObjects[kube.GetResourceKey(sharedObject)] = sharedObject
f := setup(liveObjects)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
@@ -251,8 +274,8 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
f.controller.appStateManager.SyncAppState(f.application, opState)
// then
assert.Equal(t, common.OperationFailed, opState.Phase)
assert.Contains(t, opState.Message, syncErrorMsg)
assert.Equal(t, synccommon.OperationFailed, opState.Phase)
assert.Contains(t, opState.Message, "ConfigMap/configmap1 is part of applications fake-argocd-ns/my-app and guestbook")
})
}
@@ -315,13 +338,13 @@ func TestSyncWindowDeniesSync(t *testing.T) {
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
Phase: synccommon.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then
assert.Equal(t, common.OperationRunning, opState.Phase)
assert.Equal(t, synccommon.OperationRunning, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
}
@@ -649,6 +672,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
cluster *v1alpha1.Cluster
}
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
@@ -674,9 +698,14 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
},
},
}
cluster := &v1alpha1.Cluster{
Server: "https://kubernetes.svc.local",
Name: "test-cluster",
}
return &fixture{
project: project,
application: app,
cluster: cluster,
}
}
@@ -692,7 +721,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should be an error saying no valid match was found
@@ -716,7 +745,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should be no error and should use the right service account for impersonation
require.NoError(t, err)
@@ -755,7 +784,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should be no error and should use the right service account for impersonation
require.NoError(t, err)
@@ -794,7 +823,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should be no error and it should use the first matching service account for impersonation
require.NoError(t, err)
@@ -828,7 +857,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and should use the first matching glob pattern service account for impersonation
require.NoError(t, err)
@@ -863,7 +892,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should be an error saying no match was found
require.EqualError(t, err, expectedErrMsg)
@@ -891,7 +920,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and the service account configured for with empty namespace should be used.
require.NoError(t, err)
@@ -925,7 +954,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and the catch all service account should be returned
require.NoError(t, err)
@@ -949,7 +978,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there must be an error as the glob pattern is invalid.
require.ErrorContains(t, err, "invalid glob pattern for destination namespace")
@@ -983,7 +1012,35 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account with its namespace should be returned.
require.NoError(t, err)
})
t.Run("app destination name instead of server URL", func(t *testing.T) {
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// Use destination name instead of server URL
f.application.Spec.Destination.Server = ""
f.application.Spec.Destination.Name = f.cluster.Name
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account with its namespace should be returned.
@@ -995,6 +1052,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
cluster *v1alpha1.Cluster
}
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
@@ -1020,9 +1078,14 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
},
},
}
cluster := &v1alpha1.Cluster{
Server: "https://kubernetes.svc.local",
Name: "test-cluster",
}
return &fixture{
project: project,
application: app,
cluster: cluster,
}
}
@@ -1058,7 +1121,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and the right service account must be returned.
require.NoError(t, err)
@@ -1097,7 +1160,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and first matching service account should be used
require.NoError(t, err)
@@ -1131,7 +1194,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account of the glob pattern, being the first match should be returned.
@@ -1166,7 +1229,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, &v1alpha1.Cluster{Server: destinationServerURL})
// then, there an error with appropriate message must be returned
require.EqualError(t, err, expectedErr)
@@ -1200,7 +1263,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and the service account of the glob pattern match must be returned.
require.NoError(t, err)
@@ -1224,7 +1287,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there must be an error as the glob pattern is invalid.
require.ErrorContains(t, err, "invalid glob pattern for destination server")
@@ -1258,12 +1321,40 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, &v1alpha1.Cluster{Server: destinationServerURL})
// then, there should not be any error and the service account with the given namespace prefix must be returned.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("app destination name instead of server URL", func(t *testing.T) {
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// Use destination name instead of server URL
f.application.Spec.Destination.Server = ""
f.application.Spec.Destination.Name = f.cluster.Name
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account with its namespace should be returned.
require.NoError(t, err)
})
}
func TestSyncWithImpersonate(t *testing.T) {
@@ -1336,13 +1427,13 @@ func TestSyncWithImpersonate(t *testing.T) {
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
Phase: synccommon.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then, app sync should fail with expected error message in operation state
assert.Equal(t, common.OperationError, opState.Phase)
assert.Equal(t, synccommon.OperationError, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
@@ -1357,13 +1448,13 @@ func TestSyncWithImpersonate(t *testing.T) {
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
Phase: synccommon.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then app sync should fail with expected error message in operation state
assert.Equal(t, common.OperationError, opState.Phase)
assert.Equal(t, synccommon.OperationError, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
@@ -1378,13 +1469,13 @@ func TestSyncWithImpersonate(t *testing.T) {
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
Phase: synccommon.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then app sync should not fail
assert.Equal(t, common.OperationSucceeded, opState.Phase)
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
@@ -1399,13 +1490,38 @@ func TestSyncWithImpersonate(t *testing.T) {
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
Phase: synccommon.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then application sync should pass using the control plane service account
assert.Equal(t, common.OperationSucceeded, opState.Phase)
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
t.Run("app destination name instead of server URL", func(t *testing.T) {
// given the app sync impersonation feature is enabled with an application referring to a project with a matching service account
f := setup(true, test.FakeDestNamespace, "test-sa")
opMessage := "successfully synced (no more tasks)"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: synccommon.OperationRunning,
}
f.application.Spec.Destination.Server = ""
f.application.Spec.Destination.Name = "minikube"
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then app sync should not fail
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
}

View File

@@ -284,6 +284,8 @@ data:
applicationsetcontroller.global.preserved.annotations: "acme.com/annotation1,acme.com/annotation2"
# Comma delimited list of labels to preserve in generated applications
applicationsetcontroller.global.preserved.labels: "acme.com/label1,acme.com/label2"
# The maximum number of resources stored in the status of an ApplicationSet. This is a safeguard to prevent the status from growing too large.
applicationsetcontroller.status.max.resources.count: "5000"
## Argo CD Notifications Controller Properties
# Set the logging level. One of: debug|info|warn|error (default "info")

View File

@@ -2,7 +2,7 @@
Argo CD is largely stateless. All data is persisted as Kubernetes objects, which in turn are stored in Kubernetes' etcd. Redis is only used as a throw-away cache and can be lost. When lost, it will be rebuilt without loss of service.
A set of [HA manifests](https://github.com/argoproj/argo-cd/tree/master/manifests/ha) are provided for users who wish to run Argo CD in a highly available manner. This runs more containers, and runs Redis in HA mode.
A set of [HA manifests](https://github.com/argoproj/argo-cd/tree/stable/manifests/ha) are provided for users who wish to run Argo CD in a highly available manner. This runs more containers, and runs Redis in HA mode.
> **NOTE:** The HA installation will require at least three different nodes due to pod anti-affinity rules in the
> specs. Additionally, IPv6-only clusters are not supported.
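For reference, installing from the stable HA manifests can be as simple as the following sketch (it assumes a dedicated `argocd` namespace; pin a release tag instead of `stable` if you need a reproducible install):
```sh
# Create the namespace and apply the HA manifests from the stable branch.
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/ha/install.yaml
```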

View File

@@ -193,6 +193,7 @@ argocd_cluster_labels{label_environment="production",label_team_name="team3",nam
## API Server Metrics
Metrics about API Server API request and response activity (request totals, response codes, etc.).
Scraped at the `argocd-server-metrics:8083/metrics` endpoint.
For gRPC metrics to show up, the environment variable `ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM` must be set to `true`.
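As a sketch of one way to set this (assuming the default `argocd` namespace and the stock `argocd-server` deployment name):
```sh
# Inject the env var into the API server so gRPC histograms are exposed.
kubectl -n argocd set env deployment/argocd-server ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM=true
```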
| Metric | Type | Description |
|---------------------------------------------------|:---------:|---------------------------------------------------------------------------------------------|
@@ -231,17 +232,20 @@ Scraped at the `argocd-server-metrics:8083/metrics` endpoint.
| version | v2.13.3 | Argo CD version. |
## Repo Server Metrics
Metrics about the Repo Server.
Metrics about the Repo Server. The gRPC metrics are not exposed by default. They can be enabled by setting the
`ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM=true` environment variable.
Scraped at the `argocd-repo-server:8084/metrics` endpoint.
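As with the API server, a minimal sketch for enabling these (assuming the default `argocd` namespace and deployment name):
```sh
# Inject the env var into the repo server so gRPC histograms are exposed.
kubectl -n argocd set env deployment/argocd-repo-server ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM=true
```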
| Metric | Type | Description |
|--------|:----:|-------------|
| `argocd_git_request_duration_seconds` | histogram | Git requests duration seconds. |
| `argocd_git_request_total` | counter | Number of git requests performed by repo server |
| `argocd_git_fetch_fail_total` | counter | Number of git fetch requests failures by repo server |
| `argocd_redis_request_duration_seconds` | histogram | Redis requests duration seconds. |
| `argocd_redis_request_total` | counter | Number of Kubernetes requests executed during application reconciliation. |
| `argocd_repo_pending_request_total` | gauge | Number of pending requests requiring repository lock |
| Metric | Type | Description |
| --------------------------------------- | :-------: | ------------------------------------------------------------------------- |
| `argocd_git_request_duration_seconds` | histogram | Git requests duration seconds. |
| `argocd_git_request_total` | counter | Number of git requests performed by repo server |
| `argocd_git_fetch_fail_total` | counter | Number of git fetch requests failures by repo server |
| `argocd_redis_request_duration_seconds` | histogram | Redis requests duration seconds. |
| `argocd_redis_request_total` | counter | Number of Kubernetes requests executed during application reconciliation. |
| `argocd_repo_pending_request_total` | gauge | Number of pending requests requiring repository lock |
## Commit Server Metrics

View File

@@ -11,4 +11,12 @@ Eg, `https://github.com/argoproj/argo-cd/manifests/ha/cluster-install?ref=v2.14.
## Upgraded Helm Version
Helm was upgraded to 3.16.2 and the skipSchemaValidation flag was added to
the [CLI and Application CR](https://argo-cd.readthedocs.io/en/latest/user-guide/helm/#helm-skip-schema-validation).
the [CLI and Application CR](https://argo-cd.readthedocs.io/en/latest/user-guide/helm/#helm-skip-schema-validation).
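If you want to exercise the new Helm behavior directly, here is a minimal sketch (the release name and the `./my-chart` path are placeholder assumptions):
```sh
# Simulate an install while skipping values.schema.json validation; assumes Helm >= 3.16.
helm install my-release ./my-chart --dry-run --skip-schema-validation
```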
## Breaking Changes
### Sanitized project API response
For security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
the project API response was sanitized to remove sensitive information, including the
credentials of project-scoped repositories and clusters.
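To see what the sanitized response looks like in practice, you can fetch a project through the API (a sketch; `$ARGOCD_TOKEN`, `$YOUR_ARGOCD_URL`, and `$YOUR_PROJECT_NAME` are placeholders to substitute):
```sh
# Fetch a project; credentials of project-scoped repositories and clusters
# are no longer included in the response.
curl -H "Authorization: Bearer $ARGOCD_TOKEN" \
  "https://$YOUR_ARGOCD_URL/api/v1/projects/$YOUR_PROJECT_NAME"
```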

View File

@@ -7,6 +7,13 @@ applicable) restore Argo CD 2.x default behavior.
Once 3.0 is released, no more 2.x minor versions will be released. We will continue to cut patch releases for the two
most recent minor versions (so 2.14 until 3.2 is released and 2.13 until 3.1 is released).
## Images missing release notes on GitHub
!!! important
Images 3.0.7 - 3.0.10 are missing release notes on GitHub. There was an issue with GoReleaser and building the darwin
CLI that prevented the release notes from being published. More information can be found
on [PR #23507](https://github.com/argoproj/argo-cd/pull/23507).
## Breaking Changes
### Fine-Grained RBAC for application `update` and `delete` sub-resources
@@ -239,16 +246,36 @@ external code copying tracking labels from one resource to another.
#### Detection
To detect if you are impacted, check the `argocd-cm` ConfigMap for the `application.resourceTrackingMethod` field. If it
To detect if you are impacted, check the `argocd-cm` ConfigMap for the `application.resourceTrackingMethod` field. If it is
unset or is set to `label`, you are using label-based tracking. If it is set to `annotation`, you are already using
annotation-based tracking and are not impacted by this change.
```sh
kubectl get cm argocd-cm -n argocd -o jsonpath='{.data.application\.resourceTrackingMethod}'
```
If you are using label-based tracking, it is also important to detect whether you have Applications that use the `ApplyOutOfSyncOnly=true` sync option, as such Applications are likely to have orphaned resources after switching to the `annotation` tracking method and need to be synced explicitly right after the upgrade.
To detect whether you have such Applications, run:
```sh
kubectl get applications.argoproj.io -A -o json | jq -r '.items[] | select(.spec.syncPolicy.syncOptions[]? == "ApplyOutOfSyncOnly=true") | .metadata.name'
```
#### Remediation
##### Users with ApplyOutOfSyncOnly=true syncOptions and label-based tracking
For users with label-based tracking and Applications that have `ApplyOutOfSyncOnly=true` syncOptions, an explicit sync has to be run for those Applications after you upgrade.
Here is an example command that syncs such an Application; it can be run after you [obtain a token](../../developer-guide/api-docs.md#authorization) for the Argo CD API:
```sh
curl -X POST -H "Authorization: Bearer $ARGOCD_TOKEN" -H "Content-Type: application/json" -d '{
"name": "$YOUR_APP_NAME"
}' "http://$YOUR_ARGOCD_URL/api/v1/applications/$YOUR_APP_NAME/sync"
```
It is also possible to sync such an Application using the UI, with the `ApplyOutOfSyncOnly` option unchecked. However, performing a sync without the `ApplyOutOfSyncOnly` option is currently not possible using the CLI.
##### Other users
For most users, it is safe to upgrade to Argo CD 3.0 and use annotation-based tracking. Labels will be replaced with
annotations on the next sync. Applications will not be marked as out-of-sync if labels are not present on the
resources.
@@ -261,6 +288,7 @@ resources.
delete it. To avoid this edge case, it is recommended to perform a sync operation on your Applications, even if
they are not out of sync, so that orphan resource detection will work as expected on the next sync.
##### Users who rely on label-based tracking for resources that are not managed by Argo CD
Some users rely on label-based tracking to track resources that are not managed by Argo CD. They may set annotations
to have Argo CD ignore the resource as extraneous or to disable pruning. If you are using label-based tracking to track
resources that are not managed by Argo CD, you will need to construct tracking annotations instead of tracking labels
@@ -463,4 +491,9 @@ resource.customizations.ignoreDifferences.apiextensions.k8s.io_CustomResourceDef
- /spec/preserveUnknownFields
```
More details on ignored resource updates can be found in the [Diffing customization](../../user-guide/diffing.md) documentation.
More details on ignored resource updates can be found in the [Diffing customization](../../user-guide/diffing.md) documentation.
### Sanitized project API response
For security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
the project API response was sanitized to remove sensitive information, including the
credentials of project-scoped repositories and clusters.

4
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/argoproj/argo-cd/v3
go 1.24.4
go 1.24.6
require (
code.gitea.io/sdk/gitea v0.20.0
@@ -12,7 +12,7 @@ require (
github.com/Masterminds/sprig/v3 v3.3.0
github.com/TomOnTime/utfutil v1.0.0
github.com/alicebob/miniredis/v2 v2.34.0
github.com/argoproj/gitops-engine v0.7.1-0.20250520182409-89c110b5952e
github.com/argoproj/gitops-engine v0.7.1-0.20250905153922-d96c3d51e4c4
github.com/argoproj/notifications-engine v0.4.1-0.20250309174002-87bf0576a872
github.com/argoproj/pkg v0.13.7-0.20250305113207-cbc37dc61de5
github.com/aws/aws-sdk-go v1.55.6

4
go.sum
View File

@@ -114,8 +114,8 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
github.com/argoproj/gitops-engine v0.7.1-0.20250520182409-89c110b5952e h1:65x5+7Vz3HPjFoj7+mFyCckgHrAhPwy4rnDp/AveD18=
github.com/argoproj/gitops-engine v0.7.1-0.20250520182409-89c110b5952e/go.mod h1:duVhxDW7M7M7+19IBCVth2REOS11gmqzTWwj4u8N7aQ=
github.com/argoproj/gitops-engine v0.7.1-0.20250905153922-d96c3d51e4c4 h1:OsQxWX8UHdwXuy72Y1Js8gQY3xmOzFEieCSpMoXKFb8=
github.com/argoproj/gitops-engine v0.7.1-0.20250905153922-d96c3d51e4c4/go.mod h1:duVhxDW7M7M7+19IBCVth2REOS11gmqzTWwj4u8N7aQ=
github.com/argoproj/notifications-engine v0.4.1-0.20250309174002-87bf0576a872 h1:ADGAdyN9ty0+RmTT/yn+xV9vwkqvLn9O1ccqeP0Zeas=
github.com/argoproj/notifications-engine v0.4.1-0.20250309174002-87bf0576a872/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
github.com/argoproj/pkg v0.13.7-0.20250305113207-cbc37dc61de5 h1:YBoLSjpoaJXaXAldVvBRKJuOPvIXz9UOv6S96gMJM/Q=

View File

@@ -253,6 +253,12 @@ spec:
name: argocd-cmd-params-cm
key: controller.cluster.cache.events.processing.interval
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
name: argocd-cmd-params-cm
key: commit.server
optional: true
image: quay.io/argoproj/argocd:latest
imagePullPolicy: Always
name: argocd-application-controller

View File

@@ -268,6 +268,12 @@ spec:
name: argocd-cmd-params-cm
key: controller.cluster.cache.events.processing.interval
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
name: argocd-cmd-params-cm
key: commit.server
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest

View File

@@ -181,6 +181,12 @@ spec:
name: argocd-cmd-params-cm
key: applicationsetcontroller.requeue.after
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
name: argocd-cmd-params-cm
key: applicationsetcontroller.status.max.resources.count
optional: true
volumeMounts:
- mountPath: /app/config/ssh
name: ssh-known-hosts

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.0.7
newTag: v3.0.18

View File

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.0.7
newTag: v3.0.18
resources:
- ./application-controller
- ./dex

View File

@@ -24609,7 +24609,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -24735,7 +24741,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -24781,7 +24787,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -24885,7 +24891,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25158,7 +25164,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25210,7 +25216,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -25544,9 +25550,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -24577,7 +24577,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -24697,7 +24703,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -24970,7 +24976,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25022,7 +25028,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -25356,9 +25362,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.0.7
newTag: v3.0.18

View File

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.0.7
newTag: v3.0.18
resources:
- ../../base/application-controller
- ../../base/applicationset-controller

View File

@@ -25975,7 +25975,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -26101,7 +26107,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26147,7 +26153,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -26274,7 +26280,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -26370,7 +26376,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -26494,7 +26500,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -26793,7 +26799,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26845,7 +26851,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -27219,7 +27225,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -27589,9 +27595,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -25945,7 +25945,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -26088,7 +26094,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -26184,7 +26190,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -26308,7 +26314,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -26607,7 +26613,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26659,7 +26665,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -27033,7 +27039,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -27403,9 +27409,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1862,7 +1862,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1988,7 +1994,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2034,7 +2040,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2161,7 +2167,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2257,7 +2263,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2381,7 +2387,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2680,7 +2686,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2732,7 +2738,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -3106,7 +3112,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3476,9 +3482,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1832,7 +1832,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1975,7 +1981,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2071,7 +2077,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2195,7 +2201,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2494,7 +2500,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2546,7 +2552,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2920,7 +2926,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3290,9 +3296,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -25069,7 +25069,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -25195,7 +25201,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25241,7 +25247,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -25368,7 +25374,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -25464,7 +25470,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -25566,7 +25572,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25839,7 +25845,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25891,7 +25897,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -26263,7 +26269,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -26633,9 +26639,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

28
manifests/install.yaml generated
View File

@@ -25037,7 +25037,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -25180,7 +25186,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -25276,7 +25282,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -25378,7 +25384,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25651,7 +25657,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25703,7 +25709,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -26075,7 +26081,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -26445,9 +26451,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -956,7 +956,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1082,7 +1088,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1128,7 +1134,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -1255,7 +1261,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1351,7 +1357,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1453,7 +1459,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1726,7 +1732,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1778,7 +1784,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2150,7 +2156,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2520,9 +2526,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -924,7 +924,13 @@ spec:
key: applicationsetcontroller.requeue.after
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
valueFrom:
configMapKeyRef:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1067,7 +1073,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1163,7 +1169,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1265,7 +1271,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1538,7 +1544,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1590,7 +1596,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -1962,7 +1968,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2332,9 +2338,15 @@ spec:
key: controller.cluster.cache.events.processing.interval
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER
valueFrom:
configMapKeyRef:
key: commit.server
name: argocd-cmd-params-cm
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.0.7
image: quay.io/argoproj/argocd:v3.0.18
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -321,7 +321,6 @@ func (repo *Repository) Sanitized() *Repository {
Repo: repo.Repo,
Type: repo.Type,
Name: repo.Name,
Username: repo.Username,
Insecure: repo.IsInsecure(),
EnableLFS: repo.EnableLFS,
EnableOCI: repo.EnableOCI,

View File

@@ -2152,6 +2152,32 @@ type Cluster struct {
Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,13,opt,name=annotations"`
}
func (c *Cluster) Sanitized() *Cluster {
return &Cluster{
ID: c.ID,
Server: c.Server,
Name: c.Name,
Project: c.Project,
Namespaces: c.Namespaces,
Shard: c.Shard,
Labels: c.Labels,
Annotations: c.Annotations,
ClusterResources: c.ClusterResources,
ConnectionState: c.ConnectionState,
ServerVersion: c.ServerVersion,
Info: c.Info,
RefreshRequestedAt: c.RefreshRequestedAt,
Config: ClusterConfig{
AWSAuthConfig: c.Config.AWSAuthConfig,
ProxyUrl: c.Config.ProxyUrl,
DisableCompression: c.Config.DisableCompression,
TLSClientConfig: TLSClientConfig{
Insecure: c.Config.Insecure,
},
},
}
}
// Equals returns true if two cluster objects are considered to be equal
func (c *Cluster) Equals(other *Cluster) bool {
if c.Server != other.Server {
@@ -3152,6 +3178,14 @@ func (app *Application) SetPostDeleteFinalizer(stage ...string) {
setFinalizer(&app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/"), true)
}
func (app *Application) UnSetPostDeleteFinalizerAll() {
for _, finalizer := range app.Finalizers {
if strings.HasPrefix(finalizer, PostDeleteFinalizerName) {
setFinalizer(&app.ObjectMeta, finalizer, false)
}
}
}
func (app *Application) UnSetPostDeleteFinalizer(stage ...string) {
setFinalizer(&app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/"), false)
}

View File

@@ -4507,3 +4507,58 @@ func TestCluster_ParseProxyUrl(t *testing.T) {
}
}
}
func TestSanitized(t *testing.T) {
now := metav1.Now()
cluster := &Cluster{
ID: "123",
Server: "https://example.com",
Name: "example",
ServerVersion: "v1.0.0",
Namespaces: []string{"default", "kube-system"},
Project: "default",
Labels: map[string]string{
"env": "production",
},
Annotations: map[string]string{
"annotation-key": "annotation-value",
},
ConnectionState: ConnectionState{
Status: ConnectionStatusSuccessful,
Message: "Connection successful",
ModifiedAt: &now,
},
Config: ClusterConfig{
Username: "admin",
Password: "password123",
BearerToken: "abc",
TLSClientConfig: TLSClientConfig{
Insecure: true,
},
ExecProviderConfig: &ExecProviderConfig{
Command: "test",
},
},
}
assert.Equal(t, &Cluster{
ID: "123",
Server: "https://example.com",
Name: "example",
ServerVersion: "v1.0.0",
Namespaces: []string{"default", "kube-system"},
Project: "default",
Labels: map[string]string{"env": "production"},
Annotations: map[string]string{"annotation-key": "annotation-value"},
ConnectionState: ConnectionState{
Status: ConnectionStatusSuccessful,
Message: "Connection successful",
ModifiedAt: &now,
},
Config: ClusterConfig{
TLSClientConfig: TLSClientConfig{
Insecure: true,
},
},
}, cluster.Sanitized())
}

View File

@@ -19,6 +19,7 @@ type MetricsServer struct {
repoPendingRequestsGauge *prometheus.GaugeVec
redisRequestCounter *prometheus.CounterVec
redisRequestHistogram *prometheus.HistogramVec
PrometheusRegistry *prometheus.Registry
}
type GitRequestType string
@@ -108,6 +109,7 @@ func NewMetricsServer() *MetricsServer {
repoPendingRequestsGauge: repoPendingRequestsGauge,
redisRequestCounter: redisRequestCounter,
redisRequestHistogram: redisRequestHistogram,
PrometheusRegistry: registry,
}
}

View File

@@ -9,7 +9,6 @@ import (
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging"
"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
@@ -69,8 +68,7 @@ func NewServer(metricsServer *metrics.MetricsServer, cache *reposervercache.Cach
serverMetricsOptions = append(serverMetricsOptions, grpc_prometheus.WithServerHandlingTimeHistogram())
}
serverMetrics := grpc_prometheus.NewServerMetrics(serverMetricsOptions...)
reg := prometheus.NewRegistry()
reg.MustRegister(serverMetrics)
metricsServer.PrometheusRegistry.MustRegister(serverMetrics)
serverLog := log.NewEntry(log.StandardLogger())
streamInterceptors := []grpc.StreamServerInterceptor{

View File

@@ -20,23 +20,29 @@ if #obj.status.conditions == 0 then
end
local isEstablished
local isTerminating
local namesNotAccepted
local hasViolations
local conditionMsg = ""
for _, condition in pairs(obj.status.conditions) do
-- Check if CRD is terminating
if condition.type == "Terminating" and condition.status == "True" then
isTerminating = true
conditionMsg = condition.message
hs.status = "Progressing"
hs.message = "CRD is terminating: " .. condition.message
return hs
end
-- Check if K8s has accepted names for this CRD
if condition.type == "NamesAccepted" and condition.status == "False" then
namesNotAccepted = true
conditionMsg = condition.message
hs.status = "Degraded"
hs.message = "CRD names have not been accepted: " .. condition.message
return hs
end
-- Checking if CRD has violations
if condition.type == "NonStructuralSchema" and condition.status == "True" then
hs.status = "Degraded"
hs.message = "Schema violations found: " .. condition.message
return hs
end
-- Checking if CRD is established
@@ -44,25 +50,6 @@ for _, condition in pairs(obj.status.conditions) do
isEstablished = true
conditionMsg = condition.message
end
-- Checking if CRD has violations
if condition.type == "NonStructuralSchema" and condition.status == "True" then
hasViolations = true
conditionMsg = condition.message
end
end
if isTerminating then
hs.status = "Progressing"
hs.message = "CRD is terminating: " .. conditionMsg
return hs
end
if namesNotAccepted then
hs.status = "Degraded"
hs.message = "CRD names have not been accepted: " .. conditionMsg
return hs
end
if not isEstablished then
@@ -71,12 +58,6 @@ if not isEstablished then
return hs
end
if hasViolations then
hs.status = "Degraded"
hs.message = "Schema violations found: " .. conditionMsg
return hs
end
hs.status = "Healthy"
hs.message = "CRD is healthy"
return hs

View File

@@ -47,15 +47,15 @@ status:
reason: NoConflicts
status: 'True'
type: NamesAccepted
- lastTransitionTime: '2024-05-19T23:35:28Z'
message: the initial names have been accepted
reason: InitialNamesAccepted
status: 'True'
type: Established
- lastTransitionTime: '2024-10-26T19:44:57Z'
message: 'spec.preserveUnknownFields: Invalid value: true: must be false'
reason: Violations
status: 'True'
type: NonStructuralSchema
- lastTransitionTime: '2024-05-19T23:35:28Z'
message: the initial names have been accepted
reason: InitialNamesAccepted
status: 'True'
type: Established
storedVersions:
- v1alpha1

View File

@@ -4,6 +4,13 @@ if obj.spec.suspend ~= nil and obj.spec.suspend == true then
hs.status = "Suspended"
return hs
end
-- Helm repositories of type "oci" do not contain any information in the status
-- https://fluxcd.io/flux/components/source/helmrepositories/#helmrepository-status
if obj.spec.type ~= nil and obj.spec.type == "oci" then
hs.message = "Helm repositories of type 'oci' do not contain any information in the status."
hs.status = "Healthy"
return hs
end
if obj.status ~= nil then
if obj.status.conditions ~= nil then
local numProgressing = 0

View File

@@ -11,3 +11,7 @@ tests:
status: Healthy
message: Succeeded
inputPath: testdata/healthy.yaml
- healthStatus:
status: Healthy
message: "Helm repositories of type 'oci' do not contain any information in the status."
inputPath: testdata/oci.yaml

View File

@@ -0,0 +1,10 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: podinfo
namespace: default
spec:
type: "oci"
interval: 5m0s
url: oci://ghcr.io/stefanprodan/charts
status: {}

View File

@@ -1288,6 +1288,17 @@ func (s *Server) validateAndNormalizeApp(ctx context.Context, app *v1alpha1.Appl
if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionUpdate, currApp.RBACName(s.ns)); err != nil {
return err
}
// Validate that the new project exists and the application is allowed to use it
newProj, err := s.getAppProject(ctx, app, log.WithFields(log.Fields{
"application": app.Name,
"app-namespace": app.Namespace,
"app-qualified-name": app.QualifiedName(),
"project": app.Spec.Project,
}))
if err != nil {
return err
}
proj = newProj
}
if _, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, s.db); err != nil {
@@ -2679,7 +2690,7 @@ func (s *Server) GetApplicationSyncWindows(ctx context.Context, q *application.A
func (s *Server) inferResourcesStatusHealth(app *v1alpha1.Application) {
if app.Status.ResourceHealthSource == v1alpha1.ResourceHealthLocationAppTree {
tree := &v1alpha1.ApplicationTree{}
if err := s.cache.GetAppResourcesTree(app.Name, tree); err == nil {
if err := s.cache.GetAppResourcesTree(app.InstanceName(s.ns), tree); err == nil {
healthByKey := map[kube.ResourceKey]*v1alpha1.HealthStatus{}
for _, node := range tree.Nodes {
if node.Health != nil {

View File

@@ -1511,14 +1511,130 @@ func TestCreateAppWithOperation(t *testing.T) {
}
func TestUpdateApp(t *testing.T) {
testApp := newTestApp()
appServer := newTestAppServer(t, testApp)
testApp.Spec.Project = ""
app, err := appServer.Update(t.Context(), &application.ApplicationUpdateRequest{
Application: testApp,
t.Parallel()
t.Run("Same spec", func(t *testing.T) {
t.Parallel()
testApp := newTestApp()
appServer := newTestAppServer(t, testApp)
testApp.Spec.Project = ""
app, err := appServer.Update(t.Context(), &application.ApplicationUpdateRequest{
Application: testApp,
})
require.NoError(t, err)
assert.Equal(t, "default", app.Spec.Project)
})
t.Run("Invalid existing app can be updated", func(t *testing.T) {
t.Parallel()
testApp := newTestApp()
testApp.Spec.Destination.Server = "https://invalid-cluster"
appServer := newTestAppServer(t, testApp)
updateApp := newTestAppWithDestName()
updateApp.TypeMeta = testApp.TypeMeta
updateApp.Spec.Source.Name = "updated"
app, err := appServer.Update(t.Context(), &application.ApplicationUpdateRequest{
Application: updateApp,
})
require.NoError(t, err)
require.NotNil(t, app)
assert.Equal(t, "updated", app.Spec.Source.Name)
})
t.Run("Can update application project from invalid", func(t *testing.T) {
t.Parallel()
testApp := newTestApp()
restrictedProj := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "restricted-proj", Namespace: "default"},
Spec: v1alpha1.AppProjectSpec{
SourceRepos: []string{"not-your-repo"},
Destinations: []v1alpha1.ApplicationDestination{{Server: "*", Namespace: "not-your-namespace"}},
},
}
testApp.Spec.Project = restrictedProj.Name
appServer := newTestAppServer(t, testApp, restrictedProj)
updateApp := newTestAppWithDestName()
updateApp.TypeMeta = testApp.TypeMeta
updateApp.Spec.Project = "my-proj"
app, err := appServer.Update(t.Context(), &application.ApplicationUpdateRequest{
Application: updateApp,
})
require.NoError(t, err)
require.NotNil(t, app)
assert.Equal(t, "my-proj", app.Spec.Project)
})
t.Run("Cannot update application project to invalid", func(t *testing.T) {
t.Parallel()
testApp := newTestApp()
restrictedProj := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "restricted-proj", Namespace: "default"},
Spec: v1alpha1.AppProjectSpec{
SourceRepos: []string{"not-your-repo"},
Destinations: []v1alpha1.ApplicationDestination{{Server: "*", Namespace: "not-your-namespace"}},
},
}
appServer := newTestAppServer(t, testApp, restrictedProj)
updateApp := newTestAppWithDestName()
updateApp.TypeMeta = testApp.TypeMeta
updateApp.Spec.Project = restrictedProj.Name
_, err := appServer.Update(t.Context(), &application.ApplicationUpdateRequest{
Application: updateApp,
})
require.Error(t, err)
require.ErrorContains(t, err, "application repo https://github.com/argoproj/argocd-example-apps.git is not permitted in project 'restricted-proj'")
require.ErrorContains(t, err, "application destination server 'fake-cluster' and namespace 'fake-dest-ns' do not match any of the allowed destinations in project 'restricted-proj'")
})
t.Run("Cannot update application project to inexisting", func(t *testing.T) {
t.Parallel()
testApp := newTestApp()
appServer := newTestAppServer(t, testApp)
updateApp := newTestAppWithDestName()
updateApp.TypeMeta = testApp.TypeMeta
updateApp.Spec.Project = "i-do-not-exist"
_, err := appServer.Update(t.Context(), &application.ApplicationUpdateRequest{
Application: updateApp,
})
require.Error(t, err)
require.ErrorContains(t, err, "app is not allowed in project \"i-do-not-exist\", or the project does not exist")
})
t.Run("Can update application project with project argument", func(t *testing.T) {
t.Parallel()
testApp := newTestApp()
appServer := newTestAppServer(t, testApp)
updateApp := newTestAppWithDestName()
updateApp.TypeMeta = testApp.TypeMeta
updateApp.Spec.Project = "my-proj"
app, err := appServer.Update(t.Context(), &application.ApplicationUpdateRequest{
Application: updateApp,
Project: ptr.To("default"),
})
require.NoError(t, err)
require.NotNil(t, app)
assert.Equal(t, "my-proj", app.Spec.Project)
})
t.Run("Existing label and annotations are replaced", func(t *testing.T) {
t.Parallel()
testApp := newTestApp()
testApp.Annotations = map[string]string{"test": "test-value", "update": "old"}
testApp.Labels = map[string]string{"test": "test-value", "update": "old"}
appServer := newTestAppServer(t, testApp)
updateApp := newTestAppWithDestName()
updateApp.TypeMeta = testApp.TypeMeta
updateApp.Annotations = map[string]string{"update": "new"}
updateApp.Labels = map[string]string{"update": "new"}
app, err := appServer.Update(t.Context(), &application.ApplicationUpdateRequest{
Application: updateApp,
})
require.NoError(t, err)
require.NotNil(t, app)
assert.Len(t, app.Annotations, 1)
assert.Equal(t, "new", app.GetAnnotations()["update"])
assert.Len(t, app.Labels, 1)
assert.Equal(t, "new", app.GetLabels()["update"])
})
require.NoError(t, err)
assert.Equal(t, "default", app.Spec.Project)
}
func TestUpdateAppSpec(t *testing.T) {

View File

@@ -203,7 +203,7 @@ func (s *Server) Create(ctx context.Context, q *applicationset.ApplicationSetCre
}
if q.GetDryRun() {
apps, err := s.generateApplicationSetApps(ctx, log.WithField("applicationset", appset.Name), *appset, namespace)
apps, err := s.generateApplicationSetApps(ctx, log.WithField("applicationset", appset.Name), *appset)
if err != nil {
return nil, fmt.Errorf("unable to generate Applications of ApplicationSet: %w", err)
}
@@ -262,12 +262,12 @@ func (s *Server) Create(ctx context.Context, q *applicationset.ApplicationSetCre
return updated, nil
}
func (s *Server) generateApplicationSetApps(ctx context.Context, logEntry *log.Entry, appset v1alpha1.ApplicationSet, namespace string) ([]v1alpha1.Application, error) {
func (s *Server) generateApplicationSetApps(ctx context.Context, logEntry *log.Entry, appset v1alpha1.ApplicationSet) ([]v1alpha1.Application, error) {
argoCDDB := s.db
scmConfig := generators.NewSCMConfig(s.ScmRootCAPath, s.AllowedScmProviders, s.EnableScmProviders, github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)), true)
argoCDService := services.NewArgoCDService(s.db, s.GitSubmoduleEnabled, s.repoClientSet, s.EnableNewGitFileGlobbing)
appSetGenerators := generators.GetGenerators(ctx, s.client, s.k8sClient, namespace, argoCDService, s.dynamicClient, scmConfig)
appSetGenerators := generators.GetGenerators(ctx, s.client, s.k8sClient, s.ns, argoCDService, s.dynamicClient, scmConfig)
apps, _, err := appsettemplate.GenerateApplications(logEntry, appset, appSetGenerators, &appsetutils.Render{}, s.client)
if err != nil {
@@ -364,11 +364,15 @@ func (s *Server) Generate(ctx context.Context, q *applicationset.ApplicationSetG
if appset == nil {
return nil, errors.New("error creating ApplicationSets: ApplicationSets is nil in request")
}
namespace := s.appsetNamespaceOrDefault(appset.Namespace)
// The RBAC check needs to be performed against the appset namespace.
// However, when generating params, the server namespace needs to be
// passed.
namespace := s.appsetNamespaceOrDefault(appset.Namespace)
if !s.isNamespaceEnabled(namespace) {
return nil, security.NamespaceNotPermittedError(namespace)
}
projectName, err := s.validateAppSet(appset)
if err != nil {
return nil, fmt.Errorf("error validating ApplicationSets: %w", err)
@@ -381,7 +385,16 @@ func (s *Server) Generate(ctx context.Context, q *applicationset.ApplicationSetG
logger := log.New()
logger.SetOutput(logs)
apps, err := s.generateApplicationSetApps(ctx, logger.WithField("applicationset", appset.Name), *appset, namespace)
// The server namespace will be used in the function, since this is
// the exact namespace that is used to generate parameters
// (especially for the git generator).
//
// In the case of the git generator, if the namespace were set to the
// appset namespace, we would look for a project in the appset
// namespace, which would lead to an error when generating params
// under the appsets-in-any-namespace feature.
// See https://github.com/argoproj/argo-cd/issues/22942
apps, err := s.generateApplicationSetApps(ctx, logger.WithField("applicationset", appset.Name), *appset)
if err != nil {
return nil, fmt.Errorf("unable to generate Applications of ApplicationSet: %w\n%s", err, logs.String())
}
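
A condensed sketch of the namespace split this change establishes (a hypothetical wrapper, not part of the commit; the real code is in the hunks above): the namespace the request targets gates RBAC, while generation itself now always runs against the server's own namespace (s.ns).

// namespaceSplitSketch is a hypothetical condensation of the change above:
// the appset namespace gates RBAC, the server namespace drives generation.
func (s *Server) namespaceSplitSketch(ctx context.Context, appset *v1alpha1.ApplicationSet) ([]v1alpha1.Application, error) {
	rbacNS := s.appsetNamespaceOrDefault(appset.Namespace) // namespace the caller targets
	if !s.isNamespaceEnabled(rbacNS) {
		return nil, security.NamespaceNotPermittedError(rbacNS)
	}
	// generateApplicationSetApps no longer takes a namespace; it uses s.ns
	// internally, so the appset namespace cannot leak into the git
	// generator's AppProject lookup (issue #22942).
	return s.generateApplicationSetApps(ctx, log.WithField("applicationset", appset.Name), *appset)
}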

View File

@@ -4,6 +4,9 @@ import (
"sort"
"testing"
"sigs.k8s.io/controller-runtime/pkg/client"
cr_fake "sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/pkg/sync"
"github.com/stretchr/testify/assert"
@@ -50,7 +53,7 @@ func fakeCluster() *appsv1.Cluster {
}
// return an ApplicationSet server which returns fake data
func newTestAppSetServer(t *testing.T, objects ...runtime.Object) *Server {
func newTestAppSetServer(t *testing.T, objects ...client.Object) *Server {
t.Helper()
f := func(enf *rbac.Enforcer) {
_ = enf.SetBuiltinPolicy(assets.BuiltinPolicyCSV)
@@ -61,7 +64,7 @@ func newTestAppSetServer(t *testing.T, objects ...runtime.Object) *Server {
}
// return a namespaced ApplicationSet server which returns fake data
func newTestNamespacedAppSetServer(t *testing.T, objects ...runtime.Object) *Server {
func newTestNamespacedAppSetServer(t *testing.T, objects ...client.Object) *Server {
t.Helper()
f := func(enf *rbac.Enforcer) {
_ = enf.SetBuiltinPolicy(assets.BuiltinPolicyCSV)
@@ -71,7 +74,7 @@ func newTestNamespacedAppSetServer(t *testing.T, objects ...runtime.Object) *Ser
return newTestAppSetServerWithEnforcerConfigure(t, f, scopedNamespaces, objects...)
}
func newTestAppSetServerWithEnforcerConfigure(t *testing.T, f func(*rbac.Enforcer), namespace string, objects ...runtime.Object) *Server {
func newTestAppSetServerWithEnforcerConfigure(t *testing.T, f func(*rbac.Enforcer), namespace string, objects ...client.Object) *Server {
t.Helper()
kubeclientset := fake.NewClientset(&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -115,7 +118,11 @@ func newTestAppSetServerWithEnforcerConfigure(t *testing.T, f func(*rbac.Enforce
objects = append(objects, defaultProj, myProj)
fakeAppsClientset := apps.NewSimpleClientset(objects...)
runtimeObjects := make([]runtime.Object, len(objects))
for i := range objects {
runtimeObjects[i] = objects[i]
}
fakeAppsClientset := apps.NewSimpleClientset(runtimeObjects...)
factory := appinformer.NewSharedInformerFactoryWithOptions(fakeAppsClientset, 0, appinformer.WithNamespace(namespace), appinformer.WithTweakListOptions(func(_ *metav1.ListOptions) {}))
fakeProjLister := factory.Argoproj().V1alpha1().AppProjects().Lister().AppProjects(testNamespace)
@@ -140,6 +147,13 @@ func newTestAppSetServerWithEnforcerConfigure(t *testing.T, f func(*rbac.Enforce
panic("Timed out waiting for caches to sync")
}
scheme := runtime.NewScheme()
err = appsv1.AddToScheme(scheme)
require.NoError(t, err)
err = corev1.AddToScheme(scheme)
require.NoError(t, err)
crClient := cr_fake.NewClientBuilder().WithScheme(scheme).WithObjects(objects...).Build()
projInformer := factory.Argoproj().V1alpha1().AppProjects().Informer()
go projInformer.Run(ctx.Done())
if !k8scache.WaitForCacheSync(ctx.Done(), projInformer.HasSynced) {
@@ -150,7 +164,7 @@ func newTestAppSetServerWithEnforcerConfigure(t *testing.T, f func(*rbac.Enforce
db,
kubeclientset,
nil,
nil,
crClient,
enforcer,
nil,
fakeAppsClientset,
@@ -640,3 +654,54 @@ func TestResourceTree(t *testing.T) {
assert.EqualError(t, err, "namespace 'NOT-ALLOWED' is not permitted")
})
}
func TestAppSet_Generate_Cluster(t *testing.T) {
appSet1 := newTestAppSet(func(appset *appsv1.ApplicationSet) {
appset.Name = "AppSet1"
appset.Spec.Template.Name = "{{name}}"
appset.Spec.Generators = []appsv1.ApplicationSetGenerator{
{
Clusters: &appsv1.ClusterGenerator{},
},
}
})
t.Run("Generate in default namespace", func(t *testing.T) {
appSetServer := newTestAppSetServer(t, appSet1)
appsetQuery := applicationset.ApplicationSetGenerateRequest{
ApplicationSet: appSet1,
}
res, err := appSetServer.Generate(t.Context(), &appsetQuery)
require.NoError(t, err)
require.Len(t, res.Applications, 2)
assert.Equal(t, "fake-cluster", res.Applications[0].Name)
assert.Equal(t, "in-cluster", res.Applications[1].Name)
})
t.Run("Generate in different namespace", func(t *testing.T) {
appSetServer := newTestAppSetServer(t, appSet1)
appSet1Ns := appSet1.DeepCopy()
appSet1Ns.Namespace = "external-namespace"
appsetQuery := applicationset.ApplicationSetGenerateRequest{ApplicationSet: appSet1Ns}
res, err := appSetServer.Generate(t.Context(), &appsetQuery)
require.NoError(t, err)
require.Len(t, res.Applications, 2)
assert.Equal(t, "fake-cluster", res.Applications[0].Name)
assert.Equal(t, "in-cluster", res.Applications[1].Name)
})
t.Run("Generate in not allowed namespace", func(t *testing.T) {
appSetServer := newTestAppSetServer(t, appSet1)
appSet1Ns := appSet1.DeepCopy()
appSet1Ns.Namespace = "NOT-ALLOWED"
appsetQuery := applicationset.ApplicationSetGenerateRequest{ApplicationSet: appSet1Ns}
_, err := appSetServer.Generate(t.Context(), &appsetQuery)
assert.EqualError(t, err, "namespace 'NOT-ALLOWED' is not permitted")
})
}
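
Since client.Object embeds runtime.Object, the conversion loop added to the helper can also be factored out; a minimal sketch (hypothetical helper, not part of the change), which the helper could call as apps.NewSimpleClientset(toRuntimeObjects(objects)...):

// toRuntimeObjects widens []client.Object into []runtime.Object; Go does
// not convert slice element types implicitly, hence the explicit copy.
func toRuntimeObjects(objs []client.Object) []runtime.Object {
	out := make([]runtime.Object, len(objs))
	for i := range objs {
		out[i] = objs[i]
	}
	return out
}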

View File

@@ -471,19 +471,8 @@ func (s *Server) RotateAuth(ctx context.Context, q *cluster.ClusterQuery) (*clus
}
func (s *Server) toAPIResponse(clust *appv1.Cluster) *appv1.Cluster {
clust = clust.Sanitized()
_ = s.cache.GetClusterInfo(clust.Server, &clust.Info)
clust.Config.Password = ""
clust.Config.BearerToken = ""
clust.Config.TLSClientConfig.KeyData = nil
if clust.Config.ExecProviderConfig != nil {
// We can't know what the user has put into args or
// env vars on the exec provider that might be sensitive
// (e.g. --private-key=XXX, PASSWORD=XXX)
// Implicitly assumes the command executable name is non-sensitive
clust.Config.ExecProviderConfig.Env = make(map[string]string)
clust.Config.ExecProviderConfig.Args = nil
}
// populate deprecated fields for backward compatibility
//nolint:staticcheck
clust.ServerVersion = clust.Info.ServerVersion
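
The deleted block is the scrubbing that Cluster.Sanitized() is now expected to cover; reconstructed here as a standalone sketch for reference, with field names taken verbatim from the removed lines:

// sanitizeClusterConfig mirrors the removed inline scrubbing (sketch).
func sanitizeClusterConfig(c *appv1.Cluster) {
	c.Config.Password = ""
	c.Config.BearerToken = ""
	c.Config.TLSClientConfig.KeyData = nil
	if c.Config.ExecProviderConfig != nil {
		// Exec args and env may carry secrets (e.g. --private-key=XXX,
		// PASSWORD=XXX); only the executable name is assumed non-sensitive.
		c.Config.ExecProviderConfig.Env = make(map[string]string)
		c.Config.ExecProviderConfig.Args = nil
	}
}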

View File

@@ -21,6 +21,7 @@ type MetricsServer struct {
extensionRequestCounter *prometheus.CounterVec
extensionRequestDuration *prometheus.HistogramVec
argoVersion *prometheus.GaugeVec
PrometheusRegistry *prometheus.Registry
}
var (
@@ -81,8 +82,7 @@ func NewMetricsServer(host string, port int) *MetricsServer {
registry.MustRegister(extensionRequestDuration)
registry.MustRegister(argoVersion)
kubectlMetricsServer := kubectl.NewKubectlMetrics()
kubectlMetricsServer.RegisterWithClientGo()
kubectl.RegisterWithClientGo()
kubectl.RegisterWithPrometheus(registry)
return &MetricsServer{
@@ -95,6 +95,7 @@ func NewMetricsServer(host string, port int) *MetricsServer {
extensionRequestCounter: extensionRequestCounter,
extensionRequestDuration: extensionRequestDuration,
argoVersion: argoVersion,
PrometheusRegistry: registry,
}
}
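
Exposing PrometheusRegistry lets other components register collectors on the registry the metrics endpoint already serves, instead of creating private registries whose metrics are never scraped. A self-contained sketch of the pattern (illustrative wiring, not the server's actual setup):

package main

import (
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	serverMetrics := grpc_prometheus.NewServerMetrics()
	reg.MustRegister(serverMetrics) // shared registry, same /metrics endpoint
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":9090", nil)
}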

View File

@@ -310,12 +310,20 @@ func (s *Server) GetDetailedProject(ctx context.Context, q *project.ProjectQuery
}
proj.NormalizeJWTTokens()
globalProjects := argo.GetGlobalProjects(proj, listersv1alpha1.NewAppProjectLister(s.projInformer.GetIndexer()), s.settingsMgr)
var apiRepos []*v1alpha1.Repository
for _, repo := range repositories {
apiRepos = append(apiRepos, repo.Normalize().Sanitized())
}
var apiClusters []*v1alpha1.Cluster
for _, cluster := range clusters {
apiClusters = append(apiClusters, cluster.Sanitized())
}
return &project.DetailedProjectsResponse{
GlobalProjects: globalProjects,
Project: proj,
Repositories: repositories,
Clusters: clusters,
Repositories: apiRepos,
Clusters: apiClusters,
}, err
}
@@ -412,7 +420,8 @@ func (s *Server) Update(ctx context.Context, q *project.ProjectUpdateRequest) (*
destCluster, err := argo.GetDestinationCluster(ctx, a.Spec.Destination, s.db)
if err != nil {
if err.Error() != argo.ErrDestinationMissing {
return nil, err
// If cluster is not found, we should discard this app, as it's most likely already in error
continue
}
invalidDstCount++
}

View File

@@ -743,6 +743,35 @@ p, role:admin, projects, update, *, allow`)
_, err := projectServer.GetSyncWindowsState(ctx, &project.SyncWindowsQuery{Name: projectWithSyncWindows.Name})
assert.EqualError(t, err, "rpc error: code = PermissionDenied desc = permission denied: projects, get, test")
})
t.Run("TestAddSyncWindowWhenAnAppReferencesAClusterThatDoesNotExist", func(t *testing.T) {
_ = enforcer.SetBuiltinPolicy(`p, role:admin, projects, get, *, allow
p, role:admin, projects, update, *, allow`)
sessionMgr := session.NewSessionManager(settingsMgr, test.NewFakeProjLister(), "", nil, session.NewUserStateStorage(nil))
projectWithAppWithInvalidCluster := existingProj.DeepCopy()
argoDB := db.NewDB("default", settingsMgr, kubeclientset)
invalidApp := v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{Name: "test-invalid", Namespace: "default"},
Spec: v1alpha1.ApplicationSpec{Source: &v1alpha1.ApplicationSource{}, Project: "test", Destination: v1alpha1.ApplicationDestination{Namespace: "ns3", Server: "https://server4"}},
}
projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projectWithAppWithInvalidCluster, &invalidApp), enforcer, sync.NewKeyLock(), sessionMgr, nil, projInformer, settingsMgr, argoDB, testEnableEventList)
// Add sync window
syncWindow := v1alpha1.SyncWindow{
Kind: "deny",
Schedule: "* * * * *",
Duration: "1h",
Applications: []string{"*"},
Clusters: []string{"*"},
}
projectWithAppWithInvalidCluster.Spec.SyncWindows = append(projectWithAppWithInvalidCluster.Spec.SyncWindows, &syncWindow)
res, err := projectServer.Update(ctx, &project.ProjectUpdateRequest{
Project: projectWithAppWithInvalidCluster,
})
require.NoError(t, err)
assert.Len(t, res.Spec.SyncWindows, 1)
})
}
func newEnforcer(kubeclientset *fake.Clientset) *rbac.Enforcer {

View File

@@ -313,7 +313,7 @@ func TestRepositoryServer(t *testing.T) {
testRepo := &appsv1.Repository{
Repo: url,
Type: "git",
Username: "foo",
Username: "",
InheritedCreds: true,
}
db.On("ListRepositories", t.Context()).Return([]*appsv1.Repository{testRepo}, nil)

View File

@@ -564,7 +564,7 @@ func (server *ArgoCDServer) Run(ctx context.Context, listeners *Listeners) {
svcSet := newArgoCDServiceSet(server)
server.serviceSet = svcSet
grpcS, appResourceTreeFn := server.newGRPCServer()
grpcS, appResourceTreeFn := server.newGRPCServer(metricsServ.PrometheusRegistry)
grpcWebS := grpcweb.WrapServer(grpcS)
var httpS *http.Server
var httpsS *http.Server
@@ -887,14 +887,13 @@ func (server *ArgoCDServer) useTLS() bool {
return true
}
func (server *ArgoCDServer) newGRPCServer() (*grpc.Server, application.AppResourceTreeFn) {
func (server *ArgoCDServer) newGRPCServer(prometheusRegistry *prometheus.Registry) (*grpc.Server, application.AppResourceTreeFn) {
var serverMetricsOptions []grpc_prometheus.ServerMetricsOption
if enableGRPCTimeHistogram {
serverMetricsOptions = append(serverMetricsOptions, grpc_prometheus.WithServerHandlingTimeHistogram())
}
serverMetrics := grpc_prometheus.NewServerMetrics(serverMetricsOptions...)
reg := prometheus.NewRegistry()
reg.MustRegister(serverMetrics)
prometheusRegistry.MustRegister(serverMetrics)
sOpts := []grpc.ServerOption{
// Set the both send and receive the bytes limit to be 100MB
@@ -1226,7 +1225,7 @@ func (server *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWeb
// Webhook handler for git events (Note: cache timeouts are hardcoded because the API server does not write to the cache and does not really use them)
argoDB := db.NewDB(server.Namespace, server.settingsMgr, server.KubeClientset)
acdWebhookHandler := webhook.NewHandler(server.Namespace, server.ArgoCDServerOpts.ApplicationNamespaces, server.ArgoCDServerOpts.WebhookParallelism, server.AppClientset, server.settings, server.settingsMgr, server.RepoServerCache, server.Cache, argoDB, server.settingsMgr.GetMaxWebhookPayloadSize())
acdWebhookHandler := webhook.NewHandler(server.Namespace, server.ArgoCDServerOpts.ApplicationNamespaces, server.ArgoCDServerOpts.WebhookParallelism, server.AppClientset, server.appLister, server.settings, server.settingsMgr, server.RepoServerCache, server.Cache, argoDB, server.settingsMgr.GetMaxWebhookPayloadSize())
mux.HandleFunc("/api/webhook", acdWebhookHandler.Handler)
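
Passing server.appLister into the webhook handler replaces per-delivery List calls against the API server with reads from the shared informer cache, which is where the memory reduction comes from. A minimal sketch of the cached read path (assuming Argo CD's generated ApplicationLister type):

// listViaCache is a hypothetical helper showing the informer-backed read:
// served from the informer's local store, with no API-server round trip
// per webhook delivery.
func listViaCache(appLister applisters.ApplicationLister, ns string) ([]*v1alpha1.Application, error) {
	return appLister.Applications(ns).List(labels.Everything())
}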

View File

@@ -8,11 +8,11 @@ RUN ln -s /usr/lib/$(uname -m)-linux-gnu /usr/lib/linux-gnu
# Please make sure to also check the contained yarn version and update the references below when upgrading this image's version
FROM docker.io/library/node:22.9.0@sha256:69e667a79aa41ec0db50bc452a60e705ca16f35285eaf037ebe627a65a5cdf52 AS node
FROM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS golang
FROM docker.io/library/golang:1.24.6@sha256:2c89c41fb9efc3807029b59af69645867cfe978d2b877d475be0d72f6c6ce6f6 AS golang
FROM docker.io/library/registry:2.8@sha256:543dade69668e02e5768d7ea2b0aa4fae6aa7384c9a5a8dbecc2be5136079ddb AS registry
FROM docker.io/bitnami/kubectl:1.32@sha256:493d1b871556d48d6b25d471f192c2427571cd6f78523eebcaf4d263353c7487 AS kubectl
FROM docker.io/bitnamilegacy/kubectl:1.32@sha256:493d1b871556d48d6b25d471f192c2427571cd6f78523eebcaf4d263353c7487 AS kubectl
FROM docker.io/library/ubuntu:24.04@sha256:80dd3c3b9c6cecb9f1667e9290b3bc61b78c2678c02cbdae5f0fea92cc6734ab

View File

@@ -103,4 +103,6 @@ func TestKubectlMetrics(t *testing.T) {
assert.Contains(t, string(body), "argocd_kubectl_response_size_bytes", "metrics should have contained argocd_kubectl_response_size_bytes")
assert.Contains(t, string(body), "argocd_kubectl_rate_limiter_duration_seconds", "metrics should have contained argocd_kubectl_rate_limiter_duration_seconds")
assert.Contains(t, string(body), "argocd_kubectl_requests_total", "metrics should have contained argocd_kubectl_requests_total")
assert.Contains(t, string(body), "grpc_server_handled_total", "metrics should have contained grpc_server_handled_total for all the reflected methods")
assert.Contains(t, string(body), "grpc_server_msg_received_total", "metrics should have contained grpc_server_msg_received_total for all the reflected methods")
}

View File

@@ -1,6 +1,6 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:80dd3c3b9c6cecb9f1667e9290b3bc61b78c2678c02cbdae5f0fea92cc6734ab
FROM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS go
FROM docker.io/library/golang:1.24.6@sha256:2c89c41fb9efc3807029b59af69645867cfe978d2b877d475be0d72f6c6ce6f6 AS go
RUN go install github.com/mattn/goreman@latest && \
go install github.com/kisielk/godepgraph@latest

View File

@@ -564,7 +564,7 @@ func ValidatePermissions(ctx context.Context, spec *argoappv1.ApplicationSpec, p
if !proj.IsSourcePermitted(spec.SourceHydrator.GetDrySource()) {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: fmt.Sprintf("application repo %s is not permitted in project '%s'", spec.GetSource().RepoURL, spec.Project),
Message: fmt.Sprintf("application repo %s is not permitted in project '%s'", spec.SourceHydrator.GetDrySource().RepoURL, proj.Name),
})
}
case spec.HasMultipleSources():
@@ -578,7 +578,7 @@ func ValidatePermissions(ctx context.Context, spec *argoappv1.ApplicationSpec, p
if !proj.IsSourcePermitted(source) {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: fmt.Sprintf("application repo %s is not permitted in project '%s'", source.RepoURL, spec.Project),
Message: fmt.Sprintf("application repo %s is not permitted in project '%s'", source.RepoURL, proj.Name),
})
}
}
@@ -591,7 +591,7 @@ func ValidatePermissions(ctx context.Context, spec *argoappv1.ApplicationSpec, p
if !proj.IsSourcePermitted(spec.GetSource()) {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: fmt.Sprintf("application repo %s is not permitted in project '%s'", spec.GetSource().RepoURL, spec.Project),
Message: fmt.Sprintf("application repo %s is not permitted in project '%s'", spec.GetSource().RepoURL, proj.Name),
})
}
}
@@ -604,22 +604,21 @@ func ValidatePermissions(ctx context.Context, spec *argoappv1.ApplicationSpec, p
})
return conditions, nil
}
if destCluster.Server != "" {
permitted, err := proj.IsDestinationPermitted(destCluster, spec.Destination.Namespace, func(project string) ([]*argoappv1.Cluster, error) {
return db.GetProjectClusters(ctx, project)
permitted, err := proj.IsDestinationPermitted(destCluster, spec.Destination.Namespace, func(project string) ([]*argoappv1.Cluster, error) {
return db.GetProjectClusters(ctx, project)
})
if err != nil {
return nil, err
}
if !permitted {
server := destCluster.Server
if spec.Destination.Name != "" {
server = destCluster.Name
}
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: fmt.Sprintf("application destination server '%s' and namespace '%s' do not match any of the allowed destinations in project '%s'", server, spec.Destination.Namespace, proj.Name),
})
if err != nil {
return nil, err
}
if !permitted {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: fmt.Sprintf("application destination server '%s' and namespace '%s' do not match any of the allowed destinations in project '%s'", spec.Destination.Server, spec.Destination.Namespace, spec.Project),
})
}
} else if destCluster.Server == "" {
conditions = append(conditions, argoappv1.ApplicationCondition{Type: argoappv1.ApplicationConditionInvalidSpecError, Message: ErrDestinationMissing})
}
return conditions, nil
}
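
Two reporting fixes are folded into the rewritten block: the error now names the destination the way the app addressed it (cluster name when spec.Destination.Name is set, server URL otherwise), and it reports the effective project proj.Name rather than spec.Project. The rule in isolation (hypothetical helper):

func destinationNotPermittedMessage(spec argoappv1.ApplicationSpec, destCluster *argoappv1.Cluster, proj *argoappv1.AppProject) string {
	server := destCluster.Server
	if spec.Destination.Name != "" {
		// The app addressed the cluster by name, so report the name.
		server = destCluster.Name
	}
	return fmt.Sprintf("application destination server '%s' and namespace '%s' do not match any of the allowed destinations in project '%s'",
		server, spec.Destination.Namespace, proj.Name)
}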

View File

@@ -94,12 +94,14 @@ func (db *db) ListClusters(_ context.Context) (*appv1.ClusterList, error) {
// CreateCluster creates a cluster
func (db *db) CreateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Cluster, error) {
settings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
if c.Server == appv1.KubernetesInternalAPIServerAddr && !settings.InClusterEnabled {
return nil, status.Errorf(codes.InvalidArgument, "cannot register cluster: in-cluster has been disabled")
if c.Server == appv1.KubernetesInternalAPIServerAddr {
settings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
if !settings.InClusterEnabled {
return nil, status.Errorf(codes.InvalidArgument, "cannot register cluster: in-cluster has been disabled")
}
}
secName, err := URIToSecretName("cluster", c.Server)
if err != nil {
@@ -225,12 +227,14 @@ func (db *db) getClusterSecret(server string) (*corev1.Secret, error) {
// GetCluster returns a cluster from a query
func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) {
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
if server == appv1.KubernetesInternalAPIServerAddr && !argoSettings.InClusterEnabled {
return nil, status.Errorf(codes.NotFound, "cluster %q is disabled", server)
if server == appv1.KubernetesInternalAPIServerAddr {
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
if !argoSettings.InClusterEnabled {
return nil, status.Errorf(codes.NotFound, "cluster %q is disabled", server)
}
}
informer, err := db.settingsMgr.GetSecretsInformer()

View File

@@ -719,6 +719,7 @@ func (creds AzureWorkloadIdentityCreds) Environ() (io.Closer, []string, error) {
}
nonce := creds.store.Add("", token)
env := creds.store.Environ(nonce)
env = append(env, fmt.Sprintf("%s=Authorization: Bearer %s", bearerAuthHeaderEnv, token))
return argoioutils.NewCloser(func() error {
creds.store.Remove(nonce)

View File

@@ -419,7 +419,7 @@ func TestAzureWorkloadIdentityCreds_Environ(t *testing.T) {
workloadIdentityMock := new(mocks.TokenProvider)
workloadIdentityMock.On("GetToken", azureDevopsEntraResourceId).Return(&workloadidentity.Token{AccessToken: "accessToken", ExpiresOn: time.Now().Add(time.Minute)}, nil)
creds := AzureWorkloadIdentityCreds{store, workloadIdentityMock}
_, _, err := creds.Environ()
_, env, err := creds.Environ()
require.NoError(t, err)
assert.Len(t, store.creds, 1)
@@ -427,6 +427,9 @@ func TestAzureWorkloadIdentityCreds_Environ(t *testing.T) {
assert.Equal(t, "", value.username)
assert.Equal(t, "accessToken", value.password)
}
require.Len(t, env, 1)
assert.Equal(t, "ARGOCD_GIT_BEARER_AUTH_HEADER=Authorization: Bearer accessToken", env[0], "ARGOCD_GIT_BEARER_AUTH_HEADER env var must be set")
}
func TestAzureWorkloadIdentityCreds_Environ_cleanup(t *testing.T) {

View File

@@ -1,13 +1,14 @@
package git
import (
"crypto/fips140"
"fmt"
gitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
"golang.org/x/crypto/ssh"
)
// List of all currently supported algorithms for SSH key exchange
// SupportedSSHKeyExchangeAlgorithms is a list of all currently supported algorithms for SSH key exchange
// Unfortunately, crypto/ssh does not offer public constants or a
// list for this.
var SupportedSSHKeyExchangeAlgorithms = []string{
@@ -21,10 +22,15 @@ var SupportedSSHKeyExchangeAlgorithms = []string{
"diffie-hellman-group14-sha1",
}
// List of default key exchange algorithms to use. We use those that are
// available by default, we can become more opinionated later on (when
// we support configuration of algorithms to use).
var DefaultSSHKeyExchangeAlgorithms = SupportedSSHKeyExchangeAlgorithms
// SupportedFIPSCompliantSSHKeyExchangeAlgorithms is a list of all currently supported algorithms for SSH key exchange
// that are FIPS compliant
var SupportedFIPSCompliantSSHKeyExchangeAlgorithms = []string{
"ecdh-sha2-nistp256",
"ecdh-sha2-nistp384",
"ecdh-sha2-nistp521",
"diffie-hellman-group-exchange-sha256",
"diffie-hellman-group14-sha256",
}
// PublicKeysWithOptions is an auth method for go-git's SSH client that
// inherits from PublicKeys, but provides the possibility to override
@@ -51,9 +57,17 @@ func (a *PublicKeysWithOptions) ClientConfig() (*ssh.ClientConfig, error) {
if len(a.KexAlgorithms) > 0 {
kexAlgos = a.KexAlgorithms
} else {
kexAlgos = DefaultSSHKeyExchangeAlgorithms
kexAlgos = getDefaultSSHKeyExchangeAlgorithms()
}
config := ssh.Config{KeyExchanges: kexAlgos}
opts := &ssh.ClientConfig{Config: config, User: a.User, Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)}}
return a.SetHostKeyCallback(opts)
}
// getDefaultSSHKeyExchangeAlgorithms returns the default key exchange algorithms to be used
func getDefaultSSHKeyExchangeAlgorithms() []string {
if fips140.Enabled() {
return SupportedFIPSCompliantSSHKeyExchangeAlgorithms
}
return SupportedSSHKeyExchangeAlgorithms
}
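
crypto/fips140 is part of Go 1.24; fips140.Enabled() reports whether the process was started in FIPS 140-3 mode (for example with GODEBUG=fips140=on). A caller-side sketch, assuming PublicKeysWithOptions embeds go-git's gitssh.PublicKeys: leaving KexAlgorithms empty now selects the FIPS-compliant list automatically whenever FIPS mode is active.

auth := &PublicKeysWithOptions{
	PublicKeys: gitssh.PublicKeys{User: "git", Signer: signer},
	// KexAlgorithms deliberately unset: defaults follow fips140.Enabled()
}
cfg, err := auth.ClientConfig()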

View File

@@ -496,7 +496,11 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) ([]string, error)
).Info("took to get tags")
if c.indexCache != nil {
if err := c.indexCache.SetHelmIndex(tagsURL, data); err != nil {
cacheData, err := json.Marshal(entries)
if err != nil {
return nil, fmt.Errorf("failed to encode tags: %w", err)
}
if err := c.indexCache.SetHelmIndex(tagsURL, cacheData); err != nil {
log.Warnf("Failed to store tags list cache for repo: %s: %v", tagsURL, err)
}
}
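
The underlying mismatch: OCI tags cannot contain +, so semver build metadata arrives with _ (the test below exercises 2.0.0_beta), and GetTags normalizes tags before returning them. Caching the raw response body therefore stored unnormalized tags that the cache read path would have served verbatim; marshalling entries caches the already-normalized form. A condensation of that ordering (hypothetical helper, illustrative normalization):

func normalizeThenCache(rawTags []string) ([]byte, error) {
	entries := struct{ Tags []string }{}
	for _, t := range rawTags {
		// "_" stands in for "+" in OCI tags (illustrative; see the test below)
		entries.Tags = append(entries.Tags, strings.ReplaceAll(t, "_", "+"))
	}
	// Cache exactly the shape the read path decodes.
	return json.Marshal(entries)
}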

View File

@@ -481,3 +481,95 @@ func TestGetTagsFromURLEnvironmentAuthentication(t *testing.T) {
})
}
}
func TestGetTagsCaching(t *testing.T) {
requestCount := 0
server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestCount++
t.Logf("request %d called %s", requestCount, r.URL.Path)
responseTags := fakeTagsList{
Tags: []string{
"1.0.0",
"1.1.0",
"2.0.0_beta",
},
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
require.NoError(t, json.NewEncoder(w).Encode(responseTags))
}))
t.Cleanup(server.Close)
serverURL, err := url.Parse(server.URL)
require.NoError(t, err)
t.Run("should cache tags correctly", func(t *testing.T) {
cache := &fakeIndexCache{}
client := NewClient(serverURL.Host, HelmCreds{
InsecureSkipVerify: true,
}, true, "", "", WithIndexCache(cache))
tags1, err := client.GetTags("mychart", false)
require.NoError(t, err)
assert.ElementsMatch(t, tags1, []string{
"1.0.0",
"1.1.0",
"2.0.0+beta",
})
assert.Equal(t, 1, requestCount)
requestCount = 0
tags2, err := client.GetTags("mychart", false)
require.NoError(t, err)
assert.ElementsMatch(t, tags2, []string{
"1.0.0",
"1.1.0",
"2.0.0+beta",
})
assert.Equal(t, 0, requestCount)
assert.NotEmpty(t, cache.data)
type entriesStruct struct {
Tags []string
}
var entries entriesStruct
err = json.Unmarshal(cache.data, &entries)
require.NoError(t, err)
assert.ElementsMatch(t, entries.Tags, []string{
"1.0.0",
"1.1.0",
"2.0.0+beta",
})
})
t.Run("should bypass cache when noCache is true", func(t *testing.T) {
cache := &fakeIndexCache{}
client := NewClient(serverURL.Host, HelmCreds{
InsecureSkipVerify: true,
}, true, "", "", WithIndexCache(cache))
requestCount = 0
tags1, err := client.GetTags("mychart", true)
require.NoError(t, err)
assert.ElementsMatch(t, tags1, []string{
"1.0.0",
"1.1.0",
"2.0.0+beta",
})
assert.Equal(t, 1, requestCount)
tags2, err := client.GetTags("mychart", true)
require.NoError(t, err)
assert.ElementsMatch(t, tags2, []string{
"1.0.0",
"1.1.0",
"2.0.0+beta",
})
assert.Equal(t, 2, requestCount)
})
}

View File

@@ -49,101 +49,101 @@ The command removes all the Kubernetes components associated with the chart and
The following table lists the configurable parameters of the Redis chart and their default values.
| Parameter | Description | Default |
|--------------------------------------------|----------------------------------------------------------------------------------------------------------------|--------------------------------------|
| `image.registry` | Redis Image registry | `docker.io` |
| `image.repository` | Redis Image name | `bitnami/redis` |
| `image.tag` | Redis Image tag | `{VERSION}` |
| `image.pullPolicy` | Image pull policy | `Always` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` |
| `cluster.enabled` | Use master-slave topology | `true` |
| `cluster.slaveCount` | Number of slaves | 1 |
| `existingSecret` | Name of existing secret object (for password authentication) | `nil` |
| `usePassword` | Use password | `true` |
| `password` | Redis password (ignored if existingSecret set) | Randomly generated |
| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template |
| `rbac.create` | Specifies whether RBAC resources should be created | `false` |
| `rbac.role.rules` | Rules to create | `[]` |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.image.registry` | Redis exporter image registry | `docker.io` |
| `metrics.image.repository` | Redis exporter image name | `bitnami/redis` |
| `metrics.image.tag` | Redis exporter image tag | `v0.20.2` |
| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` |
| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} |
| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} |
| `master.service.type` | Kubernetes Service type (redis metrics) | `LoadBalancer` |
| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} |
| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` |
| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` |
| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` |
| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` |
| `master.persistence.path` | Path to mount the volume at, to use other images | `/bitnami` |
| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` |
| `master.persistence.storageClass` | Storage class of backing PVC | `generic` |
| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
| `master.persistence.size` | Size of data volume | `8Gi` |
| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete |
| `master.statefulset.rollingUpdatePartition`| Partition update strategy | `nil` |
| `master.podLabels` | Additional labels for Redis master pod | {} |
| `master.podAnnotations` | Additional annotations for Redis master pod | {} |
| `master.port` | Redis master port | 6379 |
| `master.args` | Redis master command-line args | [] |
| `master.disableCommands` | Comma-separated list of Redis commands to disable (master) | `FLUSHDB,FLUSHALL` |
| `master.extraFlags` | Redis master additional command line flags | [] |
| `master.nodeSelector` | Redis master Node labels for pod assignment | {"kubernetes.io/arch": "amd64"} |
| `master.tolerations` | Toleration labels for Redis master pod assignment | [] |
| `master.affinity ` | Affinity settings for Redis master pod assignment | [] |
| `master.schedulerName` | Name of an alternate scheduler | `nil` |
| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` |
| `master.service.annotations` | annotations for redis master service | {} |
| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` |
| `master.securityContext.enabled` | Enable security context (redis master pod) | `true` |
| `master.securityContext.fsGroup` | Group ID for the container (redis master pod) | `1001` |
| `master.securityContext.runAsUser` | User ID for the container (redis master pod) | `1001` |
| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` |
| `master.readinessProbe.initialDelaySeconds`| Delay before readiness probe is initiated (redis master pod) | `5` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
| `slave.serviceType` | Kubernetes Service type (redis slave) | `LoadBalancer` |
| `slave.service.annotations` | annotations for redis slave service | {} |
| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` |
| `slave.port` | Redis slave port | `master.port` |
| `slave.args` | Redis slave command-line args | `master.args` |
| `slave.disableCommands` | Comma-separated list of Redis commands to disable (slave) | `master.disableCommands` |
| `slave.extraFlags` | Redis slave additional command line flags | `master.extraFlags` |
| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `master.livenessProbe.enabled` |
| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `master.livenessProbe.initialDelaySeconds` |
| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `master.livenessProbe.periodSeconds` |
| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `master.livenessProbe.timeoutSeconds` |
| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `master.livenessProbe.successThreshold` |
| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `master.livenessProbe.failureThreshold` |
| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `master.readinessProbe.enabled` |
| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `master.readinessProbe.initialDelaySeconds` |
| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `master.readinessProbe.periodSeconds` |
| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `master.readinessProbe.timeoutSeconds` |
| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `master.readinessProbe.successThreshold` |
| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `master.readinessProbe.failureThreshold` |
| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` |
| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` |
| `slave.schedulerName` | Name of an alternate scheduler | `nil` |
| `slave.securityContext.enabled` | Enable security context (redis slave pod) | `master.securityContext.enabled` |
| `slave.securityContext.fsGroup` | Group ID for the container (redis slave pod) | `master.securityContext.fsGroup` |
| `slave.securityContext.runAsUser` | User ID for the container (redis slave pod) | `master.securityContext.runAsUser` |
| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `master.resources` |
| `slave.affinity` | Enable node/pod affinity for slaves | {} |
| Parameter | Description | Default |
|--------------------------------------------|----------------------------------------------------------------------------------------------------------------|---------------------------------------------|
| `image.registry` | Redis Image registry | `docker.io` |
| `image.repository` | Redis Image name | `bitnamilegacy/redis` |
| `image.tag` | Redis Image tag | `{VERSION}` |
| `image.pullPolicy` | Image pull policy | `Always` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` |
| `cluster.enabled` | Use master-slave topology | `true` |
| `cluster.slaveCount` | Number of slaves | 1 |
| `existingSecret` | Name of existing secret object (for password authentication) | `nil` |
| `usePassword` | Use password | `true` |
| `password` | Redis password (ignored if existingSecret set) | Randomly generated |
| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template |
| `rbac.create` | Specifies whether RBAC resources should be created | `false` |
| `rbac.role.rules` | Rules to create | `[]` |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.image.registry` | Redis exporter image registry | `docker.io` |
| `metrics.image.repository` | Redis exporter image name | `bitnamilegacy/redis` |
| `metrics.image.tag` | Redis exporter image tag | `v0.20.2` |
| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` |
| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} |
| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} |
| `master.service.type` | Kubernetes Service type (redis metrics) | `LoadBalancer` |
| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} |
| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` |
| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` |
| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` |
| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` |
| `master.persistence.path` | Path to mount the volume at, to use other images | `/bitnami` |
| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` |
| `master.persistence.storageClass` | Storage class of backing PVC | `generic` |
| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
| `master.persistence.size` | Size of data volume | `8Gi` |
| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete |
| `master.statefulset.rollingUpdatePartition`| Partition update strategy | `nil` |
| `master.podLabels` | Additional labels for Redis master pod | {} |
| `master.podAnnotations` | Additional annotations for Redis master pod | {} |
| `master.port` | Redis master port | 6379 |
| `master.args` | Redis master command-line args | [] |
| `master.disableCommands` | Comma-separated list of Redis commands to disable (master) | `FLUSHDB,FLUSHALL` |
| `master.extraFlags` | Redis master additional command line flags | [] |
| `master.nodeSelector` | Redis master Node labels for pod assignment | {"kubernetes.io/arch": "amd64"} |
| `master.tolerations` | Toleration labels for Redis master pod assignment | [] |
| `master.affinity ` | Affinity settings for Redis master pod assignment | [] |
| `master.schedulerName` | Name of an alternate scheduler | `nil` |
| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` |
| `master.service.annotations` | annotations for redis master service | {} |
| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` |
| `master.securityContext.enabled` | Enable security context (redis master pod) | `true` |
| `master.securityContext.fsGroup` | Group ID for the container (redis master pod) | `1001` |
| `master.securityContext.runAsUser` | User ID for the container (redis master pod) | `1001` |
| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` |
| `master.readinessProbe.initialDelaySeconds`| Delay before readiness probe is initiated (redis master pod) | `5` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
| `slave.serviceType` | Kubernetes Service type (redis slave) | `LoadBalancer` |
| `slave.service.annotations` | annotations for redis slave service | {} |
| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` |
| `slave.port` | Redis slave port | `master.port` |
| `slave.args` | Redis slave command-line args | `master.args` |
| `slave.disableCommands` | Comma-separated list of Redis commands to disable (slave) | `master.disableCommands` |
| `slave.extraFlags` | Redis slave additional command line flags | `master.extraFlags` |
| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `master.livenessProbe.enabled` |
| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `master.livenessProbe.initialDelaySeconds` |
| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `master.livenessProbe.periodSeconds` |
| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `master.livenessProbe.timeoutSeconds` |
| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `master.livenessProbe.successThreshold` |
| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `master.livenessProbe.failureThreshold` |
| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `master.readinessProbe.enabled` |
| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `master.readinessProbe.initialDelaySeconds` |
| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `master.readinessProbe.periodSeconds` |
| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `master.readinessProbe.timeoutSeconds` |
| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `master.readinessProbe.successThreshold` |
| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `master.readinessProbe.failureThreshold` |
| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` |
| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` |
| `slave.schedulerName` | Name of an alternate scheduler | `nil` |
| `slave.securityContext.enabled` | Enable security context (redis slave pod) | `master.securityContext.enabled` |
| `slave.securityContext.fsGroup` | Group ID for the container (redis slave pod) | `master.securityContext.fsGroup` |
| `slave.securityContext.runAsUser` | User ID for the container (redis slave pod) | `master.securityContext.runAsUser` |
| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `master.resources` |
| `slave.affinity` | Enable node/pod affinity for slaves | {} |
The above parameters map to the env variables defined in [bitnami/redis](https://github.com/bitnami/bitnami-docker-redis). For more information, please refer to the [bitnami/redis](https://github.com/bitnami/bitnami-docker-redis) image documentation.

View File

@@ -3,7 +3,7 @@
##
image:
registry: docker.io
repository: bitnami/redis
repository: bitnamilegacy/redis
tag: 4.0.10-debian-9
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@@ -3,7 +3,7 @@
##
image:
registry: docker.io
repository: bitnami/redis
repository: bitnamilegacy/redis
tag: 4.0.10-debian-9
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'

View File

@@ -15,6 +15,8 @@ import (
"github.com/Masterminds/semver/v3"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -24,8 +26,6 @@ import (
executil "github.com/argoproj/argo-cd/v3/util/exec"
"github.com/argoproj/argo-cd/v3/util/git"
"github.com/argoproj/argo-cd/v3/util/proxy"
securejoin "github.com/cyphar/filepath-securejoin"
)
// Image represents a Docker image in the format NAME[:TAG].
@@ -346,12 +346,18 @@ func (k *kustomize) Build(opts *v1alpha1.ApplicationSourceKustomize, kustomizeOp
foundComponents := opts.Components
if opts.IgnoreMissingComponents {
foundComponents = make([]string, 0)
root, err := os.OpenRoot(k.repoRoot)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to open the repo folder: %w", err)
}
defer io.Close(root)
for _, c := range opts.Components {
resolvedPath, err := securejoin.SecureJoin(k.path, c)
resolvedPath, err := filepath.Rel(k.repoRoot, filepath.Join(k.path, c))
if err != nil {
return nil, nil, nil, fmt.Errorf("Kustomize components path failed: %w", err)
return nil, nil, nil, fmt.Errorf("kustomize components path failed: %w", err)
}
_, err = os.Stat(resolvedPath)
_, err = root.Stat(resolvedPath)
if err != nil {
log.Debugf("%s component directory does not exist", resolvedPath)
continue
@@ -359,15 +365,18 @@ func (k *kustomize) Build(opts *v1alpha1.ApplicationSourceKustomize, kustomizeOp
foundComponents = append(foundComponents, c)
}
}
args := []string{"edit", "add", "component"}
args = append(args, foundComponents...)
cmd := exec.Command(k.getBinaryPath(), args...)
cmd.Dir = k.path
cmd.Env = env
commands = append(commands, executil.GetCommandArgsToLog(cmd))
_, err := executil.Run(cmd)
if err != nil {
return nil, nil, nil, err
if len(foundComponents) > 0 {
args := []string{"edit", "add", "component"}
args = append(args, foundComponents...)
cmd := exec.Command(k.getBinaryPath(), args...)
cmd.Dir = k.path
cmd.Env = env
commands = append(commands, executil.GetCommandArgsToLog(cmd))
_, err := executil.Run(cmd)
if err != nil {
return nil, nil, nil, err
}
}
}
}
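
os.OpenRoot (Go 1.24) yields an *os.Root whose file operations are confined to the directory tree beneath it, so a component path full of ../ segments can be probed without ever escaping the repository. The containment pattern from the hunk above, as a standalone sketch (hypothetical helper):

func componentExists(repoRoot, appPath, component string) (bool, error) {
	root, err := os.OpenRoot(repoRoot)
	if err != nil {
		return false, fmt.Errorf("failed to open the repo folder: %w", err)
	}
	defer root.Close()
	rel, err := filepath.Rel(repoRoot, filepath.Join(appPath, component))
	if err != nil {
		return false, err
	}
	// Unlike os.Stat, root.Stat refuses any path that resolves outside repoRoot.
	_, err = root.Stat(rel)
	return err == nil, nil
}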

View File

@@ -25,6 +25,7 @@ const (
kustomization6 = "kustomization_yaml_components"
kustomization7 = "label_without_selector"
kustomization8 = "kustomization_yaml_patches_empty"
kustomization9 = "kustomization_yaml_components_monorepo"
)
func testDataDir(tb testing.TB, testData string) (string, error) {
@@ -506,6 +507,31 @@ func TestKustomizeBuildComponents(t *testing.T) {
assert.Equal(t, int64(3), replicas)
}
func TestKustomizeBuildComponentsMonoRepo(t *testing.T) {
rootPath, err := testDataDir(t, kustomization9)
require.NoError(t, err)
appPath := path.Join(rootPath, "envs/inseng-pdx-egert-sandbox/namespaces/inst-system/apps/hello-world")
kustomize := NewKustomizeApp(rootPath, appPath, git.NopCreds{}, "", "", "", "")
kustomizeSource := v1alpha1.ApplicationSourceKustomize{
Components: []string{"../../../../../../kustomize/components/all"},
IgnoreMissingComponents: true,
}
objs, _, _, err := kustomize.Build(&kustomizeSource, nil, nil, nil)
require.NoError(t, err)
obj := objs[2]
require.Equal(t, "hello-world-kustomize", obj.GetName())
require.Equal(t, map[string]string{
"app.kubernetes.io/name": "hello-world-kustomize",
"app.kubernetes.io/owner": "fire-team",
}, obj.GetLabels())
tolerations, ok, err := unstructured.NestedSlice(obj.Object, "spec", "template", "spec", "tolerations")
require.NoError(t, err)
require.True(t, ok)
require.Len(t, tolerations, 1)
require.Equal(t, "my-special-toleration", tolerations[0].(map[string]any)["key"])
require.Equal(t, "Exists", tolerations[0].(map[string]any)["operator"])
}
func TestKustomizeBuildPatches(t *testing.T) {
appPath, err := testDataDir(t, kustomization5)
require.NoError(t, err)
@@ -582,3 +608,23 @@ func TestFailKustomizeBuildPatches(t *testing.T) {
_, _, _, err = kustomize.Build(&kustomizeSource, nil, nil, nil)
require.EqualError(t, err, "kustomization file not found in the path")
}
func TestKustomizeBuildComponentsNoFoundComponents(t *testing.T) {
appPath, err := testDataDir(t, kustomization6)
require.NoError(t, err)
kustomize := NewKustomizeApp(appPath, appPath, git.NopCreds{}, "", "", "", "")
// Test with non-existent components and IgnoreMissingComponents = true
// This should result in foundComponents being empty, so no "edit add component" command should be executed
kustomizeSource := v1alpha1.ApplicationSourceKustomize{
Components: []string{"./non-existent-component1", "./non-existent-component2"},
IgnoreMissingComponents: true,
}
_, _, commands, err := kustomize.Build(&kustomizeSource, nil, nil, nil)
require.NoError(t, err)
// Verify that no "edit add component" command was executed
for _, cmd := range commands {
assert.NotContains(t, cmd, "edit add component", "kustomize edit add component should not be invoked when foundComponents is empty")
}
}

View File

@@ -0,0 +1,3 @@
---
kustomize:
componentsPath: ../../../../../../kustomize/components

View File

@@ -0,0 +1,33 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# All the members of this group are meant to be populated from the
# same nonproduction overlay of the matching app
resources:
- ../../../../../../kustomize/apps/hello-world/base
nameSuffix: -kustomize
labels:
- pairs:
app.kubernetes.io/name: hello-world-kustomize
includeSelectors: true
includeTemplates: true
patches:
# Adjusting the serviceAccount ref
- patch: |-
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-world
spec:
template:
spec:
serviceAccountName: hello-world-kustomize
# Container image versions across the members
images:
- name: hello-world
newTag: 1.17.0

View File

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-world
labels:
app.kubernetes.io/name: hello-world
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: hello-world
template:
metadata:
labels:
app.kubernetes.io/name: hello-world
spec:
serviceAccountName: hello-world
containers:
- name: hello-world
image: "nginx:1.16.0"
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
tolerations: []

View File

@@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
- serviceaccount.yaml

View File

@@ -0,0 +1,16 @@
---
apiVersion: v1
kind: Service
metadata:
name: hello-world
labels:
app.kubernetes.io/name: hello-world
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: hello-world

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: hello-world
labels:
app.kubernetes.io/name: hello-world

View File

@@ -0,0 +1,19 @@
---
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
labels:
- pairs:
app.kubernetes.io/owner: fire-team
includeSelectors: false
includeTemplates: false
patches:
- target:
kind: Deployment
patch: |-
- op: add
path: /spec/template/spec/tolerations/-
value:
key: my-special-toleration
operator: Exists

View File

@@ -25,13 +25,9 @@ func (t testNormalizer) Normalize(un *unstructured.Unstructured) error {
if un == nil {
return nil
}
if un.GetKind() == "Job" {
err := unstructured.SetNestedField(un.Object, map[string]any{"name": "not sure why this works"}, "metadata")
if err != nil {
return fmt.Errorf("failed to normalize Job: %w", err)
}
}
switch un.GetKind() {
case "Job":
return t.normalizeJob(un)
case "DaemonSet", "Deployment", "StatefulSet":
err := unstructured.SetNestedStringMap(un.Object, map[string]string{"kubectl.kubernetes.io/restartedAt": "0001-01-01T00:00:00Z"}, "spec", "template", "metadata", "annotations")
if err != nil {
@@ -84,6 +80,28 @@ func (t testNormalizer) Normalize(un *unstructured.Unstructured) error {
return nil
}
func (t testNormalizer) normalizeJob(un *unstructured.Unstructured) error {
if conditions, exist, err := unstructured.NestedSlice(un.Object, "status", "conditions"); err != nil {
return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
} else if exist {
changed := false
for i := range conditions {
condition := conditions[i].(map[string]any)
cType := condition["type"].(string)
if cType == "FailureTarget" {
condition["lastTransitionTime"] = "0001-01-01T00:00:00Z"
changed = true
}
}
if changed {
if err := unstructured.SetNestedSlice(un.Object, conditions, "status", "conditions"); err != nil {
return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
}
}
}
return nil
}
type ActionTestStructure struct {
DiscoveryTests []IndividualDiscoveryTest `yaml:"discoveryTests"`
ActionTests []IndividualActionTest `yaml:"actionTests"`
@@ -179,8 +197,7 @@ func TestLuaResourceActionsScript(t *testing.T) {
assert.EqualValues(t, sourceObj.GetNamespace(), result.GetNamespace())
case CreateOperation:
switch result.GetKind() {
case "Job":
case "Workflow":
case "Job", "Workflow":
// The name of the created resource is derived from the source object name, so the returned name is not actually equal to the testdata output name
result.SetName(expectedObj.GetName())
}

View File

@@ -258,7 +258,8 @@ func cleanReturnedObj(newObj, obj map[string]any) map[string]any {
switch oldValue := oldValueInterface.(type) {
case map[string]any:
if len(newValue) == 0 {
mapToReturn[key] = oldValue
// Lua incorrectly decoded the empty object as an empty array, so set it to an empty object
mapToReturn[key] = map[string]any{}
}
case []any:
newArray := cleanReturnedArray(newValue, oldValue)
@@ -275,6 +276,10 @@ func cleanReturnedObj(newObj, obj map[string]any) map[string]any {
func cleanReturnedArray(newObj, obj []any) []any {
arrayToReturn := newObj
for i := range newObj {
if i >= len(obj) {
// If the new object is longer than the old one, we added an item to the array
break
}
switch newValue := newObj[i].(type) {
case map[string]any:
if oldValue, ok := obj[i].(map[string]any); ok {
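
The two guards above exist because Lua has a single table type: an empty Lua table decodes back to Go as []any even when it started life as a map, and indexes past the end of the old array simply mean the action appended items. A runnable illustration of the empty-table ambiguity:

package main

import "fmt"

func main() {
	old := map[string]any{"resources": map[string]any{}}
	fromLua := map[string]any{"resources": []any{}} // Lua cannot tell {} and [] apart
	if arr, ok := fromLua["resources"].([]any); ok && len(arr) == 0 {
		if _, wasMap := old["resources"].(map[string]any); wasMap {
			fromLua["resources"] = map[string]any{} // restore the intended type
		}
	}
	fmt.Printf("%#v\n", fromLua["resources"]) // map[string]interface {}{}
}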

View File

@@ -516,7 +516,7 @@ const expectedCreatedMultipleJobsObjList = `
kind: Job
metadata:
name: hello-2
namespace: test-ns
namespace: test-ns
`
const expectedActionMixedOperationObjList = `
@@ -533,9 +533,9 @@ const expectedActionMixedOperationObjList = `
kind: CronJob
metadata:
name: hello
namespace: test-ns
namespace: test-ns
labels:
test: test
test: test
`
const createJobActionLua = `
@@ -707,7 +707,9 @@ func TestExecuteResourceActionInvalidUnstructured(t *testing.T) {
require.Error(t, err)
}
const objWithEmptyStruct = `
func TestCleanPatch(t *testing.T) {
t.Run("Empty Struct preserved", func(t *testing.T) {
const obj = `
apiVersion: argoproj.io/v1alpha1
kind: Test
metadata:
@@ -719,7 +721,8 @@ metadata:
resourceVersion: "123"
spec:
resources: {}
paused: true
updated:
something: true
containers:
- name: name1
test: {}
@@ -727,8 +730,7 @@ spec:
- name: name2
test2: {}
`
const expectedUpdatedObjWithEmptyStruct = `
const expected = `
apiVersion: argoproj.io/v1alpha1
kind: Test
metadata:
@@ -740,7 +742,7 @@ metadata:
resourceVersion: "123"
spec:
resources: {}
paused: false
updated: {}
containers:
- name: name1
test: {}
@@ -748,21 +750,133 @@ spec:
- name: name2
test2: {}
`
const pausedToFalseLua = `
obj.spec.paused = false
const luaAction = `
obj.spec.updated = {}
return obj
`
testObj := StrToUnstructured(obj)
expectedObj := StrToUnstructured(expected)
vm := VM{}
newObjects, err := vm.ExecuteResourceAction(testObj, luaAction)
require.NoError(t, err)
assert.Len(t, newObjects, 1)
assert.Equal(t, newObjects[0].K8SOperation, K8SOperation("patch"))
assert.Equal(t, expectedObj, newObjects[0].UnstructuredObj)
})
func TestCleanPatch(t *testing.T) {
testObj := StrToUnstructured(objWithEmptyStruct)
expectedObj := StrToUnstructured(expectedUpdatedObjWithEmptyStruct)
vm := VM{}
newObjects, err := vm.ExecuteResourceAction(testObj, pausedToFalseLua)
require.NoError(t, err)
assert.Len(t, newObjects, 1)
assert.Equal(t, newObjects[0].K8SOperation, K8SOperation("patch"))
assert.Equal(t, expectedObj, newObjects[0].UnstructuredObj)
t.Run("New item added to array", func(t *testing.T) {
const obj = `
apiVersion: argoproj.io/v1alpha1
kind: Test
metadata:
labels:
app.kubernetes.io/instance: helm-guestbook
test: test
name: helm-guestbook
namespace: default
resourceVersion: "123"
spec:
containers:
- name: name1
test: {}
anotherList:
- name: name2
test2: {}
`
const expected = `
apiVersion: argoproj.io/v1alpha1
kind: Test
metadata:
labels:
app.kubernetes.io/instance: helm-guestbook
test: test
name: helm-guestbook
namespace: default
resourceVersion: "123"
spec:
containers:
- name: name1
test: {}
anotherList:
- name: name2
test2: {}
- name: added
#test: {} ### would be decoded as an empty array and is not supported. The type is unknown
testArray: [] ### works since it is decoded in the correct type
another:
supported: true
`
// `test: {}` in new container would be decoded as an empty array and is not supported. The type is unknown
// `testArray: []` works since it is decoded in the correct type
const luaAction = `
table.insert(obj.spec.containers, {name = "added", testArray = {}, another = {supported = true}})
return obj
`
testObj := StrToUnstructured(obj)
expectedObj := StrToUnstructured(expected)
vm := VM{}
newObjects, err := vm.ExecuteResourceAction(testObj, luaAction)
require.NoError(t, err)
assert.Len(t, newObjects, 1)
assert.Equal(t, newObjects[0].K8SOperation, K8SOperation("patch"))
assert.Equal(t, expectedObj, newObjects[0].UnstructuredObj)
})
t.Run("Last item removed from array", func(t *testing.T) {
const obj = `
apiVersion: argoproj.io/v1alpha1
kind: Test
metadata:
labels:
app.kubernetes.io/instance: helm-guestbook
test: test
name: helm-guestbook
namespace: default
resourceVersion: "123"
spec:
containers:
- name: name1
test: {}
anotherList:
- name: name2
test2: {}
- name: name3
test: {}
anotherList:
- name: name4
test2: {}
`
const expected = `
apiVersion: argoproj.io/v1alpha1
kind: Test
metadata:
labels:
app.kubernetes.io/instance: helm-guestbook
test: test
name: helm-guestbook
namespace: default
resourceVersion: "123"
spec:
containers:
- name: name1
test: {}
anotherList:
- name: name2
test2: {}
`
const luaAction = `
table.remove(obj.spec.containers)
return obj
`
testObj := StrToUnstructured(obj)
expectedObj := StrToUnstructured(expected)
vm := VM{}
newObjects, err := vm.ExecuteResourceAction(testObj, luaAction)
require.NoError(t, err)
assert.Len(t, newObjects, 1)
assert.Equal(t, newObjects[0].K8SOperation, K8SOperation("patch"))
assert.Equal(t, expectedObj, newObjects[0].UnstructuredObj)
})
}
func TestGetResourceHealth(t *testing.T) {

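Taken together, the three subtests pin down the patch-cleaning contract: an object emptied by the action stays an empty object, items appended past the old array's length are kept verbatim, and removing the last item shortens the array in the patch. The one sharp edge is spelled out in the comments above: a new array item may not contain an empty table meant as an object, because its type is unrecoverable. An illustration in the same embedded-Lua style as the tests (field names are invented):

const addItemCaveatLua = `
table.insert(obj.spec.containers, {
  name = "added",
  ports = {},                         -- empty table as array: decodes correctly
  resources = {limits = {cpu = "1"}}, -- non-empty object: decodes correctly
  -- config = {}                      -- empty table meant as object: type is lost, avoid
})
return obj
`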
View File

@@ -4,6 +4,7 @@ import (
"context"
"net/url"
"strconv"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -163,69 +164,30 @@ func ResetAll() {
transportCreateCallsCounter.Reset()
}
type KubectlMetrics struct {
clientCertRotationAgeMetric kubectlClientCertRotationAgeMetric
requestLatencyMetric kubectlRequestLatencyMetric
resolverLatencyMetric kubectlResolverLatencyMetric
requestSizeMetric kubectlRequestSizeMetric
responseSizeMetric kubectlResponseSizeMetric
rateLimiterLatencyMetric kubectlRateLimiterLatencyMetric
requestResultMetric kubectlRequestResultMetric
execPluginCallsMetric kubectlExecPluginCallsMetric
requestRetryMetric kubectlRequestRetryMetric
transportCacheEntriesMetric kubectlTransportCacheEntriesMetric
transportCreateCallsMetric kubectlTransportCreateCallsMetric
}
// NewKubectlMetrics returns a new KubectlMetrics instance with the given initiator, which should be the name of the
// Argo CD component that is producing the metrics.
//
// After initializing the KubectlMetrics instance, you must call RegisterWithClientGo to register the metrics with the
// client-go metrics library.
//
// You must also call RegisterWithPrometheus to register the metrics with the metrics server's prometheus registry.
//
// So these three lines should be enough to set up kubectl metrics in your metrics server:
//
// kubectlMetricsServer := metricsutil.NewKubectlMetrics("your-component-name")
// kubectlMetricsServer.RegisterWithClientGo()
// metricsutil.RegisterWithPrometheus(registry)
//
// Once those functions have been called, everything else should happen automatically. client-go will send observations
// to the handlers in this struct, and your metrics server will collect and expose the metrics.
func NewKubectlMetrics() *KubectlMetrics {
return &KubectlMetrics{
clientCertRotationAgeMetric: kubectlClientCertRotationAgeMetric{},
requestLatencyMetric: kubectlRequestLatencyMetric{},
resolverLatencyMetric: kubectlResolverLatencyMetric{},
requestSizeMetric: kubectlRequestSizeMetric{},
responseSizeMetric: kubectlResponseSizeMetric{},
rateLimiterLatencyMetric: kubectlRateLimiterLatencyMetric{},
requestResultMetric: kubectlRequestResultMetric{},
execPluginCallsMetric: kubectlExecPluginCallsMetric{},
requestRetryMetric: kubectlRequestRetryMetric{},
transportCacheEntriesMetric: kubectlTransportCacheEntriesMetric{},
transportCreateCallsMetric: kubectlTransportCreateCallsMetric{},
}
}
var newKubectlMetricsOnce sync.Once
// RegisterWithClientGo sets the metrics handlers for the client-go library. We do not use the metrics library's `RegisterWithClientGo` method,
// because it is protected by a sync.Once. controller-runtime registers a single handler, which blocks our registration
// of our own handlers. So we must rudely set them all directly.
//
// Since the metrics are global, this function should only be called once for a given Argo CD component.
func (k *KubectlMetrics) RegisterWithClientGo() {
metrics.ClientCertRotationAge = &k.clientCertRotationAgeMetric
metrics.RequestLatency = &k.requestLatencyMetric
metrics.ResolverLatency = &k.resolverLatencyMetric
metrics.RequestSize = &k.requestSizeMetric
metrics.ResponseSize = &k.responseSizeMetric
metrics.RateLimiterLatency = &k.rateLimiterLatencyMetric
metrics.RequestResult = &k.requestResultMetric
metrics.ExecPluginCalls = &k.execPluginCallsMetric
metrics.RequestRetry = &k.requestRetryMetric
metrics.TransportCacheEntries = &k.transportCacheEntriesMetric
metrics.TransportCreateCalls = &k.transportCreateCallsMetric
// Since the metrics are global, this function only needs to be called once for a given Argo CD component.
//
// You must also call RegisterWithPrometheus to register the metrics with the metrics server's prometheus registry.
func RegisterWithClientGo() {
// Do once to avoid races in unit tests that call this function.
newKubectlMetricsOnce.Do(func() {
metrics.ClientCertRotationAge = &kubectlClientCertRotationAgeMetric{}
metrics.RequestLatency = &kubectlRequestLatencyMetric{}
metrics.ResolverLatency = &kubectlResolverLatencyMetric{}
metrics.RequestSize = &kubectlRequestSizeMetric{}
metrics.ResponseSize = &kubectlResponseSizeMetric{}
metrics.RateLimiterLatency = &kubectlRateLimiterLatencyMetric{}
metrics.RequestResult = &kubectlRequestResultMetric{}
metrics.ExecPluginCalls = &kubectlExecPluginCallsMetric{}
metrics.RequestRetry = &kubectlRequestRetryMetric{}
metrics.TransportCacheEntries = &kubectlTransportCacheEntriesMetric{}
metrics.TransportCreateCalls = &kubectlTransportCreateCallsMetric{}
})
}
type kubectlClientCertRotationAgeMetric struct{}
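The rewrite works because client-go's metrics hooks are package-level globals: two components (or two tests) assigning them concurrently is a write-write race, so all writers are funneled through a sync.Once and repeat callers become no-ops. The pattern in miniature (names are invented for illustration):

package kubectl

import "sync"

var (
	registerOnce sync.Once
	requestHook  func(verb string) // stands in for a global client-go metrics hook
)

func Register() {
	// The first caller installs the hook; later callers return without writing.
	registerOnce.Do(func() {
		requestHook = func(_ string) { /* record the observation */ }
	})
}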

View File

@@ -0,0 +1,11 @@
package kubectl
import (
"testing"
)
func Test_RegisterWithClientGo_race(_ *testing.T) {
// This test ensures that the RegisterWithClientGo function can be called concurrently without causing a data race.
go RegisterWithClientGo()
go RegisterWithClientGo()
}
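This guard only does anything when the suite runs under the race detector (go test -race). Because the test returns without joining the goroutines, a loaded machine could in principle finish the test before either call runs; a joined variant (a sketch, assuming sync is added to this file's imports) guarantees both unsynchronized calls execute and are checked against each other:

func Test_RegisterWithClientGo_race_joined(t *testing.T) {
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			RegisterWithClientGo()
		}()
	}
	wg.Wait()
}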

View File

@@ -11,6 +11,8 @@ import (
"strings"
"sync"
alpha1 "github.com/argoproj/argo-cd/v3/pkg/client/listers/application/v1alpha1"
"github.com/Masterminds/semver/v3"
"github.com/go-playground/webhooks/v6/azuredevops"
"github.com/go-playground/webhooks/v6/bitbucket"
@@ -20,7 +22,7 @@ import (
"github.com/go-playground/webhooks/v6/gogs"
gogsclient "github.com/gogits/go-gogs-client"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
@@ -56,6 +58,7 @@ type ArgoCDWebhookHandler struct {
ns string
appNs []string
appClientset appclientset.Interface
appsLister alpha1.ApplicationLister
github *github.Webhook
gitlab *gitlab.Webhook
bitbucket *bitbucket.Webhook
@@ -67,7 +70,7 @@ type ArgoCDWebhookHandler struct {
maxWebhookPayloadSizeB int64
}
func NewHandler(namespace string, applicationNamespaces []string, webhookParallelism int, appClientset appclientset.Interface, set *settings.ArgoCDSettings, settingsSrc settingsSource, repoCache *cache.Cache, serverCache *servercache.Cache, argoDB db.ArgoDB, maxWebhookPayloadSizeB int64) *ArgoCDWebhookHandler {
func NewHandler(namespace string, applicationNamespaces []string, webhookParallelism int, appClientset appclientset.Interface, appsLister alpha1.ApplicationLister, set *settings.ArgoCDSettings, settingsSrc settingsSource, repoCache *cache.Cache, serverCache *servercache.Cache, argoDB db.ArgoDB, maxWebhookPayloadSizeB int64) *ArgoCDWebhookHandler {
githubWebhook, err := github.New(github.Options.Secret(set.WebhookGitHubSecret))
if err != nil {
log.Warnf("Unable to init the GitHub webhook")
@@ -109,6 +112,7 @@ func NewHandler(namespace string, applicationNamespaces []string, webhookParalle
db: argoDB,
queue: make(chan any, payloadQueueSize),
maxWebhookPayloadSizeB: maxWebhookPayloadSizeB,
appsLister: appsLister,
}
acdWebhook.startWorkerPool(webhookParallelism)
@@ -268,8 +272,8 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload any) {
nsFilter = ""
}
appIf := a.appClientset.ArgoprojV1alpha1().Applications(nsFilter)
apps, err := appIf.List(context.Background(), metav1.ListOptions{})
appIf := a.appsLister.Applications(nsFilter)
apps, err := appIf.List(labels.Everything())
if err != nil {
log.Warnf("Failed to list applications: %v", err)
return
@@ -294,9 +298,9 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload any) {
// Skip any application that is neither in the control plane's namespace
// nor in the list of enabled namespaces.
var filteredApps []v1alpha1.Application
for _, app := range apps.Items {
for _, app := range apps {
if app.Namespace == a.ns || glob.MatchStringInList(a.appNs, app.Namespace, glob.REGEXP) {
filteredApps = append(filteredApps, app)
filteredApps = append(filteredApps, *app)
}
}
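The handler change swaps a full List against the API server (which decoded every Application into a fresh slice on each webhook delivery) for a read from the shared informer's local cache; the lister is now injected by the caller. A minimal wiring sketch, assuming the generated informer factory that accompanies this clientset (package path, resync interval, and variable names are illustrative, not taken from this diff):

factory := externalversions.NewSharedInformerFactory(appClientset, 10*time.Minute)
appsLister := factory.Argoproj().V1alpha1().Applications().Lister()

stopCh := make(chan struct{})
factory.Start(stopCh)            // begin watching Applications
factory.WaitForCacheSync(stopCh) // lister reads are stale until the cache syncs

handler := NewHandler(namespace, appNamespaces, parallelism, appClientset, appsLister,
	argoCDSettings, settingsSrc, repoCache, serverCache, argoDB, maxPayloadSizeB)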

View File

@@ -2,6 +2,7 @@ package webhook
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
@@ -11,14 +12,16 @@ import (
"testing"
"time"
"k8s.io/apimachinery/pkg/types"
argov1 "github.com/argoproj/argo-cd/v3/pkg/client/listers/application/v1alpha1"
"github.com/go-playground/webhooks/v6/bitbucket"
bitbucketserver "github.com/go-playground/webhooks/v6/bitbucket-server"
"github.com/go-playground/webhooks/v6/github"
"github.com/go-playground/webhooks/v6/gitlab"
gogsclient "github.com/gogits/go-gogs-client"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kubetesting "k8s.io/client-go/testing"
"github.com/argoproj/argo-cd/v3/util/cache/appstate"
@@ -64,6 +67,31 @@ func NewMockHandler(reactor *reactorDef, applicationNamespaces []string, objects
return NewMockHandlerWithPayloadLimit(reactor, applicationNamespaces, defaultMaxPayloadSize, objects...)
}
type fakeAppsLister struct {
argov1.ApplicationLister
argov1.ApplicationNamespaceLister
namespace string
clientset *appclientset.Clientset
}
func (f *fakeAppsLister) Applications(namespace string) argov1.ApplicationNamespaceLister {
return &fakeAppsLister{namespace: namespace, clientset: f.clientset}
}
func (f *fakeAppsLister) List(selector labels.Selector) ([]*v1alpha1.Application, error) {
res, err := f.clientset.ArgoprojV1alpha1().Applications(f.namespace).List(context.Background(), metav1.ListOptions{
LabelSelector: selector.String(),
})
if err != nil {
return nil, err
}
var apps []*v1alpha1.Application
for i := range res.Items {
apps = append(apps, &res.Items[i])
}
return apps, nil
}
func NewMockHandlerWithPayloadLimit(reactor *reactorDef, applicationNamespaces []string, maxPayloadSize int64, objects ...runtime.Object) *ArgoCDWebhookHandler {
appClientset := appclientset.NewSimpleClientset(objects...)
if reactor != nil {
@@ -76,7 +104,7 @@ func NewMockHandlerWithPayloadLimit(reactor *reactorDef, applicationNamespaces [
}
cacheClient := cacheutil.NewCache(cacheutil.NewInMemoryCache(1 * time.Hour))
return NewHandler("argocd", applicationNamespaces, 10, appClientset, &settings.ArgoCDSettings{}, &fakeSettingsSrc{}, cache.NewCache(
return NewHandler("argocd", applicationNamespaces, 10, appClientset, &fakeAppsLister{clientset: appClientset}, &settings.ArgoCDSettings{}, &fakeSettingsSrc{}, cache.NewCache(
cacheClient,
1*time.Minute,
1*time.Minute,

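fakeAppsLister satisfies both lister interfaces by embedding them and overriding only the two methods the handler calls; any other method would panic on the nil embedded interface, which is acceptable in a test double. The trick in miniature (names invented):

type Greeter interface {
	Hello() string
	Bye() string
}

type fakeGreeter struct {
	Greeter // nil embedded interface satisfies the full contract
}

// Override only what the test exercises; Bye() would panic if called.
func (fakeGreeter) Hello() string { return "hi" }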
View File

@@ -1,12 +1,7 @@
package workloadidentity
import (
"context"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)
const (
@@ -22,34 +17,9 @@ type TokenProvider interface {
GetToken(scope string) (*Token, error)
}
type WorkloadIdentityTokenProvider struct {
tokenCredential azcore.TokenCredential
}
// Used to propagate initialization error if any
var initError error
func NewWorkloadIdentityTokenProvider() TokenProvider {
cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{})
initError = err
return WorkloadIdentityTokenProvider{tokenCredential: cred}
}
func (c WorkloadIdentityTokenProvider) GetToken(scope string) (*Token, error) {
if initError != nil {
return nil, initError
}
token, err := c.tokenCredential.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{scope},
})
if err != nil {
return nil, err
}
return &Token{AccessToken: token.Token, ExpiresOn: token.ExpiresOn}, nil
}
func CalculateCacheExpiryBasedOnTokenExpiry(tokenExpiry time.Time) time.Duration {
// Calculate the cache expiry as 5 minutes before the token expires
cacheExpiry := time.Until(tokenExpiry) - time.Minute*5

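The helper retained at the end of this file (shown truncated) derives the cache TTL from the token's own expiry minus a five-minute safety margin, so a cached token is never served inside its final refresh window; a token with 30 minutes of life is cached for roughly 25. Assuming the truncated body returns cacheExpiry:

expiresOn := time.Now().Add(30 * time.Minute)
ttl := CalculateCacheExpiryBasedOnTokenExpiry(expiresOn) // about 25 minutes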
View File

@@ -0,0 +1,36 @@
//go:build !darwin || (cgo && darwin)
package workloadidentity
import (
"context"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)
type WorkloadIdentityTokenProvider struct {
tokenCredential azcore.TokenCredential
}
func NewWorkloadIdentityTokenProvider() TokenProvider {
cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{})
initError = err
return WorkloadIdentityTokenProvider{tokenCredential: cred}
}
func (c WorkloadIdentityTokenProvider) GetToken(scope string) (*Token, error) {
if initError != nil {
return nil, initError
}
token, err := c.tokenCredential.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{scope},
})
if err != nil {
return nil, err
}
return &Token{AccessToken: token.Token, ExpiresOn: token.ExpiresOn}, nil
}
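The //go:build constraint compiles this Azure implementation on every platform except darwin-without-cgo. For the package to keep building there, a companion file with the inverse constraint presumably supplies a stub; a hypothetical sketch of what such a stub could look like (not taken from this diff):

//go:build darwin && !cgo

package workloadidentity

import "errors"

type unsupportedTokenProvider struct{}

// NewWorkloadIdentityTokenProvider keeps the package compiling on cgo-less
// darwin while making the limitation explicit at runtime.
func NewWorkloadIdentityTokenProvider() TokenProvider {
	return unsupportedTokenProvider{}
}

func (unsupportedTokenProvider) GetToken(_ string) (*Token, error) {
	return nil, errors.New("azure workload identity requires cgo on darwin")
}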

Some files were not shown because too many files have changed in this diff.