Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 17:48:47 +01:00)

Compare commits: stable...reggie-k-p (153 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 9ea4ad6eb9 |  |
|  | a9da448046 |  |
|  | 91475509e1 |  |
|  | 6cd65b4622 |  |
|  | 23c021f53d |  |
|  | b18d576fe2 |  |
|  | 42f09f7529 |  |
|  | ef21768b92 |  |
|  | fee6962f68 |  |
|  | bfbb88e5fe |  |
|  | 82597111a1 |  |
|  | fded82ad57 |  |
|  | 3453367509 |  |
|  | 2e638831a6 |  |
|  | 1049d40b7d |  |
|  | 6994a42fa9 |  |
|  | ef40ba8805 |  |
|  | 67712c19d8 |  |
|  | c4f3bb8be4 |  |
|  | 2d762e4a2b |  |
|  | a6cc7ad9a6 |  |
|  | d2cb56d7c7 |  |
|  | c32286a9a4 |  |
|  | 429fc1f2d9 |  |
|  | 275c5de627 |  |
|  | b320854f04 |  |
|  | 38363f3388 |  |
|  | 912e216be3 |  |
|  | f76046fc7e |  |
|  | 61c8ce2fc9 |  |
|  | 8866fcf207 |  |
|  | bde6f667e1 |  |
|  | c212bb77bd |  |
|  | 0da603db11 |  |
|  | 1488a13b89 |  |
|  | 6a3a540c9a |  |
|  | fb56875397 |  |
|  | b137439c07 |  |
|  | 43dd717183 |  |
|  | c19d0461ff |  |
|  | 667b7d658c |  |
|  | d08a87931e |  |
|  | a1955019f8 |  |
|  | 2322cdca32 |  |
|  | f83906d877 |  |
|  | e988c55a11 |  |
|  | 05504d623c |  |
|  | 3c01ab15ee |  |
|  | 3ac7a0b69a |  |
|  | 93a7717c71 |  |
|  | b4a52fc5c8 |  |
|  | af64957452 |  |
|  | cbc7ecdb85 |  |
|  | 5d790e5c94 |  |
|  | 7317cde9e7 |  |
|  | 946a3ab44b |  |
|  | e6825529ab |  |
|  | dab6f3bfae |  |
|  | 79b0981b05 |  |
|  | bb894e8c16 |  |
|  | 312a841f8c |  |
|  | abde22229a |  |
|  | 2d19fa0781 |  |
|  | ee1bf89bf8 |  |
|  | f6d00b7733 |  |
|  | 08390e21cb |  |
|  | b357063c02 |  |
|  | 040cc37ad3 |  |
|  | 73b4d9884f |  |
|  | b0e4e84f23 |  |
|  | e7aa9b099a |  |
|  | d9b38a8e0e |  |
|  | 726b764f1e |  |
|  | 4edc1a96d3 |  |
|  | 5959693845 |  |
|  | 4aa2ba4715 |  |
|  | ced94022b3 |  |
|  | 4a5d3a79cc |  |
|  | ca82ee11e2 |  |
|  | 93205a7a08 |  |
|  | f8899ee310 |  |
|  | 884b639e1e |  |
|  | 9213601160 |  |
|  | 93c736cf6a |  |
|  | 774f48e23e |  |
|  | e4a28fa71f |  |
|  | d8f9ed90f2 |  |
|  | deb79bbfc4 |  |
|  | 5598f87d82 |  |
|  | d11d025186 |  |
|  | 4409ec0ab8 |  |
|  | 20bf53f4a6 |  |
|  | fa6f5c63c8 |  |
|  | 5113f820de |  |
|  | 23b387f117 |  |
|  | 2f9bea6892 |  |
|  | 104cd72c77 |  |
|  | 5cce5fe59b |  |
|  | c34d44ab7f |  |
|  | bac8c4bc19 |  |
|  | da042b7f96 |  |
|  | 28beb3ec42 |  |
|  | ef75a2e7a5 |  |
|  | 91a1311bbe |  |
|  | bbeaa2e359 |  |
|  | d63aa846c5 |  |
|  | 4c77f0c963 |  |
|  | 5ec311001b |  |
|  | abf2233426 |  |
|  | dd9799385d |  |
|  | cc5cd7e30b |  |
|  | b543e18b10 |  |
|  | 1dc85e564b |  |
|  | 0114636cdc |  |
|  | 5859065650 |  |
|  | 6f21978637 |  |
|  | 91e9b22624 |  |
|  | 9a777c63fa |  |
|  | 6f0de8b858 |  |
|  | b1a93b4756 |  |
|  | b5a91a18cd |  |
|  | 4bfd6243a1 |  |
|  | cc0752d334 |  |
|  | f3d0c1233e |  |
|  | 4fabbcebea |  |
|  | 3b24d33cda |  |
|  | 13b8b458f4 |  |
|  | b414432ddb |  |
|  | 216611ff3b |  |
|  | df3be1cdf0 |  |
|  | 0a2ae95be8 |  |
|  | 67d425f237 |  |
|  | 2e6e6cfc12 |  |
|  | 474d9005f4 |  |
|  | 43a9524d0c |  |
|  | b2df60414c |  |
|  | fee1c565c3 |  |
|  | 106acdafec |  |
|  | a439c6c5ec |  |
|  | 95d19f2eda |  |
|  | 34e8935bf8 |  |
|  | e8eebd7b12 |  |
|  | 48f01b5965 |  |
|  | b92b7a6fd8 |  |
|  | 4832c5e7a5 |  |
|  | 5b8ce54f9d |  |
|  | eedf6cc58e |  |
|  | 96f1266846 |  |
|  | 89cd590cb8 |  |
|  | 2de45e7532 |  |
|  | 8d24a9a211 |  |
|  | f70c47a7fb |  |
|  | faf0b75a73 |  |
1  .github/ISSUE_TEMPLATE/release.md (vendored)

````diff
@@ -41,6 +41,7 @@ Target GA date: ___. __, ____
 Thanks to all the folks who spent their time contributing to this release in any way possible!
 ```
 - [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate during the RC period (or delegate this task to an Approver and coordinate timing)
+- [ ] After creating the RC, open a documentation PR for the next minor version using [this](../../docs/operator-manual/templates/minor_version_upgrade.md) template.

 ## GA Release Checklist

````
4  .github/workflows/bump-major-version.yaml (vendored)

```diff
@@ -10,7 +10,7 @@ jobs:
 contents: write # for peter-evans/create-pull-request to create branch
 pull-requests: write # for peter-evans/create-pull-request to create a PR
 name: Automatically update major version
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
@@ -37,7 +37,7 @@ jobs:
 working-directory: /home/runner/go/src/github.com/argoproj/argo-cd

 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Add ~/go/bin to PATH
```
2  .github/workflows/cherry-pick-single.yml (vendored)

```diff
@@ -28,7 +28,7 @@ on:
 jobs:
 cherry-pick:
 name: Cherry Pick to ${{ inputs.version_number }}
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
 steps:
 - name: Generate a token
 id: generate-token
```
2  .github/workflows/cherry-pick.yml (vendored)

```diff
@@ -14,7 +14,7 @@ jobs:
 (github.event.action == 'labeled' && startsWith(github.event.label.name, 'cherry-pick/')) ||
 (github.event.action == 'closed' && contains(toJSON(github.event.pull_request.labels.*.name), 'cherry-pick/'))
 )
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
 outputs:
 labels: ${{ steps.extract-labels.outputs.labels }}
 steps:
```
98  .github/workflows/ci-build.yaml (vendored)

```diff
@@ -14,7 +14,7 @@ on:
 env:
 # Golang version to use across CI steps
 # renovate: datasource=golang-version packageName=golang
-GOLANG_VERSION: '1.25.3'
+GOLANG_VERSION: '1.25.6'

 concurrency:
 group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,7 +25,7 @@ permissions:

 jobs:
 changes:
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
 outputs:
 backend: ${{ steps.filter.outputs.backend_any_changed }}
 frontend: ${{ steps.filter.outputs.frontend_any_changed }}
@@ -50,14 +50,14 @@ jobs:
 check-go:
 name: Ensure Go modules synchronicity
 if: ${{ needs.changes.outputs.backend == 'true' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 needs:
 - changes
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Download all Go modules
@@ -70,18 +70,18 @@ jobs:
 build-go:
 name: Build & cache Go code
 if: ${{ needs.changes.outputs.backend == 'true' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 needs:
 - changes
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Restore go build cache
-uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0
+uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
 with:
 path: ~/.cache/go-build
 key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -97,14 +97,14 @@ jobs:
 pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
 name: Lint Go code
 if: ${{ needs.changes.outputs.backend == 'true' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 needs:
 - changes
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Run golangci-lint
@@ -117,7 +117,7 @@ jobs:
 test-go:
 name: Run unit tests for Go packages
 if: ${{ needs.changes.outputs.backend == 'true' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 needs:
 - build-go
 - changes
@@ -132,7 +132,7 @@ jobs:
 - name: Create symlink in GOPATH
 run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Install required packages
@@ -152,7 +152,7 @@ jobs:
 run: |
 echo "/usr/local/bin" >> $GITHUB_PATH
 - name: Restore go build cache
-uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0
+uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
 with:
 path: ~/.cache/go-build
 key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -173,7 +173,7 @@ jobs:
 - name: Run all unit tests
 run: make test-local
 - name: Generate test results artifacts
-uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
 with:
 name: test-results
 path: test-results
@@ -181,7 +181,7 @@ jobs:
 test-go-race:
 name: Run unit tests with -race for Go packages
 if: ${{ needs.changes.outputs.backend == 'true' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 needs:
 - build-go
 - changes
@@ -194,9 +194,9 @@ jobs:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - name: Create symlink in GOPATH
-run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
+run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Install required packages
@@ -216,7 +216,7 @@ jobs:
 run: |
 echo "/usr/local/bin" >> $GITHUB_PATH
 - name: Restore go build cache
-uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0
+uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
 with:
 path: ~/.cache/go-build
 key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -237,7 +237,7 @@ jobs:
 - name: Run all unit tests
 run: make test-race-local
 - name: Generate test results artifacts
-uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
 with:
 name: race-results
 path: test-results/
@@ -245,14 +245,14 @@ jobs:
 codegen:
 name: Check changes to generated code
 if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.docs == 'true'}}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 needs:
 - changes
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Create symlink in GOPATH
@@ -271,13 +271,13 @@ jobs:
 # We need to vendor go modules for codegen yet
 go mod download
 go mod vendor -v
-# generalizing repo name for forks: ${{ github.event.repository.name }}
-working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
+# generalizing repo name for forks: ${{ github.event.repository.name }}
+working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
 - name: Install toolchain for codegen
 run: |
 make install-codegen-tools-local
 make install-go-tools-local
-# generalizing repo name for forks: ${{ github.event.repository.name }}
+# generalizing repo name for forks: ${{ github.event.repository.name }}
 working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
 # We install kustomize in the dist directory
 - name: Add dist to PATH
@@ -302,20 +302,20 @@ jobs:
 name: Build, test & lint UI code
 # We run UI logic for backend changes so that we have a complete set of coverage documents to send to codecov.
 if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.frontend == 'true' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 needs:
 - changes
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - name: Setup NodeJS
-uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
+uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
 with:
 # renovate: datasource=node-version packageName=node versioning=node
 node-version: '22.9.0'
 - name: Restore node dependency cache
 id: cache-dependencies
-uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0
+uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
 with:
 path: ui/node_modules
 key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -338,7 +338,7 @@ jobs:
 working-directory: ui/

 shellcheck:
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
 steps:
 - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - run: |
@@ -349,7 +349,7 @@ jobs:
 analyze:
 name: Process & analyze test artifacts
 if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.frontend == 'true' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 needs:
 - test-go
 - build-ui
@@ -357,6 +357,7 @@ jobs:
 - test-e2e
 env:
 sonar_secret: ${{ secrets.SONAR_TOKEN }}
+codecov_secret: ${{ secrets.CODECOV_TOKEN }}
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
@@ -364,23 +365,27 @@
 fetch-depth: 0
 - name: Restore node dependency cache
 id: cache-dependencies
-uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0
+uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
 with:
 path: ui/node_modules
 key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
+if: env.codecov_secret != ''
 - name: Remove other node_modules directory
 run: |
 rm -rf ui/node_modules/argo-ui/node_modules
+if: env.codecov_secret != ''
 - name: Get e2e code coverage
-uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
 with:
 name: e2e-code-coverage
 path: e2e-code-coverage
+if: env.codecov_secret != ''
 - name: Get unit test code coverage
-uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
 with:
 name: test-results
 path: test-results
+if: env.codecov_secret != ''
 - name: combine-go-coverage
 # We generate coverage reports for all Argo CD components, but only the applicationset-controller,
 # app-controller, repo-server, and commit-server report contain coverage data. The other components currently
@@ -388,6 +393,7 @@
 # references to their coverage output directories.
 run: |
 go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller,e2e-code-coverage/commit-server -o test-results/full-coverage.out
+if: env.codecov_secret != ''
 - name: Upload code coverage information to codecov.io
 uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
 with:
@@ -395,13 +401,16 @@
 fail_ci_if_error: true
 env:
 CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+if: env.codecov_secret != ''
 - name: Upload test results to Codecov
-if: github.ref == 'refs/heads/master' && github.event_name == 'push' && github.repository == 'argoproj/argo-cd'
-uses: codecov/test-results-action@47f89e9acb64b76debcd5ea40642d25a4adced9f # v1.1.1
+# Codecov uploads test results to Codecov.io on upstream master branch and on fork master branch if the token is configured.
+if: env.codecov_secret != '' && github.ref == 'refs/heads/master' && github.event_name == 'push'
+uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
 with:
-file: test-results/junit.xml
+files: test-results/junit.xml
 fail_ci_if_error: true
 token: ${{ secrets.CODECOV_TOKEN }}
+report_type: test_results
 - name: Perform static code analysis using SonarCloud
 env:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -411,29 +420,26 @@
 test-e2e:
 name: Run end-to-end tests
 if: ${{ needs.changes.outputs.backend == 'true' }}
-runs-on: ${{ github.repository == 'argoproj/argo-cd' && 'oracle-vm-16cpu-64gb-x86-64' || 'ubuntu-22.04' }}
+runs-on: ${{ github.repository == 'argoproj/argo-cd' && 'oracle-vm-16cpu-64gb-x86-64' || 'ubuntu-24.04' }}
 strategy:
 fail-fast: false
 matrix:
-# latest: true means that this version mush upload the coverage report to codecov.io
+# We designate the latest version because we only collect code coverage for that version.
 k3s:
-- version: v1.33.1
+- version: v1.34.2
 latest: true
+- version: v1.33.1
+latest: false
 - version: v1.32.1
 latest: false
 - version: v1.31.0
 latest: false
-- version: v1.30.4
-latest: false
 needs:
 - build-go
 - changes
 env:
 ARGOCD_FAKE_IN_CLUSTER: 'true'
 ARGOCD_SSH_DATA_PATH: '/tmp/argo-e2e/app/config/ssh'
 ARGOCD_TLS_DATA_PATH: '/tmp/argo-e2e/app/config/tls'
 ARGOCD_E2E_SSH_KNOWN_HOSTS: '../fixture/certs/ssh_known_hosts'
 ARGOCD_E2E_K3S: 'true'
 ARGOCD_IN_CI: 'true'
 ARGOCD_E2E_APISERVER_PORT: '8088'
@@ -452,7 +458,7 @@ jobs:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Set GOPATH
@@ -474,7 +480,7 @@ jobs:
 sudo chmod go-r $HOME/.kube/config
 kubectl version
 - name: Restore go build cache
-uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0
+uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
 with:
 path: ~/.cache/go-build
 key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -532,13 +538,13 @@ jobs:
 goreman run stop-all || echo "goreman trouble"
 sleep 30
 - name: Upload e2e coverage report
-uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
 with:
 name: e2e-code-coverage
 path: /tmp/coverage
 if: ${{ matrix.k3s.latest }}
 - name: Upload e2e-server logs
-uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
 with:
 name: e2e-server-k8s${{ matrix.k3s.version }}.log
 path: /tmp/e2e-server.log
@@ -556,7 +562,7 @@ jobs:
 needs:
 - test-e2e
 - changes
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 steps:
 - run: |
 result="${{ needs.test-e2e.result }}"
```
4  .github/workflows/codeql.yml (vendored)

```diff
@@ -26,14 +26,14 @@ jobs:
 if: github.repository == 'argoproj/argo-cd' || vars.enable_codeql

 # CodeQL runs on ubuntu-latest and windows-latest
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 steps:
 - name: Checkout repository
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

 # Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version-file: go.mod

```
8  .github/workflows/image-reuse.yaml (vendored)

```diff
@@ -51,8 +51,8 @@ jobs:
 contents: read
 packages: write # Used to push images to `ghcr.io` if used.
 id-token: write # Needed to create an OIDC token for keyless signing
-runs-on: ubuntu-22.04
-outputs:
+runs-on: ubuntu-24.04
+outputs:
 image-digest: ${{ steps.image.outputs.digest }}
 steps:
 - name: Checkout code
@@ -67,7 +67,7 @@ jobs:
 if: ${{ github.ref_type != 'tag'}}

 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ inputs.go-version }}
 cache: false
@@ -76,7 +76,7 @@ jobs:
 uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

 - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
-- uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+- uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0

 - name: Setup tags for container image as a CSV type
 run: |
```
8  .github/workflows/image.yaml (vendored)

```diff
@@ -20,7 +20,7 @@ jobs:
 permissions:
 contents: read
 # Always run to calculate variables - other jobs check outputs
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 outputs:
 image-tag: ${{ steps.image.outputs.tag}}
 platforms: ${{ steps.platforms.outputs.platforms }}
@@ -86,7 +86,7 @@ jobs:
 with:
 # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
 # renovate: datasource=golang-version packageName=golang
-go-version: 1.25.3
+go-version: 1.25.6
 platforms: ${{ needs.set-vars.outputs.platforms }}
 push: false

@@ -103,7 +103,7 @@ jobs:
 ghcr_image_name: ${{ needs.set-vars.outputs.ghcr_image_name }}
 # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
 # renovate: datasource=golang-version packageName=golang
-go-version: 1.25.3
+go-version: 1.25.6
 platforms: ${{ needs.set-vars.outputs.platforms }}
 push: true
 secrets:
@@ -138,7 +138,7 @@ jobs:
 contents: write # for git to push upgrade commit if not already deployed
 packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
 if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 steps:
 - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
 - run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
```
2  .github/workflows/init-release.yaml (vendored)

```diff
@@ -20,7 +20,7 @@ jobs:
 contents: write # for peter-evans/create-pull-request to create branch
 pull-requests: write # for peter-evans/create-pull-request to create a PR
 name: Automatically generate version and manifests on ${{ inputs.TARGET_BRANCH }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 env:
 # Calculate image names with defaults, this will be used in the make manifests-local command
 # to generate the correct image name in the manifests
```
2  .github/workflows/pr-title-check.yml (vendored)

```diff
@@ -21,7 +21,7 @@ jobs:
 contents: read
 pull-requests: read
 name: Validate PR Title
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
 steps:
 - uses: thehanimo/pr-title-checker@7fbfe05602bdd86f926d3fb3bccb6f3aed43bc70 # v1.4.3
 with:
```
16  .github/workflows/release.yaml (vendored)

```diff
@@ -11,7 +11,7 @@ permissions: {}

 env:
 # renovate: datasource=golang-version packageName=golang
-GOLANG_VERSION: '1.25.3' # Note: go-version must also be set in job argocd-image.with.go-version
+GOLANG_VERSION: '1.25.6' # Note: go-version must also be set in job argocd-image.with.go-version

 jobs:
 argocd-image:
@@ -26,7 +26,7 @@ jobs:
 quay_image_name: ${{ needs.setup-variables.outputs.quay_image_name }}
 # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
 # renovate: datasource=golang-version packageName=golang
-go-version: 1.25.3
+go-version: 1.25.6
 platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
 push: true
 secrets:
@@ -36,7 +36,7 @@ jobs:
 setup-variables:
 name: Setup Release Variables
 if: github.repository == 'argoproj/argo-cd' || (github.repository_owner != 'argoproj' && vars.ENABLE_FORK_RELEASES == 'true' && vars.IMAGE_NAMESPACE && vars.IMAGE_NAMESPACE != 'argoproj')
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 outputs:
 is_pre_release: ${{ steps.var.outputs.is_pre_release }}
 is_latest_release: ${{ steps.var.outputs.is_latest_release }}
@@ -117,7 +117,7 @@ jobs:
 permissions:
 contents: write # used for uploading assets
 if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 env:
 GORELEASER_MAKE_LATEST: ${{ needs.setup-variables.outputs.is_latest_release }}
 outputs:
@@ -133,7 +133,7 @@ jobs:
 run: git fetch --force --tags

 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 cache: false
@@ -210,7 +210,7 @@ jobs:
 outputs:
 hashes: ${{ steps.sbom-hash.outputs.hashes }}
 if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
@@ -219,7 +219,7 @@ jobs:
 token: ${{ secrets.GITHUB_TOKEN }}

 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 cache: false
@@ -295,7 +295,7 @@ jobs:
 contents: write # Needed to push commit to update stable tag
 pull-requests: write # Needed to create PR for VERSION update.
 if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 env:
 TAG_STABLE: ${{ needs.setup-variables.outputs.is_latest_release }}
 steps:
```
8  .github/workflows/renovate.yaml (vendored)

```diff
@@ -9,7 +9,7 @@ permissions:

 jobs:
 renovate:
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
 if: github.repository == 'argoproj/argo-cd'
 steps:
 - name: Get token
@@ -24,13 +24,13 @@ jobs:

 # Some codegen commands require Go to be setup
 - name: Setup Golang
-uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
 with:
 # renovate: datasource=golang-version packageName=golang
-go-version: 1.25.3
+go-version: 1.25.6

 - name: Self-hosted Renovate
-uses: renovatebot/github-action@5712c6a41dea6cdf32c72d92a763bd417e6606aa #44.0.5
+uses: renovatebot/github-action@66387ab8c2464d575b933fa44e9e5a86b2822809 #44.2.4
 with:
 configurationFile: .github/configs/renovate-config.js
 token: '${{ steps.get_token.outputs.token }}'
```
4  .github/workflows/scorecard.yaml (vendored)

```diff
@@ -17,7 +17,7 @@ permissions: read-all
 jobs:
 analysis:
 name: Scorecards analysis
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 permissions:
 # Needed to upload the results to code-scanning dashboard.
 security-events: write
@@ -54,7 +54,7 @@ jobs:
 # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
 # format to the repository Actions tab.
 - name: "Upload artifact"
-uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
 with:
 name: SARIF file
 path: results.sarif
```
2  .github/workflows/update-snyk.yaml (vendored)

```diff
@@ -14,7 +14,7 @@ jobs:
 pull-requests: write
 if: github.repository == 'argoproj/argo-cd'
 name: Update Snyk report in the docs directory
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 steps:
 - name: Checkout code
 uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
```
12  Dockerfile

```diff
@@ -1,10 +1,10 @@
-ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:27771fb7b40a58237c98e8d3e6b9ecdd9289cec69a857fccfb85ff36294dac20
+ARG BASE_IMAGE=docker.io/library/ubuntu:25.10@sha256:5922638447b1e3ba114332c896a2c7288c876bb94adec923d70d58a17d2fec5e
 ####################################################################################################
 # Builder image
 # Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
 # Also used as the image in CI jobs so needs all dependencies
 ####################################################################################################
-FROM docker.io/library/golang:1.25.3@sha256:6d4e5e74f47db00f7f24da5f53c1b4198ae46862a47395e30477365458347bf2 AS builder
+FROM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c AS builder

 WORKDIR /tmp

@@ -50,10 +50,10 @@ RUN groupadd -g $ARGOCD_USER_ID argocd && \
 chmod g=u /home/argocd && \
 apt-get update && \
 apt-get dist-upgrade -y && \
-apt-get install -y \
-git git-lfs tini gpg tzdata connect-proxy && \
+apt-get install --no-install-recommends -y \
+git git-lfs tini ca-certificates gpg gpg-agent tzdata connect-proxy openssh-client && \
 apt-get clean && \
-rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/doc/*

 COPY hack/gpg-wrapper.sh \
 hack/git-verify-wrapper.sh \
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
 ####################################################################################################
 # Argo CD Build stage which performs the actual build of Argo CD binaries
 ####################################################################################################
-FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.3@sha256:6d4e5e74f47db00f7f24da5f53c1b4198ae46862a47395e30477365458347bf2 AS argocd-build
+FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c AS argocd-build

 WORKDIR /go/src/github.com/argoproj/argo-cd

```
```diff
@@ -1,4 +1,4 @@
-FROM docker.io/library/golang:1.25.3@sha256:6d4e5e74f47db00f7f24da5f53c1b4198ae46862a47395e30477365458347bf2
+FROM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c

 ENV DEBIAN_FRONTEND=noninteractive

```
39  MAINTAINERS.md (new file)

```diff
@@ -0,0 +1,39 @@
+# Argo CD Maintainers
+
+This document lists the maintainers of the Argo CD project.
+
+## Maintainers
+
+| Maintainer | GitHub ID | Project Roles | Affiliation |
+|---|---|---|---|
+| Zach Aller | [zachaller](https://github.com/zachaller) | Reviewer | [Intuit](https://www.github.com/intuit/) |
+| Leonardo Luz Almeida | [leoluz](https://github.com/leoluz) | Approver | [Intuit](https://www.github.com/intuit/) |
+| Chetan Banavikalmutt | [chetan-rns](https://github.com/chetan-rns) | Reviewer | [Red Hat](https://redhat.com/) |
+| Keith Chong | [keithchong](https://github.com/keithchong) | Approver | [Red Hat](https://redhat.com/) |
+| Alex Collins | [alexec](https://github.com/alexec) | Approver | [Intuit](https://www.github.com/intuit/) |
+| Michael Crenshaw | [crenshaw-dev](https://github.com/crenshaw-dev) | Lead | [Intuit](https://www.github.com/intuit/) |
+| Soumya Ghosh Dastidar | [gdsoumya](https://github.com/gdsoumya) | Approver | [Akuity](https://akuity.io/) |
+| Eugene Doudine | [dudinea](https://github.com/dudinea) | Reviewer | [Octopus Deploy](https://octopus.com/) |
+| Jann Fischer | [jannfis](https://github.com/jannfis) | Approver | [Red Hat](https://redhat.com/) |
+| Dan Garfield | [todaywasawesome](https://github.com/todaywasawesome) | Approver(docs) | [Octopus Deploy](https://octopus.com/) |
+| Alexandre Gaudreault | [agaudreault](https://github.com/agaudreault) | Approver | [Intuit](https://www.github.com/intuit/) |
+| Christian Hernandez | [christianh814](https://github.com/christianh814) | Reviewer(docs) | [Akuity](https://akuity.io/) |
+| Peter Jiang | [pjiang](https://github.com/pjiang) | Reviewer | [Intuit](https://www.intuit.com/) |
+| Andrii Korotkov | [andrii-korotkov](https://github.com/andrii-korotkov) | Reviewer | [Verkada](https://www.verkada.com/) |
+| Pasha Kostohrys | [pasha-codefresh](https://github.com/pasha-codefresh) | Approver | [Codefresh](https://www.github.com/codefresh/) |
+| Nitish Kumar | [nitishfy](https://github.com/nitishfy) | Approver(cli,docs) | [Akuity](https://akuity.io/) |
+| Justin Marquis | [34fathombelow](https://github.com/34fathombelow) | Approver(docs/ci) | [Akuity](https://akuity.io/) |
+| Alexander Matyushentsev | [alexmt](https://github.com/alexmt) | Lead | [Akuity](https://akuity.io/) |
+| Nicholas Morey | [morey-tech](https://github.com/morey-tech) | Reviewer(docs) | [Akuity](https://akuity.io/) |
+| Papapetrou Patroklos | [ppapapetrou76](https://github.com/ppapapetrou76) | Reviewer | [Octopus Deploy](https://octopus.com/) |
+| Blake Pettersson | [blakepettersson](https://github.com/blakepettersson) | Approver | [Akuity](https://akuity.io/) |
+| Ishita Sequeira | [ishitasequeira](https://github.com/ishitasequeira) | Approver | [Red Hat](https://redhat.com/) |
+| Ashutosh Singh | [ashutosh16](https://github.com/ashutosh16) | Approver(docs) | [Intuit](https://www.github.com/intuit/) |
+| Linghao Su | [linghaoSu](https://github.com/linghaoSu) | Reviewer | [DaoCloud](https://daocloud.io) |
+| Jesse Suen | [jessesuen](https://github.com/jessesuen) | Approver | [Akuity](https://akuity.io/) |
+| Yuan Tang | [terrytangyuan](https://github.com/terrytangyuan) | Reviewer | [Red Hat](https://redhat.com/) |
+| William Tam | [wtam2018](https://github.com/wtam2018) | Reviewer | [Red Hat](https://redhat.com/) |
+| Ryan Umstead | [rumstead](https://github.com/rumstead) | Approver | [Black Rock](https://www.github.com/blackrock/) |
+| Regina Voloshin | [reggie-k](https://github.com/reggie-k) | Approver | [Octopus Deploy](https://octopus.com/) |
+| Hong Wang | [wanghong230](https://github.com/wanghong230) | Reviewer | [Akuity](https://akuity.io/) |
+| Jonathan West | [jgwest](https://github.com/jgwest) | Approver | [Red Hat](https://redhat.com/) |
```
74  Makefile

```diff
@@ -76,15 +76,15 @@ ARGOCD_E2E_REDIS_PORT?=6379
 ARGOCD_E2E_DEX_PORT?=5556
 ARGOCD_E2E_YARN_HOST?=localhost
 ARGOCD_E2E_DISABLE_AUTH?=
+ARGOCD_E2E_DIR?=/tmp/argo-e2e

 ARGOCD_E2E_TEST_TIMEOUT?=90m
+ARGOCD_E2E_RERUN_FAILS?=5

 ARGOCD_IN_CI?=false
 ARGOCD_TEST_E2E?=true
 ARGOCD_BIN_MODE?=true

-ARGOCD_LINT_GOGC?=20

 # Depending on where we are (legacy or non-legacy pwd), we need to use
 # different Docker volume mounts for our source tree
 LEGACY_PATH=$(GOPATH)/src/github.com/argoproj/argo-cd
@@ -144,7 +144,6 @@ define run-in-test-client
 -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
 -e GITHUB_TOKEN \
 -e GOCACHE=/tmp/go-build-cache \
--e ARGOCD_LINT_GOGC=$(ARGOCD_LINT_GOGC) \
 -v ${DOCKER_SRC_MOUNT} \
 -v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
 -v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
@@ -203,18 +202,35 @@ else
 IMAGE_TAG?=latest
 endif

 # defaults for building images and manifests
 ifeq (${DOCKER_PUSH},true)
 ifndef IMAGE_NAMESPACE
 $(error IMAGE_NAMESPACE must be set to push images (e.g. IMAGE_NAMESPACE=argoproj))
 endif
 endif

 # Consruct prefix for docker image
 # Note: keeping same logic as in hacks/update_manifests.sh
 ifdef IMAGE_REGISTRY
 ifdef IMAGE_NAMESPACE
 IMAGE_PREFIX=${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/
 else
 $(error IMAGE_NAMESPACE must be set when IMAGE_REGISTRY is set (e.g. IMAGE_NAMESPACE=argoproj))
 endif
 else
 ifdef IMAGE_NAMESPACE
 # for backwards compatibility with the old way like IMAGE_NAMESPACE='quay.io/argoproj'
 IMAGE_PREFIX=${IMAGE_NAMESPACE}/
 else
 # Neither namespace nor registry given - apply the default values
 IMAGE_REGISTRY="quay.io"
 IMAGE_NAMESPACE="argoproj"
 IMAGE_PREFIX=${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/
 endif
 endif

-ifndef IMAGE_REGISTRY
-IMAGE_REGISTRY="quay.io"
+ifndef IMAGE_REPOSITORY
+IMAGE_REPOSITORY=argocd
 endif

 .PHONY: all
@@ -347,23 +363,23 @@ ifeq ($(DEV_IMAGE), true)
 IMAGE_TAG="dev-$(shell git describe --always --dirty)"
 image: build-ui
 DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t argocd-base --target argocd-base .
-CGO_ENABLED=${CGO_FLAG} GOOS=linux GOARCH=amd64 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
+GOOS=linux GOARCH=$(TARGET_ARCH:linux/%=%) GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
 ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
 ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
 ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
 ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server
 ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex
 cp Dockerfile.dev dist
-DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
+DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) -f dist/Dockerfile.dev dist
 else
 image:
-DOCKER_BUILDKIT=1 $(DOCKER) build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
+DOCKER_BUILDKIT=1 $(DOCKER) build -t $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
 endif
-@if [ "$(DOCKER_PUSH)" = "true" ] ; then $(DOCKER) push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
+@if [ "$(DOCKER_PUSH)" = "true" ] ; then $(DOCKER) push $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) ; fi

 .PHONY: armimage
 armimage:
-$(DOCKER) build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm .
+$(DOCKER) build -t $(IMAGE_PREFIX)(IMAGE_REPOSITORY):$(IMAGE_TAG)-arm .

 .PHONY: builder-image
 builder-image:
@@ -395,9 +411,7 @@ lint: test-tools-image
 .PHONY: lint-local
 lint-local:
 golangci-lint --version
-# NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC
-# See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
-GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose
+golangci-lint run --fix --verbose

 .PHONY: lint-ui
 lint-ui: test-tools-image
@@ -429,13 +443,19 @@ test: test-tools-image

 # Run all unit tests (local version)
 .PHONY: test-local
-test-local:
+test-local: test-gitops-engine
 if test "$(TEST_MODULE)" = ""; then \
 DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... | grep -v 'test/e2e'` ./hack/test.sh -args -test.gocoverdir="$(PWD)/test-results"; \
 else \
 DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -args -test.gocoverdir="$(PWD)/test-results" "$(TEST_MODULE)"; \
 fi

+# Run gitops-engine unit tests
+.PHONY: test-gitops-engine
+test-gitops-engine:
+mkdir -p $(PWD)/test-results
+cd gitops-engine && go test -race -cover ./... -args -test.gocoverdir="$(PWD)/test-results"

 .PHONY: test-race
 test-race: test-tools-image
 mkdir -p $(GOCACHE)
@@ -461,7 +481,7 @@ test-e2e:
 test-e2e-local: cli-local
 # NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
 export GO111MODULE=off
-DIST_DIR=${DIST_DIR} RERUN_FAILS=5 PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
+DIST_DIR=${DIST_DIR} RERUN_FAILS=$(ARGOCD_E2E_RERUN_FAILS) PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"

 # Spawns a shell in the test server container for debugging purposes
 debug-test-server: test-tools-image
@@ -485,13 +505,13 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
 kubectl create ns argocd-e2e-external || true
 kubectl create ns argocd-e2e-external-2 || true
 kubectl config set-context --current --namespace=argocd-e2e
-kustomize build test/manifests/base | kubectl apply --server-side -f -
+kustomize build test/manifests/base | kubectl apply --server-side --force-conflicts -f -
 kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
 # Create GPG keys and source directories
-if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
-mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
-mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source
-mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin
+if test -d $(ARGOCD_E2E_DIR)/app/config/gpg; then rm -rf $(ARGOCD_E2E_DIR)/app/config/gpg/*; fi
+mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/keys && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/keys
+mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/source && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/source
+mkdir -p $(ARGOCD_E2E_DIR)/app/config/plugin && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/plugin
 # create folders to hold go coverage results for each component
 mkdir -p /tmp/coverage/app-controller
 mkdir -p /tmp/coverage/api-server
@@ -500,13 +520,15 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
 mkdir -p /tmp/coverage/notification
 mkdir -p /tmp/coverage/commit-server
 # set paths for locally managed ssh known hosts and tls certs data
-ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
-ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
-ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \
-ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
+ARGOCD_E2E_DIR=$(ARGOCD_E2E_DIR) \
+ARGOCD_SSH_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/ssh \
+ARGOCD_TLS_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/tls \
+ARGOCD_GPG_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/gpg/source \
+ARGOCD_GNUPGHOME=$(ARGOCD_E2E_DIR)/app/config/gpg/keys \
 ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
-ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
-ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
+ARGOCD_PLUGINCONFIGFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
+ARGOCD_PLUGINSOCKFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
+ARGOCD_GIT_CONFIG=$(PWD)/test/e2e/fixture/gitconfig \
 ARGOCD_E2E_DISABLE_AUTH=false \
 ARGOCD_ZJWT_FEATURE_FLAG=always \
 ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
```
4  Procfile

```diff
@@ -2,7 +2,7 @@ controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run
 api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --hydrator-enabled=${ARGOCD_HYDRATOR_ENABLED:='false'}"
 dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v3/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
 redis: hack/start-redis-with-password.sh
-repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
+repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "export PATH=./dist:\$PATH && [ -n \"\$ARGOCD_GIT_CONFIG\" ] && export GIT_CONFIG_GLOBAL=\$ARGOCD_GIT_CONFIG && export GIT_CONFIG_NOSYSTEM=1; GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
 cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
 commit-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/commit-server} FORCE_LOG_COLORS=1 ARGOCD_BINARY_NAME=argocd-commit-server $COMMAND --loglevel debug --port ${ARGOCD_E2E_COMMITSERVER_PORT:-8086}"
 ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
@@ -11,4 +11,4 @@ helm-registry: test/fixture/testrepos/start-helm-registry.sh
 oci-registry: test/fixture/testrepos/start-authenticated-helm-registry.sh
 dev-mounter: [ "$ARGOCD_E2E_TEST" != "true" ] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
 applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/applicationset-controller} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
-notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
+notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
```
```diff
@@ -13,6 +13,7 @@
 [](https://twitter.com/argoproj)
 [](https://argoproj.github.io/community/join-slack)
 [](https://www.linkedin.com/company/argoproj/)
+[](https://bsky.app/profile/argoproj.bsky.social)

 # Argo CD - Declarative Continuous Delivery for Kubernetes

```
```diff
@@ -3,9 +3,9 @@ header:
 expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
 last-updated: '2023-10-27'
 last-reviewed: '2023-10-27'
-commit-hash: 06ef059f9fc7cf9da2dfaef2a505ee1e3c693485
+commit-hash: 814db444c36503851dc3d45cf9c44394821ca1a4
 project-url: https://github.com/argoproj/argo-cd
-project-release: v3.3.0
+project-release: v3.4.0
 changelog: https://github.com/argoproj/argo-cd/releases
 license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
 project-lifecycle:
```
2  USERS.md

```diff
@@ -208,6 +208,7 @@ Currently, the following organizations are **officially** using Argo CD:
 1. [Kurly](https://www.kurly.com/)
 1. [Kvist](https://kvistsolutions.com)
 1. [Kyriba](https://www.kyriba.com/)
+1. [Lattice](https://lattice.com)
 1. [LeFigaro](https://www.lefigaro.fr/)
 1. [Lely](https://www.lely.com/)
 1. [LexisNexis](https://www.lexisnexis.com/)
@@ -245,6 +246,7 @@ Currently, the following organizations are **officially** using Argo CD:
 1. [Municipality of The Hague](https://www.denhaag.nl/)
 1. [My Job Glasses](https://myjobglasses.com)
 1. [Natura &Co](https://naturaeco.com/)
+1. [Netease Cloud Music](https://music.163.com/)
 1. [Nethopper](https://nethopper.io)
 1. [New Relic](https://newrelic.com/)
 1. [Nextbasket](https://nextbasket.com)
```
@@ -57,6 +57,7 @@ import (
	"github.com/argoproj/argo-cd/v3/common"
	applog "github.com/argoproj/argo-cd/v3/util/app/log"
	"github.com/argoproj/argo-cd/v3/util/db"
	"github.com/argoproj/argo-cd/v3/util/settings"

	argov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
	argoutil "github.com/argoproj/argo-cd/v3/util/argo"
@@ -110,6 +111,7 @@ type ApplicationSetReconciler struct {
	GlobalPreservedLabels   []string
	Metrics                 *metrics.ApplicationsetMetrics
	MaxResourcesStatusCount int
	ClusterInformer         *settings.ClusterInformer
}

// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete

@@ -669,8 +671,9 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
|
||||
Watches(
|
||||
&corev1.Secret{},
|
||||
&clusterSecretEventHandler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
ApplicationSetNamespaces: r.ApplicationSetNamespaces,
|
||||
}).
|
||||
Complete(r)
|
||||
}
|
||||
@@ -824,7 +827,7 @@ func (r *ApplicationSetReconciler) getCurrentApplications(ctx context.Context, a
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
// The function must be called after all generators had been called and generated applications
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
	clusterList, err := utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace)
	clusterList, err := utils.ListClusters(r.ClusterInformer)
	if err != nil {
		return fmt.Errorf("error listing clusters: %w", err)
	}

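To make the call-site migration above easier to follow, the fragment below sketches the difference between the old clientset-based lookup and the new informer-backed one. The helper name, log fields, and surrounding function are hypothetical; only `utils.ListClusters(r.ClusterInformer)` and the returned `Name`/`Server` fields come from this change.

```go
// Hypothetical fragment, in the style of the reconciler code above.
func logKnownClusters(logCtx *log.Entry, r *ApplicationSetReconciler) error {
	// Before this change: utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace),
	// which issued a live secrets List against the API server on every call.
	clusterList, err := utils.ListClusters(r.ClusterInformer) // now served from the informer cache
	if err != nil {
		return fmt.Errorf("error listing clusters: %w", err)
	}
	for _, c := range clusterList {
		logCtx.Debugf("known cluster %q at %s", c.Name, c.Server)
	}
	return nil
}
```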
@@ -1,6 +1,7 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
crtclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -1188,6 +1190,8 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, c := range []struct {
|
||||
// name is human-readable test name
|
||||
@@ -1244,9 +1248,6 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
initObjs := []crtclient.Object{&app, &appSet}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-secret",
|
||||
@@ -1264,8 +1265,12 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
initObjs := []crtclient.Object{&app, &appSet, secret}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
|
||||
objects := append([]runtime.Object{}, secret)
|
||||
kubeclientset := kubefake.NewSimpleClientset(objects...)
|
||||
kubeclientset := kubefake.NewClientset(objects...)
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), kubeclientset, "argocd")
|
||||
@@ -1273,6 +1278,11 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
_ = settingsMgr.ResyncInformers()
|
||||
argodb := db.NewDB("argocd", settingsMgr, kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "namespace")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -1281,7 +1291,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
Metrics: metrics,
|
||||
ArgoDB: argodb,
|
||||
}
|
||||
clusterList, err := utils.ListClusters(t.Context(), kubeclientset, "namespace")
|
||||
clusterList, err := utils.ListClusters(clusterInformer)
|
||||
require.NoError(t, err)
|
||||
|
||||
appLog := log.WithFields(applog.GetAppLogFields(&app)).WithField("appSet", "")
|
||||
@@ -1298,7 +1308,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
// App on the cluster should have the expected finalizers
|
||||
assert.ElementsMatch(t, c.expectedFinalizers, retrievedApp.Finalizers)
|
||||
|
||||
// App object passed in as a parameter should have the expected finaliers
|
||||
// App object passed in as a parameter should have the expected finalizers
|
||||
assert.ElementsMatch(t, c.expectedFinalizers, appInputParam.Finalizers)
|
||||
|
||||
bytes, _ := json.MarshalIndent(retrievedApp, "", " ")
|
||||
@@ -1311,6 +1321,8 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, c := range []struct {
|
||||
// name is human-readable test name
|
||||
@@ -1403,9 +1415,6 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
initObjs := []crtclient.Object{&app, &appSet}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-secret",
|
||||
@@ -1423,6 +1432,10 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
initObjs := []crtclient.Object{&app, &appSet, secret}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
|
||||
kubeclientset := getDefaultTestClientSet(secret)
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
@@ -1431,6 +1444,11 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
_ = settingsMgr.ResyncInformers()
|
||||
argodb := db.NewDB("argocd", settingsMgr, kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -1440,7 +1458,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
ArgoDB: argodb,
|
||||
}
|
||||
|
||||
clusterList, err := utils.ListClusters(t.Context(), kubeclientset, "argocd")
|
||||
clusterList, err := utils.ListClusters(clusterInformer)
|
||||
require.NoError(t, err)
|
||||
|
||||
appLog := log.WithFields(applog.GetAppLogFields(&app)).WithField("appSet", "")
|
||||
@@ -1745,6 +1763,8 @@ func TestDeleteInCluster(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, c := range []struct {
|
||||
// appSet is the application set on which the delete function is called
|
||||
@@ -1857,12 +1877,19 @@ func TestDeleteInCluster(t *testing.T) {
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
kubeclientset := kubefake.NewClientset()
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "namespace")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)),
|
||||
KubeClientset: kubefake.NewSimpleClientset(),
|
||||
Metrics: metrics,
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)),
|
||||
KubeClientset: kubeclientset,
|
||||
Metrics: metrics,
|
||||
ClusterInformer: clusterInformer,
|
||||
}
|
||||
|
||||
err = r.deleteInCluster(t.Context(), log.NewEntry(log.StandardLogger()), c.appSet, c.desiredApps)
|
||||
@@ -2193,6 +2220,8 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
project := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"},
|
||||
@@ -2236,6 +2265,9 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -2249,6 +2281,7 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
Policy: v1alpha1.ApplicationsSyncPolicySync,
|
||||
ArgoCDNamespace: "argocd",
|
||||
Metrics: metrics,
|
||||
ClusterInformer: clusterInformer,
|
||||
}
|
||||
|
||||
req := ctrl.Request{
|
||||
@@ -2279,7 +2312,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
someTime := &metav1.Time{Time: time.Now().Add(-5 * time.Minute)}
|
||||
existingParameterGeneratedCondition := getParametersGeneratedCondition(true, "")
|
||||
existingParameterGeneratedCondition.LastTransitionTime = someTime
|
||||
@@ -2750,6 +2783,8 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
defaultProject := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
@@ -2806,10 +2841,14 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
|
||||
kubeclientset := getDefaultTestClientSet(secret)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject, secret).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
@@ -2825,6 +2864,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
Policy: v1alpha1.ApplicationsSyncPolicySync,
|
||||
EnablePolicyOverride: allowPolicyOverride,
|
||||
Metrics: metrics,
|
||||
ClusterInformer: clusterInformer,
|
||||
}
|
||||
|
||||
req := ctrl.Request{
|
||||
@@ -2925,6 +2965,8 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
defaultProject := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
@@ -2981,11 +3023,16 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
|
||||
kubeclientset := getDefaultTestClientSet(secret)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject, secret).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -3000,6 +3047,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
Policy: v1alpha1.ApplicationsSyncPolicySync,
|
||||
EnablePolicyOverride: allowPolicyOverride,
|
||||
Metrics: metrics,
|
||||
ClusterInformer: clusterInformer,
|
||||
}
|
||||
|
||||
req := ctrl.Request{
|
||||
@@ -3092,6 +3140,8 @@ func TestPolicies(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
defaultProject := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
@@ -3175,6 +3225,11 @@ func TestPolicies(t *testing.T) {
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -3187,6 +3242,7 @@ func TestPolicies(t *testing.T) {
|
||||
ArgoCDNamespace: "argocd",
|
||||
KubeClientset: kubeclientset,
|
||||
Policy: policy,
|
||||
ClusterInformer: clusterInformer,
|
||||
Metrics: metrics,
|
||||
}
|
||||
|
||||
@@ -3196,27 +3252,27 @@ func TestPolicies(t *testing.T) {
|
||||
Name: "name",
|
||||
},
|
||||
}
|
||||
|
||||
// Check if Application is created
|
||||
res, err := r.Reconcile(t.Context(), req)
|
||||
ctx := t.Context()
|
||||
// Check if the application is created
|
||||
res, err := r.Reconcile(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, time.Duration(0), res.RequeueAfter)
|
||||
|
||||
var app v1alpha1.Application
|
||||
err = r.Get(t.Context(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
err = r.Get(ctx, crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "value", app.Annotations["key"])
|
||||
|
||||
// Check if Application is updated
|
||||
// Check if the Application is updated
|
||||
app.Annotations["key"] = "edited"
|
||||
err = r.Update(t.Context(), &app)
|
||||
err = r.Update(ctx, &app)
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err = r.Reconcile(t.Context(), req)
|
||||
res, err = r.Reconcile(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, time.Duration(0), res.RequeueAfter)
|
||||
|
||||
err = r.Get(t.Context(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
err = r.Get(ctx, crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
require.NoError(t, err)
|
||||
|
||||
if c.allowedUpdate {
|
||||
@@ -3225,22 +3281,22 @@ func TestPolicies(t *testing.T) {
|
||||
assert.Equal(t, "edited", app.Annotations["key"])
|
||||
}
|
||||
|
||||
// Check if Application is deleted
|
||||
err = r.Get(t.Context(), crtclient.ObjectKey{Namespace: "argocd", Name: "name"}, &appSet)
|
||||
// Check if the Application is deleted
|
||||
err = r.Get(ctx, crtclient.ObjectKey{Namespace: "argocd", Name: "name"}, &appSet)
|
||||
require.NoError(t, err)
|
||||
appSet.Spec.Generators[0] = v1alpha1.ApplicationSetGenerator{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{},
|
||||
},
|
||||
}
|
||||
err = r.Update(t.Context(), &appSet)
|
||||
err = r.Update(ctx, &appSet)
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err = r.Reconcile(t.Context(), req)
|
||||
res, err = r.Reconcile(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, time.Duration(0), res.RequeueAfter)
|
||||
|
||||
err = r.Get(t.Context(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
err = r.Get(ctx, crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
require.NoError(t, err)
|
||||
if c.allowedDelete {
|
||||
assert.NotNil(t, app.DeletionTimestamp)
|
||||
@@ -3256,7 +3312,7 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
for _, cc := range []struct {
|
||||
name string
|
||||
@@ -4133,7 +4189,7 @@ func TestBuildAppDependencyList(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
@@ -4568,7 +4624,7 @@ func TestGetAppsToSync(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
@@ -5255,7 +5311,7 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
@@ -6008,7 +6064,7 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
@@ -6281,7 +6337,7 @@ func TestUpdateResourceStatus(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&cc.appSet).WithObjects(&cc.appSet).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
@@ -6371,7 +6427,7 @@ func TestResourceStatusAreOrdered(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&cc.appSet).WithObjects(&cc.appSet).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
@@ -7576,7 +7632,7 @@ func TestReconcileProgressiveSyncDisabled(t *testing.T) {
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
for _, cc := range []struct {
|
||||
name string
|
||||
@@ -7649,3 +7705,14 @@ func TestReconcileProgressiveSyncDisabled(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}

func startAndSyncInformer(t *testing.T, informer cache.SharedIndexInformer) context.CancelFunc {
	t.Helper()
	ctx, cancel := context.WithCancel(t.Context())
	go informer.Run(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
		cancel()
		t.Fatal("Timed out waiting for caches to sync")
	}
	return cancel
}
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -22,8 +23,9 @@ import (
|
||||
// requeue any related ApplicationSets.
|
||||
type clusterSecretEventHandler struct {
|
||||
// handler.EnqueueRequestForOwner
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
ApplicationSetNamespaces []string
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Create(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
@@ -68,6 +70,10 @@ func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Contex
|
||||
|
||||
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
|
||||
for _, appSet := range appSetList.Items {
|
||||
if !utils.IsNamespaceAllowed(h.ApplicationSetNamespaces, appSet.GetNamespace()) {
|
||||
// Ignore it as not part of the allowed list of namespaces in which to watch Appsets
|
||||
continue
|
||||
}
|
||||
foundClusterGenerator := false
|
||||
for _, generator := range appSet.Spec.Generators {
|
||||
if generator.Clusters != nil {
|
||||
|
||||
@@ -137,7 +137,7 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "another-namespace",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
@@ -171,9 +171,37 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{
|
||||
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
|
||||
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cluster generators in other namespaces should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "my-namespace-not-allowed",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "non-argo cd secret should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
@@ -552,8 +580,9 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()
|
||||
|
||||
handler := &clusterSecretEventHandler{
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
ApplicationSetNamespaces: []string{"argocd"},
|
||||
}
|
||||
|
||||
mockAddRateLimitingInterface := mockAddRateLimitingInterface{}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
appsetmetrics "github.com/argoproj/argo-cd/v3/applicationset/metrics"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services/mocks"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
func TestRequeueAfter(t *testing.T) {
|
||||
@@ -57,12 +58,17 @@ func TestRequeueAfter(t *testing.T) {
|
||||
}
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
|
||||
scmConfig := generators.NewSCMConfig("", []string{""}, true, true, nil, true)
|
||||
clusterInformer, err := settings.NewClusterInformer(appClientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
terminalGenerators := map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(ctx, k8sClient, appClientset, "argocd"),
|
||||
"Clusters": generators.NewClusterGenerator(k8sClient, "argocd"),
|
||||
"Git": generators.NewGitGenerator(mockServer, "namespace"),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), scmConfig),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd", clusterInformer),
|
||||
"PullRequest": generators.NewPullRequestGenerator(k8sClient, scmConfig),
|
||||
}
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
@@ -22,19 +21,15 @@ var _ Generator = (*ClusterGenerator)(nil)
|
||||
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type ClusterGenerator struct {
|
||||
client.Client
|
||||
ctx context.Context
|
||||
clientset kubernetes.Interface
|
||||
// namespace is the Argo CD namespace
|
||||
namespace string
|
||||
}
|
||||
|
||||
var render = &utils.Render{}
|
||||
|
||||
func NewClusterGenerator(ctx context.Context, c client.Client, clientset kubernetes.Interface, namespace string) Generator {
|
||||
func NewClusterGenerator(c client.Client, namespace string) Generator {
|
||||
g := &ClusterGenerator{
|
||||
Client: c,
|
||||
ctx: ctx,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
}
|
||||
return g
|
||||
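Since the constructor above drops both the context and the kubernetes.Interface, a test that previously had to build a fake clientset can now construct the generator from a controller-runtime client alone. The snippet below is a minimal sketch under that assumption; the function name and namespace value are placeholders.

```go
package generators

import "sigs.k8s.io/controller-runtime/pkg/client/fake"

// newTestClusterGenerator is a hypothetical helper showing the two-argument constructor.
func newTestClusterGenerator() Generator {
	// Cluster secrets are read through the cached controller-runtime client,
	// scoped to the given Argo CD namespace.
	c := fake.NewClientBuilder().Build()
	return NewClusterGenerator(c, "argocd")
}
```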
@@ -64,16 +59,7 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
// - Since local clusters do not have secrets, they do not have labels to match against
|
||||
ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0
|
||||
|
||||
// ListCluster will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get cluster secrets using the cached controller-runtime client
|
||||
clusterSecrets, err := g.getSecretsByClusterName(logCtx, appSetGenerator)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting cluster secrets: %w", err)
|
||||
@@ -82,32 +68,14 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
paramHolder := ¶mHolder{isFlatMode: appSetGenerator.Clusters.FlatList}
|
||||
logCtx.Debugf("Using flat mode = %t for cluster generator", paramHolder.isFlatMode)
|
||||
|
||||
secretsFound := []corev1.Secret{}
|
||||
for _, cluster := range clustersFromArgoCD {
|
||||
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
|
||||
// handled by the next step.
|
||||
if secretForCluster, exists := clusterSecrets[cluster.Name]; exists {
|
||||
secretsFound = append(secretsFound, secretForCluster)
|
||||
} else if !ignoreLocalClusters {
|
||||
// If there is no secret for the cluster, it's the local cluster, so handle it here.
|
||||
params := map[string]any{}
|
||||
params["name"] = cluster.Name
|
||||
params["nameNormalized"] = cluster.Name
|
||||
params["server"] = cluster.Server
|
||||
params["project"] = ""
|
||||
|
||||
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
|
||||
}
|
||||
|
||||
paramHolder.append(params)
|
||||
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
}
|
||||
// Convert map values to slice to check for an in-cluster secret
|
||||
secretsList := make([]corev1.Secret, 0, len(clusterSecrets))
|
||||
for _, secret := range clusterSecrets {
|
||||
secretsList = append(secretsList, secret)
|
||||
}
|
||||
|
||||
// For each matching cluster secret (non-local clusters only)
|
||||
for _, cluster := range secretsFound {
|
||||
for _, cluster := range clusterSecrets {
|
||||
params := g.getClusterParameters(cluster, appSet)
|
||||
|
||||
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
|
||||
@@ -119,6 +87,23 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
logCtx.WithField("cluster", cluster.Name).Debug("matched cluster secret")
|
||||
}
|
||||
|
||||
// Add the in-cluster last if it doesn't have a secret, and we're not ignoring in-cluster
|
||||
if !ignoreLocalClusters && !utils.SecretsContainInClusterCredentials(secretsList) {
|
||||
params := map[string]any{}
|
||||
params["name"] = argoappsetv1alpha1.KubernetesInClusterName
|
||||
params["nameNormalized"] = argoappsetv1alpha1.KubernetesInClusterName
|
||||
params["server"] = argoappsetv1alpha1.KubernetesInternalAPIServerAddr
|
||||
params["project"] = ""
|
||||
|
||||
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
|
||||
}
|
||||
|
||||
paramHolder.append(params)
|
||||
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
}
|
||||
|
||||
return paramHolder.consolidate(), nil
|
||||
}
|
||||
|
||||
@@ -186,7 +171,7 @@ func (g *ClusterGenerator) getSecretsByClusterName(log *log.Entry, appSetGenerat
|
||||
return nil, fmt.Errorf("error converting label selector: %w", err)
|
||||
}
|
||||
|
||||
if err := g.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
|
||||
if err := g.List(context.Background(), clusterSecretList, client.InNamespace(g.namespace), client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("clusters matching labels: %d", len(clusterSecretList.Items))
|
||||
|
||||
@@ -7,12 +7,9 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
|
||||
@@ -299,23 +296,15 @@ func TestGenerateParams(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
|
||||
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")
|
||||
clusterGenerator := NewClusterGenerator(cl, "namespace")
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -336,12 +325,25 @@ func TestGenerateParams(t *testing.T) {
|
||||
require.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
assertEqualParamsFlat(t, testCase.expected, got, testCase.isFlatMode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func assertEqualParamsFlat(t *testing.T, expected, got []map[string]any, isFlatMode bool) {
|
||||
t.Helper()
|
||||
if isFlatMode && len(expected) == 1 && len(got) == 1 {
|
||||
expectedClusters, ok1 := expected[0]["clusters"].([]map[string]any)
|
||||
gotClusters, ok2 := got[0]["clusters"].([]map[string]any)
|
||||
if ok1 && ok2 {
|
||||
assert.ElementsMatch(t, expectedClusters, gotClusters)
|
||||
return
|
||||
}
|
||||
}
|
||||
assert.ElementsMatch(t, expected, got)
|
||||
}
|
||||
|
||||
func TestGenerateParamsGoTemplate(t *testing.T) {
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
@@ -837,23 +839,15 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
|
||||
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")
|
||||
clusterGenerator := NewClusterGenerator(cl, "namespace")
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -876,7 +870,7 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
|
||||
require.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
assertEqualParamsFlat(t, testCase.expected, got, testCase.isFlatMode)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -19,24 +19,27 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
var _ Generator = (*DuckTypeGenerator)(nil)
|
||||
|
||||
// DuckTypeGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type DuckTypeGenerator struct {
|
||||
ctx context.Context
|
||||
dynClient dynamic.Interface
|
||||
clientset kubernetes.Interface
|
||||
namespace string // namespace is the Argo CD namespace
|
||||
ctx context.Context
|
||||
dynClient dynamic.Interface
|
||||
clientset kubernetes.Interface
|
||||
namespace string // namespace is the Argo CD namespace
|
||||
clusterInformer *settings.ClusterInformer
|
||||
}
|
||||
|
||||
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string) Generator {
|
||||
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string, clusterInformer *settings.ClusterInformer) Generator {
|
||||
g := &DuckTypeGenerator{
|
||||
ctx: ctx,
|
||||
dynClient: dynClient,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
ctx: ctx,
|
||||
dynClient: dynClient,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
clusterInformer: clusterInformer,
|
||||
}
|
||||
return g
|
||||
}
|
||||
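For the duck-type generator the constructor gains a trailing `*settings.ClusterInformer`. The sketch below shows one way a caller might build that argument with fake clients, assuming `settings.NewClusterInformer` and the informer's `Run`/`HasSynced` behaviour as introduced in this branch; all names and the namespace are illustrative, and cache sync is left to the caller.

```go
package generators

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	dynfake "k8s.io/client-go/dynamic/fake"
	kubefake "k8s.io/client-go/kubernetes/fake"

	"github.com/argoproj/argo-cd/v3/util/settings"
)

// newTestDuckTypeGenerator is a hypothetical helper mirroring the new signature.
func newTestDuckTypeGenerator(ctx context.Context) (Generator, error) {
	clientset := kubefake.NewClientset()
	dynClient := dynfake.NewSimpleDynamicClient(runtime.NewScheme())

	informer, err := settings.NewClusterInformer(clientset, "argocd")
	if err != nil {
		return nil, err
	}
	go informer.Run(ctx.Done()) // callers should also wait for the cache to sync

	return NewDuckTypeGenerator(ctx, dynClient, clientset, "argocd", informer), nil
}
```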
@@ -65,8 +68,7 @@ func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.A
|
||||
return nil, ErrEmptyAppSetGenerator
|
||||
}
|
||||
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.clusterInformer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
}
|
||||
|
||||
@@ -11,11 +11,13 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
"k8s.io/client-go/dynamic/fake"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/test"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -290,9 +292,14 @@ func TestGenerateParamsForDuckType(t *testing.T) {
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
fakeDynClient := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
|
||||
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace")
|
||||
clusterInformer, err := settings.NewClusterInformer(appClientset, "namespace")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer test.StartInformer(clusterInformer)()
|
||||
|
||||
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace", clusterInformer)
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -586,9 +593,14 @@ func TestGenerateParamsForDuckTypeGoTemplate(t *testing.T) {
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
fakeDynClient := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
|
||||
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace")
|
||||
clusterInformer, err := settings.NewClusterInformer(appClientset, "namespace")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer test.StartInformer(clusterInformer)()
|
||||
|
||||
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace", clusterInformer)
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -16,8 +15,6 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
crtclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
@@ -223,7 +220,7 @@ func TestTransForm(t *testing.T) {
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
testGenerators := map[string]Generator{
|
||||
"Clusters": getMockClusterGenerator(t.Context()),
|
||||
"Clusters": getMockClusterGenerator(),
|
||||
}
|
||||
|
||||
applicationSetInfo := argov1alpha1.ApplicationSet{
|
||||
@@ -260,7 +257,7 @@ func emptyTemplate() argov1alpha1.ApplicationSetTemplate {
|
||||
}
|
||||
}
|
||||
|
||||
func getMockClusterGenerator(ctx context.Context) Generator {
|
||||
func getMockClusterGenerator() Generator {
|
||||
clusters := []crtclient.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -335,14 +332,8 @@ func getMockClusterGenerator(ctx context.Context) Generator {
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
return NewClusterGenerator(ctx, fakeClient, appClientset, "namespace")
|
||||
return NewClusterGenerator(fakeClient, "namespace")
|
||||
}
|
||||
|
||||
func getMockGitGenerator() Generator {
|
||||
@@ -354,7 +345,7 @@ func getMockGitGenerator() Generator {
|
||||
|
||||
func TestGetRelevantGenerators(t *testing.T) {
|
||||
testGenerators := map[string]Generator{
|
||||
"Clusters": getMockClusterGenerator(t.Context()),
|
||||
"Clusters": getMockClusterGenerator(),
|
||||
"Git": getMockGitGenerator(),
|
||||
}
|
||||
|
||||
|
||||
@@ -316,7 +316,7 @@ func (g *GitGenerator) filterApps(directories []argoprojiov1alpha1.GitDirectoryG
|
||||
appExclude = true
|
||||
}
|
||||
}
|
||||
// Whenever there is a path with exclude: true it wont be included, even if it is included in a different path pattern
|
||||
// Whenever there is a path with exclude: true it won't be included, even if it is included in a different path pattern
|
||||
if appInclude && !appExclude {
|
||||
res = append(res, appPath)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
@@ -624,11 +623,6 @@ func TestInterpolatedMatrixGenerate(t *testing.T) {
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
@@ -637,13 +631,12 @@ func TestInterpolatedMatrixGenerate(t *testing.T) {
|
||||
genMock := &generatorsMock.Generator{}
|
||||
appSet := &v1alpha1.ApplicationSet{}
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")
|
||||
clusterGenerator := NewClusterGenerator(cl, "namespace")
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := v1alpha1.ApplicationSetGenerator{
|
||||
@@ -803,11 +796,6 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
@@ -820,13 +808,12 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")
|
||||
clusterGenerator := NewClusterGenerator(cl, "namespace")
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := v1alpha1.ApplicationSetGenerator{
|
||||
|
||||
@@ -8,15 +8,16 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
|
||||
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig, clusterInformer *settings.ClusterInformer) map[string]Generator {
|
||||
terminalGenerators := map[string]Generator{
|
||||
"List": NewListGenerator(),
|
||||
"Clusters": NewClusterGenerator(ctx, c, k8sClient, controllerNamespace),
|
||||
"Clusters": NewClusterGenerator(c, controllerNamespace),
|
||||
"Git": NewGitGenerator(argoCDService, controllerNamespace),
|
||||
"SCMProvider": NewSCMProviderGenerator(c, scmConfig),
|
||||
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace),
|
||||
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace, clusterInformer),
|
||||
"PullRequest": NewPullRequestGenerator(c, scmConfig),
|
||||
"Plugin": NewPluginGenerator(c, controllerNamespace),
|
||||
}
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/db"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
// ClusterSpecifier contains only the name and server URL of a cluster. We use this struct to avoid partially-populating
|
||||
@@ -19,42 +16,44 @@ type ClusterSpecifier struct {
|
||||
Server string
|
||||
}
|
||||
|
||||
func ListClusters(ctx context.Context, clientset kubernetes.Interface, namespace string) ([]ClusterSpecifier, error) {
|
||||
clusterSecretsList, err := clientset.CoreV1().Secrets(namespace).List(ctx,
|
||||
metav1.ListOptions{LabelSelector: common.LabelKeySecretType + "=" + common.LabelValueSecretTypeCluster})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clusterSecretsList == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
clusterSecrets := clusterSecretsList.Items
|
||||
|
||||
clusterList := make([]ClusterSpecifier, len(clusterSecrets))
|
||||
|
||||
hasInClusterCredentials := false
|
||||
for i, clusterSecret := range clusterSecrets {
|
||||
cluster, err := db.SecretToCluster(&clusterSecret)
|
||||
if err != nil || cluster == nil {
|
||||
return nil, fmt.Errorf("unable to convert cluster secret to cluster object '%s': %w", clusterSecret.Name, err)
|
||||
// SecretsContainInClusterCredentials checks if any of the provided secrets represent the in-cluster configuration.
|
||||
func SecretsContainInClusterCredentials(secrets []corev1.Secret) bool {
|
||||
for _, secret := range secrets {
|
||||
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr {
|
||||
return true
|
||||
}
|
||||
clusterList[i] = ClusterSpecifier{
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ListClusters returns a list of cluster specifiers using the ClusterInformer.
|
||||
func ListClusters(clusterInformer *settings.ClusterInformer) ([]ClusterSpecifier, error) {
|
||||
clusters, err := clusterInformer.ListClusters()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
}
|
||||
// len of clusters +1 for the in cluster secret
|
||||
clusterList := make([]ClusterSpecifier, 0, len(clusters)+1)
|
||||
hasInCluster := false
|
||||
|
||||
for _, cluster := range clusters {
|
||||
clusterList = append(clusterList, ClusterSpecifier{
|
||||
Name: cluster.Name,
|
||||
Server: cluster.Server,
|
||||
}
|
||||
})
|
||||
if cluster.Server == appv1.KubernetesInternalAPIServerAddr {
|
||||
hasInClusterCredentials = true
|
||||
hasInCluster = true
|
||||
}
|
||||
}
|
||||
if !hasInClusterCredentials {
|
||||
|
||||
if !hasInCluster {
|
||||
// There was no secret for the in-cluster config, so we add it here. We don't fully-populate the Cluster struct,
|
||||
// since only the name and server fields are used by the generator.
|
||||
clusterList = append(clusterList, ClusterSpecifier{
|
||||
Name: "in-cluster",
|
||||
Name: appv1.KubernetesInClusterName,
|
||||
Server: appv1.KubernetesInternalAPIServerAddr,
|
||||
})
|
||||
}
|
||||
|
||||
return clusterList, nil
|
||||
}
|
||||
|
||||
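The rewritten helpers above are small enough to pin down with a doc-style example. The snippet below exercises `SecretsContainInClusterCredentials` exactly as defined in this hunk; it is not part of the repository's test suite, and the remote server URL is made up.

```go
package utils_test

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/argoproj/argo-cd/v3/applicationset/utils"
	appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
)

func Example_inClusterDetection() {
	inCluster := corev1.Secret{
		Data: map[string][]byte{"server": []byte(appv1.KubernetesInternalAPIServerAddr)},
	}
	remote := corev1.Secret{
		Data: map[string][]byte{"server": []byte("https://1.2.3.4")},
	}
	fmt.Println(utils.SecretsContainInClusterCredentials([]corev1.Secret{remote}))
	fmt.Println(utils.SecretsContainInClusterCredentials([]corev1.Secret{remote, inCluster}))
	// Output:
	// false
	// true
}
```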
43 assets/swagger.json generated
@@ -2265,6 +2265,44 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/{name}/events": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationSetService"
|
||||
],
|
||||
"summary": "ListResourceEvents returns a list of event resources",
|
||||
"operationId": "ApplicationSetService_ListResourceEvents",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "the applicationsets's name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "The application set namespace. Default empty is argocd control plane namespace.",
|
||||
"name": "appsetNamespace",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1EventList"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/{name}/resource-tree": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -7261,7 +7299,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"applyNestedSelectors": {
|
||||
"description": "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators\nDeprecated: This field is ignored, and the behavior is always enabled. The field will be removed in a future\nversion of the ApplicationSet CRD.",
|
||||
"description": "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators.\n\nDeprecated: This field is ignored, and the behavior is always enabled. The field will be removed in a future\nversion of the ApplicationSet CRD.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"generators": {
|
||||
@@ -7319,6 +7357,9 @@
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetCondition"
|
||||
}
|
||||
},
|
||||
"health": {
|
||||
"$ref": "#/definitions/v1alpha1HealthStatus"
|
||||
},
|
||||
"resources": {
|
||||
"description": "Resources is a list of Applications resources managed by this application set.",
|
||||
"type": "array",
|
||||
|
||||
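For API consumers, the new `/api/v1/applicationsets/{name}/events` route can be called like any other authenticated Argo CD endpoint. The sketch below assumes a reachable API server URL and a bearer token (both placeholders); only the path, the `appsetNamespace` query parameter, and the `v1EventList` response shape come from the swagger fragment above.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// listAppSetEvents is a hypothetical client-side helper.
func listAppSetEvents(apiURL, token, name, namespace string) error {
	url := fmt.Sprintf("%s/api/v1/applicationsets/%s/events?appsetNamespace=%s", apiURL, name, namespace)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body) // JSON-encoded v1EventList
	if err != nil {
		return err
	}
	fmt.Println(string(body))
	return nil
}
```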
@@ -32,6 +32,7 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -193,6 +194,18 @@ func NewCommand() *cobra.Command {
|
||||
argoSettingsMgr := argosettings.NewSettingsManager(ctx, k8sClient, namespace)
|
||||
argoCDDB := db.NewDB(namespace, argoSettingsMgr, k8sClient)
|
||||
|
||||
clusterInformer, err := argosettings.NewClusterInformer(k8sClient, namespace)
|
||||
if err != nil {
|
||||
log.Error(err, "unable to create cluster informer")
|
||||
os.Exit(1)
|
||||
}
|
||||
go clusterInformer.Run(ctx.Done())
|
||||
|
||||
if !cache.WaitForCacheSync(ctx.Done(), clusterInformer.HasSynced) {
|
||||
log.Error("Timed out waiting for cluster cache to sync")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
scmConfig := generators.NewSCMConfig(scmRootCAPath, allowedScmProviders, enableScmProviders, enableGitHubAPIMetrics, github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)), tokenRefStrictMode)
|
||||
|
||||
tlsConfig := apiclient.TLSConfiguration{
|
||||
@@ -212,7 +225,7 @@ func NewCommand() *cobra.Command {
|
||||
repoClientset := apiclient.NewRepoServerClientset(argocdRepoServer, repoServerTimeoutSeconds, tlsConfig)
|
||||
argoCDService := services.NewArgoCDService(argoCDDB, gitSubmoduleEnabled, repoClientset, enableNewGitFileGlobbing)
|
||||
|
||||
topLevelGenerators := generators.GetGenerators(ctx, mgr.GetClient(), k8sClient, namespace, argoCDService, dynamicClient, scmConfig)
|
||||
topLevelGenerators := generators.GetGenerators(ctx, mgr.GetClient(), k8sClient, namespace, argoCDService, dynamicClient, scmConfig, clusterInformer)
|
||||
|
||||
// start a webhook server that listens to incoming webhook payloads
|
||||
webhookHandler, err := webhook.NewWebhookHandler(webhookParallelism, argoSettingsMgr, mgr.GetClient(), topLevelGenerators)
|
||||
@@ -248,6 +261,7 @@ func NewCommand() *cobra.Command {
|
||||
GlobalPreservedLabels: globalPreservedLabels,
|
||||
Metrics: &metrics,
|
||||
MaxResourcesStatusCount: maxResourcesStatusCount,
|
||||
ClusterInformer: clusterInformer,
|
||||
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
|
||||
os.Exit(1)
|
||||
|
||||
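Pulling the scattered wiring above into one place, the startup ordering this hunk establishes is roughly: create the informer, start it, wait for its cache to sync, and only then hand it to the generators and the reconciler. The condensed sketch below restates that ordering; error handling is simplified and not taken verbatim from the change.

```go
// Condensed recap (not additional code in this change): the informer must be
// running and synced before anything that reads from it is constructed.
clusterInformer, err := argosettings.NewClusterInformer(k8sClient, namespace)
if err != nil {
	// log and exit
}
go clusterInformer.Run(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), clusterInformer.HasSynced) {
	// treat a sync timeout as fatal
}

// Consumers that now receive the informer:
topLevelGenerators := generators.GetGenerators(ctx, mgr.GetClient(), k8sClient, namespace,
	argoCDService, dynamicClient, scmConfig, clusterInformer)
_ = topLevelGenerators // also wired into ApplicationSetReconciler.ClusterInformer
```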
@@ -295,7 +295,8 @@ spec:
|
||||
spec:
|
||||
destination: {}
|
||||
project: ""
|
||||
status: {}
|
||||
status:
|
||||
health: {}
|
||||
---
|
||||
`,
|
||||
},
|
||||
@@ -325,7 +326,8 @@ spec:
|
||||
spec:
|
||||
destination: {}
|
||||
project: ""
|
||||
status: {}
|
||||
status:
|
||||
health: {}
|
||||
---
|
||||
`,
|
||||
},
|
||||
|
||||
@@ -60,8 +60,7 @@ func Test_loadClusters(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
for i := range clusters {
|
||||
// This changes, nil it to avoid testing it.
|
||||
//nolint:staticcheck
|
||||
clusters[i].ConnectionState.ModifiedAt = nil
|
||||
clusters[i].Info.ConnectionState.ModifiedAt = nil
|
||||
}
|
||||
|
||||
expected := []ClusterWithInfo{{
|
||||
@@ -69,11 +68,13 @@ func Test_loadClusters(t *testing.T) {
|
||||
ID: "",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Name: "in-cluster",
|
||||
ConnectionState: v1alpha1.ConnectionState{
|
||||
Status: "Successful",
|
||||
Info: v1alpha1.ClusterInfo{
|
||||
ConnectionState: v1alpha1.ConnectionState{
|
||||
Status: "Successful",
|
||||
},
|
||||
ServerVersion: ".",
|
||||
},
|
||||
ServerVersion: ".",
|
||||
Shard: ptr.To(int64(0)),
|
||||
Shard: ptr.To(int64(0)),
|
||||
},
|
||||
Namespaces: []string{"test"},
|
||||
}}
|
||||
|
||||
@@ -17,6 +17,8 @@ import (
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/hook"
|
||||
@@ -1275,24 +1277,32 @@ type objKeyLiveTarget struct {
|
||||
target *unstructured.Unstructured
|
||||
}
|
||||
|
||||
// addServerSideDiffPerfFlags adds server-side diff performance tuning flags to a command
|
||||
func addServerSideDiffPerfFlags(command *cobra.Command, serverSideDiffConcurrency *int, serverSideDiffMaxBatchKB *int) {
|
||||
command.Flags().IntVar(serverSideDiffConcurrency, "server-side-diff-concurrency", -1, "Max concurrent batches for server-side diff. -1 = unlimited, 1 = sequential, 2+ = concurrent (0 = invalid)")
|
||||
command.Flags().IntVar(serverSideDiffMaxBatchKB, "server-side-diff-max-batch-kb", 250, "Max batch size in KB for server-side diff. Smaller values are safer for proxies")
|
||||
}
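The helper above only registers the two tuning flags on a cobra command. The following is a minimal sketch, not part of this change, that performs the same registrations on a standalone command so the defaults and parsing behavior are easy to see; the command name `example` and the printed line are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var concurrency, maxBatchKB int
	cmd := &cobra.Command{
		Use: "example",
		Run: func(_ *cobra.Command, _ []string) {
			fmt.Printf("concurrency=%d maxBatchKB=%d\n", concurrency, maxBatchKB)
		},
	}
	// Same defaults as addServerSideDiffPerfFlags: -1 = unlimited concurrency, 250 KB batches.
	cmd.Flags().IntVar(&concurrency, "server-side-diff-concurrency", -1, "max concurrent batches")
	cmd.Flags().IntVar(&maxBatchKB, "server-side-diff-max-batch-kb", 250, "max batch size in KB")
	cmd.SetArgs([]string{"--server-side-diff-concurrency", "4", "--server-side-diff-max-batch-kb", "100"})
	_ = cmd.Execute() // prints: concurrency=4 maxBatchKB=100
}
```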
|
||||
|
||||
// NewApplicationDiffCommand returns a new instance of an `argocd app diff` command
|
||||
func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
refresh bool
|
||||
hardRefresh bool
|
||||
exitCode bool
|
||||
diffExitCode int
|
||||
local string
|
||||
revision string
|
||||
localRepoRoot string
|
||||
serverSideGenerate bool
|
||||
serverSideDiff bool
|
||||
localIncludes []string
|
||||
appNamespace string
|
||||
revisions []string
|
||||
sourcePositions []int64
|
||||
sourceNames []string
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
refresh bool
|
||||
hardRefresh bool
|
||||
exitCode bool
|
||||
diffExitCode int
|
||||
local string
|
||||
revision string
|
||||
localRepoRoot string
|
||||
serverSideGenerate bool
|
||||
serverSideDiff bool
|
||||
serverSideDiffConcurrency int
|
||||
serverSideDiffMaxBatchKB int
|
||||
localIncludes []string
|
||||
appNamespace string
|
||||
revisions []string
|
||||
sourcePositions []int64
|
||||
sourceNames []string
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
)
|
||||
shortDesc := "Perform a diff against the target and live state."
|
||||
command := &cobra.Command{
|
||||
@@ -1423,7 +1433,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
proj := getProject(ctx, c, clientOpts, app.Spec.Project)
|
||||
|
||||
foundDiffs := findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, app.GetName(), app.GetNamespace())
|
||||
foundDiffs := findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, app.GetName(), app.GetNamespace(), serverSideDiffConcurrency, serverSideDiffMaxBatchKB)
|
||||
if foundDiffs && exitCode {
|
||||
os.Exit(diffExitCode)
|
||||
}
|
||||
@@ -1438,6 +1448,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
|
||||
command.Flags().BoolVar(&serverSideGenerate, "server-side-generate", false, "Used with --local, this will send your manifests to the server for diffing")
|
||||
command.Flags().BoolVar(&serverSideDiff, "server-side-diff", false, "Use server-side diff to calculate the diff. This will default to true if the ServerSideDiff annotation is set on the application.")
|
||||
addServerSideDiffPerfFlags(command, &serverSideDiffConcurrency, &serverSideDiffMaxBatchKB)
|
||||
command.Flags().StringArrayVar(&localIncludes, "local-include", []string{"*.yaml", "*.yml", "*.json"}, "Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path.")
|
||||
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Only render the difference in namespace")
|
||||
command.Flags().StringArrayVar(&revisions, "revisions", []string{}, "Show manifests at specific revisions for source position in source-positions")
|
||||
@@ -1454,7 +1465,14 @@ func printResourceDiff(group, kind, namespace, name string, live, target *unstru
|
||||
}
|
||||
|
||||
// findAndPrintServerSideDiff performs a server-side diff by making requests to the api server and prints the response
|
||||
func findAndPrintServerSideDiff(ctx context.Context, app *argoappv1.Application, items []objKeyLiveTarget, resources *application.ManagedResourcesResponse, appIf application.ApplicationServiceClient, appName, appNs string) bool {
|
||||
func findAndPrintServerSideDiff(ctx context.Context, app *argoappv1.Application, items []objKeyLiveTarget, resources *application.ManagedResourcesResponse, appIf application.ApplicationServiceClient, appName, appNs string, maxConcurrency int, maxBatchSizeKB int) bool {
|
||||
if maxConcurrency == 0 {
|
||||
errors.CheckError(stderrors.New("invalid value for --server-side-diff-concurrency: 0 is not allowed (use -1 for unlimited, or a positive number to limit concurrency)"))
|
||||
}
|
||||
|
||||
liveResources := make([]*argoappv1.ResourceDiff, 0, len(items))
|
||||
targetManifests := make([]string, 0, len(items))
|
||||
|
||||
// Process each item for server-side diff
|
||||
foundDiffs := false
|
||||
for _, item := range items {
|
||||
@@ -1488,6 +1506,7 @@ func findAndPrintServerSideDiff(ctx context.Context, app *argoappv1.Application,
|
||||
Modified: true,
|
||||
}
|
||||
}
|
||||
liveResources = append(liveResources, liveResource)
|
||||
|
||||
if item.target != nil {
|
||||
jsonBytes, err := json.Marshal(item.target)
|
||||
@@ -1496,23 +1515,63 @@ func findAndPrintServerSideDiff(ctx context.Context, app *argoappv1.Application,
|
||||
}
|
||||
targetManifest = string(jsonBytes)
|
||||
}
|
||||
targetManifests = append(targetManifests, targetManifest)
|
||||
}
|
||||
|
||||
// Call server-side diff for this individual resource
|
||||
serverSideDiffQuery := &application.ApplicationServerSideDiffQuery{
|
||||
AppName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Project: &app.Spec.Project,
|
||||
LiveResources: []*argoappv1.ResourceDiff{liveResource},
|
||||
TargetManifests: []string{targetManifest},
|
||||
if len(liveResources) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Batch by size to avoid proxy limits
|
||||
maxBatchSize := maxBatchSizeKB * 1024
|
||||
var batches []struct{ start, end int }
|
||||
for i := 0; i < len(liveResources); {
|
||||
start := i
|
||||
size := 0
|
||||
for i < len(liveResources) {
|
||||
resourceSize := len(liveResources[i].LiveState) + len(targetManifests[i])
|
||||
if size+resourceSize > maxBatchSize && i > start {
|
||||
break
|
||||
}
|
||||
size += resourceSize
|
||||
i++
|
||||
}
|
||||
batches = append(batches, struct{ start, end int }{start, i})
|
||||
}
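A self-contained sketch of the greedy batching loop above: items are packed into a batch until adding the next one would exceed the size cap, but every batch keeps at least one item so an oversized resource still gets diffed on its own. `batchBySize` and the sizes are hypothetical, for illustration only.

```go
package main

import "fmt"

// batchBySize returns [start, end) index pairs, mirroring the loop above.
func batchBySize(sizes []int, maxBatch int) [][2]int {
	var batches [][2]int
	for i := 0; i < len(sizes); {
		start, total := i, 0
		for i < len(sizes) {
			if total+sizes[i] > maxBatch && i > start {
				break
			}
			total += sizes[i]
			i++
		}
		batches = append(batches, [2]int{start, i})
	}
	return batches
}

func main() {
	// With a 250 KB cap: 100+120 fit together, 300 is oversized but still gets its own batch.
	fmt.Println(batchBySize([]int{100, 120, 300, 80, 90}, 250)) // [[0 2] [2 3] [3 5]]
}
```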
|
||||
|
||||
serverSideDiffRes, err := appIf.ServerSideDiff(ctx, serverSideDiffQuery)
|
||||
if err != nil {
|
||||
errors.CheckError(err)
|
||||
}
|
||||
// Process batches in parallel
|
||||
g, errGroupCtx := errgroup.WithContext(ctx)
|
||||
g.SetLimit(maxConcurrency)
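// errgroup treats a negative limit as "no limit", so the default of -1 runs all batches concurrently; 0 was already rejected above.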
|
||||
|
||||
// Extract diff for this resource
|
||||
for _, resultItem := range serverSideDiffRes.Items {
|
||||
results := make([][]*argoappv1.ResourceDiff, len(batches))
|
||||
|
||||
for idx, batch := range batches {
|
||||
i := idx
|
||||
b := batch
|
||||
g.Go(func() error {
|
||||
// Call server-side diff for this batch of resources
|
||||
serverSideDiffQuery := &application.ApplicationServerSideDiffQuery{
|
||||
AppName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Project: &app.Spec.Project,
|
||||
LiveResources: liveResources[b.start:b.end],
|
||||
TargetManifests: targetManifests[b.start:b.end],
|
||||
}
|
||||
serverSideDiffRes, err := appIf.ServerSideDiff(errGroupCtx, serverSideDiffQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
results[i] = serverSideDiffRes.Items
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := g.Wait(); err != nil {
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
for _, items := range results {
|
||||
for _, resultItem := range items {
|
||||
if resultItem.Hook || (!resultItem.Modified && resultItem.TargetState != "" && resultItem.LiveState != "") {
|
||||
continue
|
||||
}
|
||||
@@ -1522,13 +1581,12 @@ func findAndPrintServerSideDiff(ctx context.Context, app *argoappv1.Application,
|
||||
|
||||
if resultItem.TargetState != "" && resultItem.TargetState != "null" {
|
||||
target = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(resultItem.TargetState), target)
|
||||
err := json.Unmarshal([]byte(resultItem.TargetState), target)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
if resultItem.LiveState != "" && resultItem.LiveState != "null" {
|
||||
live = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(resultItem.LiveState), live)
|
||||
err := json.Unmarshal([]byte(resultItem.LiveState), live)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
@@ -1554,14 +1612,14 @@ type DifferenceOption struct {
|
||||
}
|
||||
|
||||
// findAndPrintDiff prints the difference between the application's current state and the state stored in git or locally; it returns true if a difference is found, false otherwise
|
||||
func findAndPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts, useServerSideDiff bool, appIf application.ApplicationServiceClient, appName, appNs string) bool {
|
||||
func findAndPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts, useServerSideDiff bool, appIf application.ApplicationServiceClient, appName, appNs string, serverSideDiffConcurrency int, serverSideDiffMaxBatchKB int) bool {
|
||||
var foundDiffs bool
|
||||
|
||||
items, err := prepareObjectsForDiff(ctx, app, proj, resources, argoSettings, diffOptions)
|
||||
errors.CheckError(err)
|
||||
|
||||
if useServerSideDiff {
|
||||
return findAndPrintServerSideDiff(ctx, app, items, resources, appIf, appName, appNs)
|
||||
return findAndPrintServerSideDiff(ctx, app, items, resources, appIf, appName, appNs, serverSideDiffConcurrency, serverSideDiffMaxBatchKB)
|
||||
}
|
||||
|
||||
for _, item := range items {
|
||||
@@ -2075,36 +2133,38 @@ func printTreeViewDetailed(nodeMapping map[string]argoappv1.ResourceNode, parent
|
||||
// NewApplicationSyncCommand returns a new instance of an `argocd app sync` command
|
||||
func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
revision string
|
||||
revisions []string
|
||||
sourcePositions []int64
|
||||
sourceNames []string
|
||||
resources []string
|
||||
labels []string
|
||||
selector string
|
||||
prune bool
|
||||
dryRun bool
|
||||
timeout uint
|
||||
strategy string
|
||||
force bool
|
||||
replace bool
|
||||
serverSideApply bool
|
||||
applyOutOfSyncOnly bool
|
||||
async bool
|
||||
retryLimit int64
|
||||
retryRefresh bool
|
||||
retryBackoffDuration time.Duration
|
||||
retryBackoffMaxDuration time.Duration
|
||||
retryBackoffFactor int64
|
||||
local string
|
||||
localRepoRoot string
|
||||
infos []string
|
||||
diffChanges bool
|
||||
diffChangesConfirm bool
|
||||
projects []string
|
||||
output string
|
||||
appNamespace string
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
revision string
|
||||
revisions []string
|
||||
sourcePositions []int64
|
||||
sourceNames []string
|
||||
resources []string
|
||||
labels []string
|
||||
selector string
|
||||
prune bool
|
||||
dryRun bool
|
||||
timeout uint
|
||||
strategy string
|
||||
force bool
|
||||
replace bool
|
||||
serverSideApply bool
|
||||
applyOutOfSyncOnly bool
|
||||
async bool
|
||||
retryLimit int64
|
||||
retryRefresh bool
|
||||
retryBackoffDuration time.Duration
|
||||
retryBackoffMaxDuration time.Duration
|
||||
retryBackoffFactor int64
|
||||
local string
|
||||
localRepoRoot string
|
||||
infos []string
|
||||
diffChanges bool
|
||||
diffChangesConfirm bool
|
||||
projects []string
|
||||
output string
|
||||
appNamespace string
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
serverSideDiffConcurrency int
|
||||
serverSideDiffMaxBatchKB int
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "sync [APPNAME... | -l selector | --project project-name]",
|
||||
@@ -2393,7 +2453,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
// Check if application has ServerSideDiff annotation
|
||||
serverSideDiff := resourceutil.HasAnnotationOption(app, argocommon.AnnotationCompareOptions, "ServerSideDiff=true")
|
||||
|
||||
foundDiffs = findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, appName, appNs)
|
||||
foundDiffs = findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, appName, appNs, serverSideDiffConcurrency, serverSideDiffMaxBatchKB)
|
||||
if !foundDiffs {
|
||||
fmt.Printf("====== No Differences found ======\n")
|
||||
// if no differences found, then no need to sync
|
||||
@@ -2458,6 +2518,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().StringArrayVar(&revisions, "revisions", []string{}, "Show manifests at specific revisions for source position in source-positions")
|
||||
command.Flags().Int64SliceVar(&sourcePositions, "source-positions", []int64{}, "List of source positions. Default is empty array. Counting start at 1.")
|
||||
command.Flags().StringArrayVar(&sourceNames, "source-names", []string{}, "List of source names. Default is an empty array.")
|
||||
addServerSideDiffPerfFlags(command, &serverSideDiffConcurrency, &serverSideDiffMaxBatchKB)
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -3222,8 +3283,7 @@ func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
errors.CheckError(err)
|
||||
|
||||
proj := getProject(ctx, c, clientOpts, app.Spec.Project)
|
||||
//nolint:staticcheck
|
||||
unstructureds = getLocalObjects(context.Background(), app, proj.Project, local, localRepoRoot, argoSettings.AppLabelKey, cluster.ServerVersion, cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod)
|
||||
unstructureds = getLocalObjects(context.Background(), app, proj.Project, local, localRepoRoot, argoSettings.AppLabelKey, cluster.Info.ServerVersion, cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod)
|
||||
case len(revisions) > 0 && len(sourcePositions) > 0:
|
||||
q := application.ApplicationManifestQuery{
|
||||
Name: &appName,
|
||||
|
||||
@@ -395,12 +395,12 @@ func printApplicationSetNames(apps []arogappsetv1.ApplicationSet) {
|
||||
func printApplicationSetTable(apps []arogappsetv1.ApplicationSet, output *string) {
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
var fmtStr string
|
||||
headers := []any{"NAME", "PROJECT", "SYNCPOLICY", "CONDITIONS"}
|
||||
headers := []any{"NAME", "PROJECT", "SYNCPOLICY", "HEALTH", "CONDITIONS"}
|
||||
if *output == "wide" {
|
||||
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
|
||||
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
|
||||
headers = append(headers, "REPO", "PATH", "TARGET")
|
||||
} else {
|
||||
fmtStr = "%s\t%s\t%s\t%s\n"
|
||||
fmtStr = "%s\t%s\t%s\t%s\t%s\n"
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, fmtStr, headers...)
|
||||
for _, app := range apps {
|
||||
@@ -414,6 +414,7 @@ func printApplicationSetTable(apps []arogappsetv1.ApplicationSet, output *string
|
||||
app.QualifiedName(),
|
||||
app.Spec.Template.Spec.Project,
|
||||
app.Spec.SyncPolicy,
|
||||
app.Status.Health.Status,
|
||||
conditions,
|
||||
}
|
||||
if *output == "wide" {
|
||||
@@ -437,6 +438,7 @@ func printAppSetSummaryTable(appSet *arogappsetv1.ApplicationSet) {
|
||||
fmt.Printf(printOpFmtStr, "Project:", appSet.Spec.Template.Spec.GetProject())
|
||||
fmt.Printf(printOpFmtStr, "Server:", getServerForAppSet(appSet))
|
||||
fmt.Printf(printOpFmtStr, "Namespace:", appSet.Spec.Template.Spec.Destination.Namespace)
|
||||
fmt.Printf(printOpFmtStr, "Health Status:", appSet.Status.Health.Status)
|
||||
if !appSet.Spec.Template.Spec.HasMultipleSources() {
|
||||
fmt.Println("Source:")
|
||||
} else {
|
||||
|
||||
@@ -107,7 +107,7 @@ func TestPrintApplicationSetTable(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
expectation := "NAME PROJECT SYNCPOLICY CONDITIONS\napp-name default nil [{ResourcesUpToDate <nil> True }]\nteam-two/app-name default nil [{ResourcesUpToDate <nil> True }]\n"
|
||||
expectation := "NAME PROJECT SYNCPOLICY HEALTH CONDITIONS\napp-name default nil [{ResourcesUpToDate <nil> True }]\nteam-two/app-name default nil [{ResourcesUpToDate <nil> True }]\n"
|
||||
assert.Equal(t, expectation, output)
|
||||
}
|
||||
|
||||
@@ -200,6 +200,7 @@ func TestPrintAppSetSummaryTable(t *testing.T) {
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Health Status:
|
||||
Source:
|
||||
- Repo:
|
||||
Target:
|
||||
@@ -213,6 +214,7 @@ SyncPolicy: <none>
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Health Status:
|
||||
Source:
|
||||
- Repo:
|
||||
Target:
|
||||
@@ -226,6 +228,7 @@ SyncPolicy: Automated
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Health Status:
|
||||
Source:
|
||||
- Repo:
|
||||
Target:
|
||||
@@ -239,6 +242,7 @@ SyncPolicy: Automated
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Health Status:
|
||||
Source:
|
||||
- Repo: test1
|
||||
Target: master1
|
||||
@@ -253,6 +257,7 @@ SyncPolicy: <none>
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Health Status:
|
||||
Sources:
|
||||
- Repo: test1
|
||||
Target: master1
|
||||
|
||||
@@ -380,8 +380,7 @@ func printClusterDetails(clusters []argoappv1.Cluster) {
|
||||
fmt.Printf("Cluster information\n\n")
|
||||
fmt.Printf(" Server URL: %s\n", cluster.Server)
|
||||
fmt.Printf(" Server Name: %s\n", strWithDefault(cluster.Name, "-"))
|
||||
//nolint:staticcheck
|
||||
fmt.Printf(" Server Version: %s\n", cluster.ServerVersion)
|
||||
fmt.Printf(" Server Version: %s\n", cluster.Info.ServerVersion)
|
||||
fmt.Printf(" Namespaces: %s\n", formatNamespaces(cluster))
|
||||
fmt.Printf("\nTLS configuration\n\n")
|
||||
fmt.Printf(" Client cert: %v\n", len(cluster.Config.CertData) != 0)
|
||||
@@ -475,8 +474,7 @@ func printClusterTable(clusters []argoappv1.Cluster) {
|
||||
if len(c.Namespaces) > 0 {
|
||||
server = fmt.Sprintf("%s (%d namespaces)", c.Server, len(c.Namespaces))
|
||||
}
|
||||
//nolint:staticcheck
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", server, c.Name, c.ServerVersion, c.ConnectionState.Status, c.ConnectionState.Message, c.Project)
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", server, c.Name, c.Info.ServerVersion, c.Info.ConnectionState.Status, c.Info.ConnectionState.Message, c.Project)
|
||||
}
|
||||
_ = w.Flush()
|
||||
}
|
||||
|
||||
@@ -39,12 +39,14 @@ func Test_printClusterTable(_ *testing.T) {
|
||||
AWSAuthConfig: nil,
|
||||
DisableCompression: false,
|
||||
},
|
||||
ConnectionState: v1alpha1.ConnectionState{
|
||||
Status: "my-status",
|
||||
Message: "my-message",
|
||||
ModifiedAt: &metav1.Time{},
|
||||
Info: v1alpha1.ClusterInfo{
|
||||
ConnectionState: v1alpha1.ConnectionState{
|
||||
Status: "my-status",
|
||||
Message: "my-message",
|
||||
ModifiedAt: &metav1.Time{},
|
||||
},
|
||||
ServerVersion: "my-version",
|
||||
},
|
||||
ServerVersion: "my-version",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
209
commitserver/commit/addnote_race_test.go
Normal file
209
commitserver/commit/addnote_race_test.go
Normal file
@@ -0,0 +1,209 @@
|
||||
package commit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/git"
|
||||
)
|
||||
|
||||
// TestAddNoteConcurrentStaggered tests that when multiple AddNote operations run
|
||||
// with slightly staggered timing, all notes persist correctly.
|
||||
// Each operation gets its own git clone, simulating multiple concurrent hydration requests.
|
||||
func TestAddNoteConcurrentStaggered(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
remotePath, localPath := setupRepoWithRemote(t)
|
||||
|
||||
// Create 3 branches with commits (simulating different hydration targets)
|
||||
branches := []string{"env/dev", "env/staging", "env/prod"}
|
||||
commitSHAs := make([]string, 3)
|
||||
|
||||
for i, branch := range branches {
|
||||
commitSHAs[i] = commitAndPushBranch(t, localPath, branch)
|
||||
}
|
||||
|
||||
// Create separate clones for concurrent operations
|
||||
cloneClients := make([]git.Client, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
cloneClients[i] = getClientForClone(t, remotePath)
|
||||
}
|
||||
|
||||
// Add notes concurrently with slight stagger
|
||||
var wg sync.WaitGroup
|
||||
errors := make([]error, 3)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
time.Sleep(time.Duration(idx*50) * time.Millisecond)
|
||||
errors[idx] = AddNote(cloneClients[idx], fmt.Sprintf("dry-sha-%d", idx), commitSHAs[idx])
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Verify all notes persisted
|
||||
verifyClient := getClientForClone(t, remotePath)
|
||||
|
||||
for i, commitSHA := range commitSHAs {
|
||||
note, err := verifyClient.GetCommitNote(commitSHA, NoteNamespace)
|
||||
require.NoError(t, err, "Note should exist for commit %d", i)
|
||||
assert.Contains(t, note, fmt.Sprintf("dry-sha-%d", i))
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddNoteConcurrentSimultaneous tests that when multiple AddNote operations run
|
||||
// simultaneously (without delays), all notes persist correctly.
|
||||
// Each operation gets its own git clone, simulating multiple concurrent hydration requests.
|
||||
func TestAddNoteConcurrentSimultaneous(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
remotePath, localPath := setupRepoWithRemote(t)
|
||||
|
||||
// Create 3 branches with commits (simulating different hydration targets)
|
||||
branches := []string{"env/dev", "env/staging", "env/prod"}
|
||||
commitSHAs := make([]string, 3)
|
||||
|
||||
for i, branch := range branches {
|
||||
commitSHAs[i] = commitAndPushBranch(t, localPath, branch)
|
||||
}
|
||||
|
||||
// Create separate clones for concurrent operations
|
||||
cloneClients := make([]git.Client, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
cloneClients[i] = getClientForClone(t, remotePath)
|
||||
}
|
||||
|
||||
// Add notes concurrently without delays
|
||||
var wg sync.WaitGroup
|
||||
startChan := make(chan struct{})
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
<-startChan
|
||||
_ = AddNote(cloneClients[idx], fmt.Sprintf("dry-sha-%d", idx), commitSHAs[idx])
|
||||
}(i)
|
||||
}
|
||||
|
||||
close(startChan)
|
||||
wg.Wait()
|
||||
|
||||
// Verify all notes persisted
|
||||
verifyClient := getClientForClone(t, remotePath)
|
||||
|
||||
for i, commitSHA := range commitSHAs {
|
||||
note, err := verifyClient.GetCommitNote(commitSHA, NoteNamespace)
|
||||
require.NoError(t, err, "Note should exist for commit %d", i)
|
||||
assert.Contains(t, note, fmt.Sprintf("dry-sha-%d", i))
|
||||
}
|
||||
}
|
||||
|
||||
// setupRepoWithRemote creates a bare remote repo and a local repo configured to push to it.
|
||||
// Returns the remote path and local path.
|
||||
func setupRepoWithRemote(t *testing.T) (remotePath, localPath string) {
|
||||
t.Helper()
|
||||
ctx := t.Context()
|
||||
|
||||
// Create bare remote repository
|
||||
remoteDir := t.TempDir()
|
||||
remotePath = filepath.Join(remoteDir, "remote.git")
|
||||
err := os.MkdirAll(remotePath, 0o755)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, remotePath, "init", "--bare")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create local repository
|
||||
localDir := t.TempDir()
|
||||
localPath = filepath.Join(localDir, "local")
|
||||
err = os.MkdirAll(localPath, 0o755)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "init")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "config", "user.name", "Test User")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "config", "user.email", "test@example.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "remote", "add", "origin", remotePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
return remotePath, localPath
|
||||
}
|
||||
|
||||
// commitAndPushBranch writes a file, commits it, creates a branch, and pushes to remote.
|
||||
// Returns the commit SHA.
|
||||
func commitAndPushBranch(t *testing.T, localPath, branch string) string {
|
||||
t.Helper()
|
||||
ctx := t.Context()
|
||||
|
||||
testFile := filepath.Join(localPath, "test.txt")
|
||||
err := os.WriteFile(testFile, []byte("content for "+branch), 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "add", ".")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "commit", "-m", "commit "+branch)
|
||||
require.NoError(t, err)
|
||||
|
||||
sha, err := runGitCmd(ctx, localPath, "rev-parse", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "branch", branch)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "push", "origin", branch)
|
||||
require.NoError(t, err)
|
||||
|
||||
return sha
|
||||
}
|
||||
|
||||
// getClientForClone creates a git client with a fresh clone of the remote repo.
|
||||
func getClientForClone(t *testing.T, remotePath string) git.Client {
|
||||
t.Helper()
|
||||
ctx := t.Context()
|
||||
|
||||
workDir := t.TempDir()
|
||||
|
||||
client, err := git.NewClientExt(remotePath, workDir, &git.NopCreds{}, false, false, "", "")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = client.Init()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, workDir, "config", "user.name", "Test User")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, workDir, "config", "user.email", "test@example.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = client.Fetch("", 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
// runGitCmd is a helper function to run git commands
|
||||
func runGitCmd(ctx context.Context, dir string, args ...string) (string, error) {
|
||||
cmd := exec.CommandContext(ctx, "git", args...)
|
||||
cmd.Dir = dir
|
||||
output, err := cmd.CombinedOutput()
|
||||
return strings.TrimSpace(string(output)), err
|
||||
}
|
||||
@@ -187,7 +187,7 @@ func (s *Service) handleCommitRequest(logCtx *log.Entry, r *apiclient.CommitHydr
|
||||
// short-circuit if already hydrated
|
||||
if isHydrated {
|
||||
logCtx.Debugf("this dry sha %s is already hydrated", r.DrySha)
|
||||
return "", "", nil
|
||||
return "", hydratedSha, nil
|
||||
}
|
||||
|
||||
logCtx.Debug("Writing manifests")
|
||||
@@ -197,13 +197,14 @@ func (s *Service) handleCommitRequest(logCtx *log.Entry, r *apiclient.CommitHydr
|
||||
return "", "", fmt.Errorf("failed to write manifests: %w", err)
|
||||
}
|
||||
if !shouldCommit {
|
||||
// add the note and return
|
||||
// Manifests did not change, so we don't need to create a new commit.
|
||||
// Add a git note to track that this dry SHA has been processed, and return the existing hydrated SHA.
|
||||
logCtx.Debug("Adding commit note")
|
||||
err = AddNote(gitClient, r.DrySha, hydratedSha)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to add commit note: %w", err)
|
||||
}
|
||||
return "", "", nil
|
||||
return "", hydratedSha, nil
|
||||
}
|
||||
logCtx.Debug("Committing and pushing changes")
|
||||
out, err = gitClient.CommitAndPush(r.TargetBranch, r.CommitMessage)
|
||||
|
||||
@@ -108,7 +108,7 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), validRequest)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Empty(t, resp.HydratedSha) // changes introduced by commit note. hydration won't happen if there are no new manifests to commit
|
||||
assert.Equal(t, "it-worked!", resp.HydratedSha, "Should return existing hydrated SHA for no-op")
|
||||
})
|
||||
|
||||
t.Run("root path with dot and blank - no directory removal", func(t *testing.T) {
|
||||
@@ -283,12 +283,13 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
DrySha: "dry-sha-456",
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithEmptyPaths)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Empty(t, resp.HydratedSha) // changes introduced by commit note. hydration won't happen if there are no new manifests to commit
|
||||
assert.Equal(t, "empty-paths-sha", resp.HydratedSha, "Should return existing hydrated SHA for no-op")
|
||||
})
|
||||
|
||||
t.Run("duplicate request already hydrated", func(t *testing.T) {
|
||||
@@ -329,7 +330,7 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), request)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Empty(t, resp.HydratedSha) // changes introduced by commit note. hydration won't happen if there are no new manifests to commit
|
||||
assert.Equal(t, "dupe-test-sha", resp.HydratedSha, "Should return existing hydrated SHA when already hydrated")
|
||||
})
|
||||
|
||||
t.Run("root path with dot - no changes to manifest - should commit note only", func(t *testing.T) {
|
||||
@@ -355,6 +356,7 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
DrySha: "dry-sha-123",
|
||||
Paths: []*apiclient.PathDetails{
|
||||
{
|
||||
Path: ".",
|
||||
@@ -370,7 +372,8 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithRootAndBlank)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Empty(t, resp.HydratedSha)
|
||||
// BUG FIX: When manifests don't change (no-op), the existing hydrated SHA should be returned.
|
||||
assert.Equal(t, "root-and-blank-sha", resp.HydratedSha, "Should return existing hydrated SHA for no-op")
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -23,8 +23,8 @@ import (
|
||||
|
||||
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
|
||||
|
||||
const gitAttributesContents = `*/README.md linguist-generated=true
|
||||
*/hydrator.metadata linguist-generated=true`
|
||||
const gitAttributesContents = `**/README.md linguist-generated=true
|
||||
**/hydrator.metadata linguist-generated=true`
|
||||
|
||||
func init() {
|
||||
// Avoid allowing the user to learn things about the environment.
|
||||
@@ -247,9 +247,5 @@ func AddNote(gitClient git.Client, drySha, commitSha string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal commit note: %w", err)
|
||||
}
|
||||
err = gitClient.AddAndPushNote(commitSha, NoteNamespace, string(jsonBytes))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add commit note: %w", err)
|
||||
}
|
||||
return nil
|
||||
return gitClient.AddAndPushNote(commitSha, NoteNamespace, string(jsonBytes)) // nolint:wrapcheck // wrapping the error wouldn't add any information
|
||||
}
|
||||
|
||||
@@ -8,8 +8,10 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -352,8 +354,76 @@ func TestWriteGitAttributes(t *testing.T) {
|
||||
gitAttributesPath := filepath.Join(root.Name(), ".gitattributes")
|
||||
gitAttributesBytes, err := os.ReadFile(gitAttributesPath)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/README.md linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/hydrator.metadata linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "README.md linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "hydrator.metadata linguist-generated=true")
|
||||
}
|
||||
|
||||
func TestWriteGitAttributes_MatchesAllDepths(t *testing.T) {
|
||||
root := tempRoot(t)
|
||||
|
||||
err := writeGitAttributes(root)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The gitattributes pattern needs to match files at all depths:
|
||||
// - hydrator.metadata (root level)
|
||||
// - path1/hydrator.metadata (one level deep)
|
||||
// - path1/nested/deep/hydrator.metadata (multiple levels deep)
|
||||
// Same for README.md files
|
||||
//
|
||||
// The pattern "**/hydrator.metadata" matches at any depth including root
|
||||
// The pattern "*/hydrator.metadata" only matches exactly one directory level deep
|
||||
|
||||
// Test actual Git behavior using git check-attr
|
||||
// Initialize a git repo
|
||||
ctx := t.Context()
|
||||
repoPath := root.Name()
|
||||
cmd := exec.CommandContext(ctx, "git", "init")
|
||||
cmd.Dir = repoPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, "Failed to init git repo: %s", string(output))
|
||||
|
||||
// Test files at different depths
|
||||
testCases := []struct {
|
||||
path string
|
||||
shouldMatch bool
|
||||
description string
|
||||
}{
|
||||
{"hydrator.metadata", true, "root level hydrator.metadata"},
|
||||
{"README.md", true, "root level README.md"},
|
||||
{"path1/hydrator.metadata", true, "one level deep hydrator.metadata"},
|
||||
{"path1/README.md", true, "one level deep README.md"},
|
||||
{"path1/nested/hydrator.metadata", true, "two levels deep hydrator.metadata"},
|
||||
{"path1/nested/README.md", true, "two levels deep README.md"},
|
||||
{"path1/nested/deep/hydrator.metadata", true, "three levels deep hydrator.metadata"},
|
||||
{"path1/nested/deep/README.md", true, "three levels deep README.md"},
|
||||
{"manifest.yaml", false, "manifest.yaml should not match"},
|
||||
{"path1/manifest.yaml", false, "nested manifest.yaml should not match"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
// Use git check-attr to verify if linguist-generated attribute is set
|
||||
cmd := exec.CommandContext(ctx, "git", "check-attr", "linguist-generated", tc.path)
|
||||
cmd.Dir = repoPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, "Failed to run git check-attr: %s", string(output))
|
||||
|
||||
// Output format: <path>: <attribute>: <value>
|
||||
// Example: "hydrator.metadata: linguist-generated: true"
|
||||
outputStr := strings.TrimSpace(string(output))
|
||||
|
||||
if tc.shouldMatch {
|
||||
expectedOutput := tc.path + ": linguist-generated: true"
|
||||
assert.Equal(t, expectedOutput, outputStr,
|
||||
"File %s should have linguist-generated=true attribute", tc.path)
|
||||
} else {
|
||||
// Attribute should be unspecified
|
||||
expectedOutput := tc.path + ": linguist-generated: unspecified"
|
||||
assert.Equal(t, expectedOutput, outputStr,
|
||||
"File %s should not have linguist-generated=true attribute", tc.path)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsHydrated(t *testing.T) {
|
||||
@@ -401,3 +471,69 @@ func TestAddNote(t *testing.T) {
|
||||
err = AddNote(mockGitClient, drySha, commitShaErr)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// TestWriteForPaths_NoOpScenario tests that when manifests don't change between two hydrations,
|
||||
// shouldCommit returns false. This reproduces the bug where a new DRY commit that doesn't affect
|
||||
// manifests should not create a new hydrated commit.
|
||||
func TestWriteForPaths_NoOpScenario(t *testing.T) {
|
||||
root := tempRoot(t)
|
||||
|
||||
repoURL := "https://github.com/example/repo"
|
||||
drySha1 := "abc123"
|
||||
drySha2 := "def456" // Different dry SHA
|
||||
paths := []*apiclient.PathDetails{
|
||||
{
|
||||
Path: "guestbook",
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{ManifestJSON: `{"apiVersion":"v1","kind":"Service","metadata":{"name":"guestbook-ui"}}`},
|
||||
{ManifestJSON: `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"guestbook-ui"}}`},
|
||||
},
|
||||
Commands: []string{"kustomize build ."},
|
||||
},
|
||||
}
|
||||
|
||||
now1 := metav1.NewTime(time.Now())
|
||||
metadata1 := &appsv1.RevisionMetadata{
|
||||
Author: "test-author",
|
||||
Date: &now1,
|
||||
Message: "Initial commit",
|
||||
}
|
||||
|
||||
// First hydration - manifests are new, so HasFileChanged should return true
|
||||
mockGitClient1 := gitmocks.NewClient(t)
|
||||
mockGitClient1.On("HasFileChanged", "guestbook/manifest.yaml").Return(true, nil).Once()
|
||||
|
||||
shouldCommit1, err := WriteForPaths(root, repoURL, drySha1, metadata1, paths, mockGitClient1)
|
||||
require.NoError(t, err)
|
||||
require.True(t, shouldCommit1, "First hydration should commit because manifests are new")
|
||||
|
||||
// Second hydration - same manifest content but different dry SHA and metadata
|
||||
// Simulate adding a README.md to the dry source (which doesn't affect manifests)
|
||||
now2 := metav1.NewTime(time.Now().Add(1 * time.Hour)) // Different timestamp
|
||||
metadata2 := &appsv1.RevisionMetadata{
|
||||
Author: "test-author",
|
||||
Date: &now2,
|
||||
Message: "Add README.md", // Different commit message
|
||||
}
|
||||
|
||||
// The manifests are identical, so HasFileChanged should return false
|
||||
mockGitClient2 := gitmocks.NewClient(t)
|
||||
mockGitClient2.On("HasFileChanged", "guestbook/manifest.yaml").Return(false, nil).Once()
|
||||
|
||||
shouldCommit2, err := WriteForPaths(root, repoURL, drySha2, metadata2, paths, mockGitClient2)
|
||||
require.NoError(t, err)
|
||||
require.False(t, shouldCommit2, "Second hydration should NOT commit because manifests didn't change")
|
||||
|
||||
// Verify that the root-level metadata WAS updated (even though we're not committing)
|
||||
// The files get written to the working directory, but since shouldCommit is false, they won't be committed
|
||||
topMetadataPath := filepath.Join(root.Name(), "hydrator.metadata")
|
||||
topMetadataBytes, err := os.ReadFile(topMetadataPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
var topMetadata hydratorMetadataFile
|
||||
err = json.Unmarshal(topMetadataBytes, &topMetadata)
|
||||
require.NoError(t, err)
|
||||
// The top-level metadata should have the NEW dry SHA (files are written, just not committed)
|
||||
assert.Equal(t, drySha2, topMetadata.DrySHA)
|
||||
assert.Equal(t, metadata2.Date.Format(time.RFC3339), topMetadata.Date)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/ignore"
|
||||
kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
|
||||
@@ -21,27 +20,35 @@ import (
|
||||
func setApplicationHealth(resources []managedResource, statuses []appv1.ResourceStatus, resourceOverrides map[string]appv1.ResourceOverride, app *appv1.Application, persistResourceHealth bool) (health.HealthStatusCode, error) {
|
||||
var savedErr error
|
||||
var errCount uint
|
||||
var containsResources, containsLiveResources bool
|
||||
|
||||
appHealthStatus := health.HealthStatusHealthy
|
||||
for i, res := range resources {
|
||||
if res.Target != nil && hookutil.Skip(res.Target) {
|
||||
continue
|
||||
}
|
||||
if res.Live != nil && res.Live.GetAnnotations() != nil && res.Live.GetAnnotations()[common.AnnotationIgnoreHealthCheck] == "true" {
|
||||
if res.Live != nil && (hookutil.IsHook(res.Live) || ignore.Ignore(res.Live)) {
|
||||
continue
|
||||
}
|
||||
if res.Live != nil && (hookutil.IsHook(res.Live) || ignore.Ignore(res.Live)) {
|
||||
|
||||
// Contains actual resources that are not hooks
|
||||
containsResources = true
|
||||
if res.Live != nil {
|
||||
containsLiveResources = true
|
||||
}
|
||||
|
||||
// Do not aggregate the health of the resource if the annotation to ignore health check is set to true
|
||||
if res.Live != nil && res.Live.GetAnnotations() != nil && res.Live.GetAnnotations()[common.AnnotationIgnoreHealthCheck] == "true" {
|
||||
continue
|
||||
}
|
||||
|
||||
var healthStatus *health.HealthStatus
|
||||
var err error
|
||||
healthOverrides := lua.ResourceHealthOverrides(resourceOverrides)
|
||||
gvk := schema.GroupVersionKind{Group: res.Group, Version: res.Version, Kind: res.Kind}
|
||||
if res.Live == nil {
|
||||
healthStatus = &health.HealthStatus{Status: health.HealthStatusMissing}
|
||||
} else {
|
||||
// App the manages itself should not affect own health
|
||||
// App that manages itself should not affect own health
|
||||
if isSelfReferencedApp(app, kubeutil.GetObjectRef(res.Live)) {
|
||||
continue
|
||||
}
|
||||
@@ -65,8 +72,8 @@ func setApplicationHealth(resources []managedResource, statuses []appv1.Resource
|
||||
statuses[i].Health = nil
|
||||
}
|
||||
|
||||
// Is health status is missing but resource has not built-in/custom health check then it should not affect parent app health
|
||||
if _, hasOverride := healthOverrides[lua.GetConfigMapKey(gvk)]; healthStatus.Status == health.HealthStatusMissing && !hasOverride && health.GetHealthCheckFunc(gvk) == nil {
|
||||
// Missing resources should not affect parent app health - the OutOfSync status already indicates resources are missing
|
||||
if res.Live == nil && healthStatus.Status == health.HealthStatusMissing {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -79,6 +86,12 @@ func setApplicationHealth(resources []managedResource, statuses []appv1.Resource
|
||||
appHealthStatus = healthStatus.Status
|
||||
}
|
||||
}
|
||||
|
||||
// If the app is expected to have resources but does not contain any live resources, set the app health to missing
|
||||
if containsResources && !containsLiveResources && health.IsWorse(appHealthStatus, health.HealthStatusMissing) {
|
||||
appHealthStatus = health.HealthStatusMissing
|
||||
}
|
||||
|
||||
if persistResourceHealth {
|
||||
app.Status.ResourceHealthSource = appv1.ResourceHealthLocationInline
|
||||
} else {
|
||||
|
||||
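In short, the reworked setApplicationHealth aggregation skips hooks and resources that are missing from the cluster, but still reports the whole app as Missing when it is supposed to have resources and none of them are live. Below is a simplified, hypothetical model of that rule, using plain strings instead of the gitops-engine health types; it is a sketch, not the controller's actual code.

```go
package main

import "fmt"

var severity = map[string]int{"Healthy": 0, "Progressing": 1, "Degraded": 2, "Missing": 3}

type res struct {
	hasTarget, hasLive bool
	live               string // health of the live object, if any
}

func appHealth(resources []res) string {
	worst := "Healthy"
	containsResources, containsLive := false, false
	for _, r := range resources {
		containsResources = containsResources || r.hasTarget || r.hasLive
		if !r.hasLive {
			continue // a single missing resource no longer degrades the app; OutOfSync already reports it
		}
		containsLive = true
		if severity[r.live] > severity[worst] {
			worst = r.live
		}
	}
	// If the app should have resources and none of them exist, surface Missing.
	if containsResources && !containsLive && severity[worst] < severity["Missing"] {
		worst = "Missing"
	}
	return worst
}

func main() {
	fmt.Println(appHealth([]res{{hasTarget: true}, {hasTarget: true}}))                                 // Missing
	fmt.Println(appHealth([]res{{hasTarget: true}, {hasTarget: true, hasLive: true, live: "Healthy"}})) // Healthy
}
```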
@@ -16,6 +16,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/lua"
|
||||
@@ -103,12 +104,103 @@ func TestSetApplicationHealth_ResourceHealthNotPersisted(t *testing.T) {
|
||||
assert.Nil(t, resourceStatuses[0].Health)
|
||||
}
|
||||
|
||||
func TestSetApplicationHealth_NoResource(t *testing.T) {
|
||||
resources := []managedResource{}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusHealthy, healthStatus)
|
||||
}
|
||||
|
||||
func TestSetApplicationHealth_OnlyHooks(t *testing.T) {
|
||||
pod := resourceFromFile("./testdata/pod-running-restart-always.yaml")
|
||||
pod.SetAnnotations(map[string]string{synccommon.AnnotationKeyHook: string(synccommon.HookTypeSync)})
|
||||
|
||||
resources := []managedResource{{
|
||||
Group: "", Version: "v1", Kind: "Pod", Target: &pod, Live: &pod,
|
||||
}}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusHealthy, healthStatus)
|
||||
}
|
||||
|
||||
func TestSetApplicationHealth_MissingResource(t *testing.T) {
|
||||
pod := resourceFromFile("./testdata/pod-running-restart-always.yaml")
|
||||
pod2 := pod.DeepCopy()
|
||||
pod2.SetName("pod2")
|
||||
|
||||
resources := []managedResource{
|
||||
{Group: "", Version: "v1", Kind: "Pod", Target: &pod},
|
||||
{Group: "", Version: "v1", Kind: "Pod", Target: pod2, Live: pod2},
|
||||
}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusHealthy, healthStatus)
|
||||
}
|
||||
|
||||
func TestSetApplicationHealth_MissingResource_WithIgnoreHealthcheck(t *testing.T) {
|
||||
pod := resourceFromFile("./testdata/pod-running-restart-always.yaml")
|
||||
pod2 := pod.DeepCopy()
|
||||
pod2.SetName("pod2")
|
||||
pod2.SetAnnotations(map[string]string{common.AnnotationIgnoreHealthCheck: "true"})
|
||||
|
||||
resources := []managedResource{
|
||||
{Group: "", Version: "v1", Kind: "Pod", Target: &pod},
|
||||
{Group: "", Version: "v1", Kind: "Pod", Target: pod2, Live: pod2},
|
||||
}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusHealthy, healthStatus)
|
||||
}
|
||||
|
||||
func TestSetApplicationHealth_MissingResource_WithChildApp(t *testing.T) {
|
||||
childApp := newAppLiveObj(health.HealthStatusUnknown)
|
||||
pod := resourceFromFile("./testdata/pod-running-restart-always.yaml")
|
||||
resources := []managedResource{
|
||||
{Group: application.Group, Version: "v1alpha1", Kind: application.ApplicationKind, Target: childApp, Live: childApp},
|
||||
{Group: "", Version: "v1", Kind: "Pod", Target: &pod},
|
||||
}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusHealthy, healthStatus)
|
||||
}
|
||||
|
||||
func TestSetApplicationHealth_AllMissingResources(t *testing.T) {
|
||||
pod := resourceFromFile("./testdata/pod-running-restart-always.yaml")
|
||||
pod2 := pod.DeepCopy()
|
||||
pod2.SetName("pod2")
|
||||
|
||||
resources := []managedResource{
|
||||
{Group: "", Version: "v1", Kind: "Pod", Target: &pod},
|
||||
{Group: "", Version: "v1", Kind: "Pod", Target: pod2},
|
||||
}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusMissing, healthStatus)
|
||||
}
|
||||
|
||||
func TestSetApplicationHealth_AllMissingResources_WithHooks(t *testing.T) {
|
||||
pod := resourceFromFile("./testdata/pod-running-restart-always.yaml")
|
||||
pod2 := pod.DeepCopy()
|
||||
pod2.SetName("pod2")
|
||||
pod2.SetAnnotations(map[string]string{synccommon.AnnotationKeyHook: string(synccommon.HookTypeSync)})
|
||||
|
||||
resources := []managedResource{{
|
||||
Group: "", Version: "v1", Kind: "Pod", Target: &pod,
|
||||
}, {}}
|
||||
}, {
|
||||
Group: "", Version: "v1", Kind: "Pod", Target: pod2, Live: pod2,
|
||||
}}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
|
||||
@@ -149,32 +241,6 @@ func TestSetApplicationHealth_HealthImproves(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetApplicationHealth_MissingResourceNoBuiltHealthCheck(t *testing.T) {
|
||||
cm := resourceFromFile("./testdata/configmap.yaml")
|
||||
|
||||
resources := []managedResource{{
|
||||
Group: "", Version: "v1", Kind: "ConfigMap", Target: &cm,
|
||||
}}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
t.Run("NoOverride", func(t *testing.T) {
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusHealthy, healthStatus)
|
||||
assert.Equal(t, health.HealthStatusMissing, resourceStatuses[0].Health.Status)
|
||||
})
|
||||
|
||||
t.Run("HasOverride", func(t *testing.T) {
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{
|
||||
lua.GetConfigMapKey(schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"}): appv1.ResourceOverride{
|
||||
HealthLua: "some health check",
|
||||
},
|
||||
}, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusMissing, healthStatus)
|
||||
})
|
||||
}
|
||||
|
||||
func newAppLiveObj(status health.HealthStatusCode) *unstructured.Unstructured {
|
||||
app := appv1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -214,9 +280,9 @@ return hs`,
|
||||
}
|
||||
|
||||
t.Run("ChildAppDegraded", func(t *testing.T) {
|
||||
degradedApp := newAppLiveObj(health.HealthStatusDegraded)
|
||||
childApp := newAppLiveObj(health.HealthStatusDegraded)
|
||||
resources := []managedResource{{
|
||||
Group: application.Group, Version: "v1alpha1", Kind: application.ApplicationKind, Live: degradedApp,
|
||||
Group: application.Group, Version: "v1alpha1", Kind: application.ApplicationKind, Live: childApp,
|
||||
}, {}}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
@@ -226,9 +292,21 @@ return hs`,
|
||||
})
|
||||
|
||||
t.Run("ChildAppMissing", func(t *testing.T) {
|
||||
degradedApp := newAppLiveObj(health.HealthStatusMissing)
|
||||
childApp := newAppLiveObj(health.HealthStatusMissing)
|
||||
resources := []managedResource{{
|
||||
Group: application.Group, Version: "v1alpha1", Kind: application.ApplicationKind, Live: degradedApp,
|
||||
Group: application.Group, Version: "v1alpha1", Kind: application.ApplicationKind, Live: childApp,
|
||||
}, {}}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
healthStatus, err := setApplicationHealth(resources, resourceStatuses, overrides, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusHealthy, healthStatus)
|
||||
})
|
||||
|
||||
t.Run("ChildAppUnknown", func(t *testing.T) {
|
||||
childApp := newAppLiveObj(health.HealthStatusUnknown)
|
||||
resources := []managedResource{{
|
||||
Group: application.Group, Version: "v1alpha1", Kind: application.ApplicationKind, Live: childApp,
|
||||
}, {}}
|
||||
resourceStatuses := initStatuses(resources)
|
||||
|
||||
|
||||
@@ -255,9 +255,6 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
|
||||
|
||||
appNamespace := app.Spec.Destination.Namespace
|
||||
apiVersions := argo.APIResourcesToStrings(apiResources, true)
|
||||
if !sendRuntimeState {
|
||||
appNamespace = ""
|
||||
}
|
||||
|
||||
updateRevisions := processManifestGeneratePathsEnabled &&
|
||||
// updating revisions result is not required if automated sync is not enabled
|
||||
@@ -886,7 +883,9 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
Hook: isHook(obj),
|
||||
RequiresPruning: targetObj == nil && liveObj != nil && isSelfReferencedObj,
|
||||
RequiresDeletionConfirmation: targetObj != nil && resourceutil.HasAnnotationOption(targetObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDeleteRequireConfirm) ||
|
||||
liveObj != nil && resourceutil.HasAnnotationOption(liveObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDeleteRequireConfirm),
|
||||
liveObj != nil && resourceutil.HasAnnotationOption(liveObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDeleteRequireConfirm) ||
|
||||
targetObj != nil && resourceutil.HasAnnotationOption(targetObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionPruneRequireConfirm) ||
|
||||
liveObj != nil && resourceutil.HasAnnotationOption(liveObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionPruneRequireConfirm),
|
||||
}
|
||||
if targetObj != nil {
|
||||
resState.SyncWave = int64(syncwaves.Wave(targetObj))
|
||||
|
||||
@@ -416,6 +416,55 @@ func TestCompareAppStateSkipHook(t *testing.T) {
|
||||
assert.Empty(t, app.Status.Conditions)
|
||||
}
|
||||
|
||||
func TestCompareAppStateRequireDeletion(t *testing.T) {
|
||||
obj1 := NewPod()
|
||||
obj1.SetName("my-pod-1")
|
||||
obj1.SetAnnotations(map[string]string{"argocd.argoproj.io/sync-options": "Delete=confirm"})
|
||||
obj2 := NewPod()
|
||||
obj2.SetName("my-pod-2")
|
||||
obj2.SetAnnotations(map[string]string{"argocd.argoproj.io/sync-options": "Prune=confirm"})
|
||||
obj3 := NewPod()
|
||||
obj3.SetName("my-pod-3")
|
||||
|
||||
app := newFakeApp()
|
||||
data := fakeData{
|
||||
apps: []runtime.Object{app},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{toJSON(t, obj1), toJSON(t, obj2), toJSON(t, obj3)},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
|
||||
kube.GetResourceKey(obj1): obj1,
|
||||
kube.GetResourceKey(obj2): obj2,
|
||||
kube.GetResourceKey(obj3): obj3,
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(t.Context(), &data, nil)
|
||||
sources := make([]v1alpha1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, v1alpha1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
assert.Len(t, compRes.resources, 3)
|
||||
assert.Len(t, compRes.managedResources, 3)
|
||||
assert.Empty(t, app.Status.Conditions)
|
||||
|
||||
countRequireDeletion := 0
|
||||
for _, res := range compRes.resources {
|
||||
if res.RequiresDeletionConfirmation {
|
||||
countRequireDeletion++
|
||||
}
|
||||
}
|
||||
assert.Equal(t, 2, countRequireDeletion)
|
||||
}
|
||||
|
||||
// checks that ignore resources are detected, but excluded from status
|
||||
func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) {
|
||||
pod := NewPod()
|
||||
|
||||
@@ -30,7 +30,7 @@ to understand our toolchain and our continuous integration processes. It contain
|
||||
## Quick start
|
||||
|
||||
If you want a quick start contributing to Argo CD, take a look at issues that are labeled with
|
||||
[help wanted](https://github.com/argoproj/argo-cd/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
|
||||
[help-wanted](https://github.com/argoproj/argo-cd/issues?q=is%3Aopen+is%3Aissue+label%3A%22help-wanted%22)
|
||||
or
|
||||
[good first issue](https://github.com/argoproj/argo-cd/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22).
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ These are the upcoming releases dates:
|
||||
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](https://github.com/argoproj/argo-cd/issues/23347) |
|
||||
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | [Michael Crenshaw](https://github.com/crenshaw-dev) | [checklist](https://github.com/argoproj/argo-cd/issues/24539) |
|
||||
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | [Peter Jiang](https://github.com/pjiang-dev) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/25211) |
|
||||
| v3.4 | Monday, Mar. 16, 2026 | Monday, May. 4, 2026 | | |
|
||||
| v3.4 | Monday, Mar. 16, 2026 | Monday, May. 4, 2026 | [Codey Jenkins](https://github.com/FourFifthsCode) | |
|
||||
| v3.5 | Monday, Jun. 15, 2026 | Monday, Aug. 3, 2026 | | |
|
||||
|
||||
Actual release dates might differ from the plan by a few days.
|
||||
|
||||
@@ -98,11 +98,15 @@ checks to see if the release came out correctly:
|
||||
|
||||
### If something went wrong
|
||||
|
||||
If something went wrong, damage should be limited. Depending on the steps that
|
||||
have been performed, you will need to manually clean up.
|
||||
A new Argo CD release results in:
|
||||
- A new GitHub release created
|
||||
- Stable Git tag pointing to the release (if the release is the latest release)
|
||||
- The release Go packages are published for using Argo CD code as dependency
|
||||
- Docker images and SBOM artifacts are published
|
||||
|
||||
* If the container image has been pushed to Quay.io, delete it
|
||||
* Delete the release (if created) from the `Releases` page on GitHub
|
||||
Because of all the above dependencies, if a release fails, it is not safe to delete and recreate it.
|
||||
Instead, create the next patch release (for example, if 3.2.4 failed, create 3.2.5 after fixing the problem, but don't recreate 3.2.4).
|
||||
Upon successful publishing of the fixed release (3.2.5 in our example), manually copy the full release notes from the failed release (3.2.4) into it, and then update the failed release's notes to state that the release is invalid and should not be used.
|
||||
|
||||
### Manual releasing
|
||||
|
||||
|
||||
@@ -222,6 +222,18 @@ Then you can build & push the image in one step:
|
||||
DOCKER_PUSH=true make image
|
||||
```
|
||||
|
||||
To speed up image builds, you may use the DEV_IMAGE option, which builds the argocd binaries in the user's desktop environment
|
||||
(instead of building everything in Docker) and copies them into the result image:
|
||||
|
||||
```bash
|
||||
DEV_IMAGE=true DOCKER_PUSH=true make image
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> The first run of this build task may take a long time because it needs to build the base image first; however,
|
||||
> once it's done, the build process should be much faster than a regular full image build in Docker.
|
||||
|
||||
|
||||
#### Configure manifests for your image
|
||||
|
||||
With `IMAGE_REGISTRY`, `IMAGE_NAMESPACE` and `IMAGE_TAG` still set, run:
|
||||
|
||||
@@ -3,29 +3,33 @@
|
||||
The test [directory](https://github.com/argoproj/argo-cd/tree/master/test) contains E2E tests and test applications. The tests assume that Argo CD services are installed into the `argocd-e2e` namespace or cluster in the current context. A throw-away
|
||||
namespace `argocd-e2e***` is created prior to the execution of the tests. The throw-away namespace is used as a target namespace for test applications.
|
||||
|
||||
The [/test/e2e/testdata](https://github.com/argoproj/argo-cd/tree/master/test/e2e/testdata) directory contains various Argo CD applications. Before test execution, the directory is copied into `/tmp/argo-e2e***` temp directory and used in tests as a
|
||||
The [/test/e2e/testdata](https://github.com/argoproj/argo-cd/tree/master/test/e2e/testdata) directory contains various Argo CD applications. Before test execution, the directory is copied into `/tmp/argo-e2e***` temp directory (configurable by `ARGOCD_E2E_DIR`) and used in tests as a
|
||||
Git repository via file url: `file:///tmp/argo-e2e***`.
|
||||
|
||||
> [!NOTE]
|
||||
> You might get an error such as `unable to ls-remote HEAD on repository: failed to list refs: repository not found` when querying the local repository exposed through the e2e server running in a container.
|
||||
> This is often caused by `/tmp` directory sharing protection. You can configure a different directory with `ARGOCD_E2E_DIR`, or disable the directory sharing protection.
|
||||
>
|
||||
> **Rancher Desktop Volume Sharing**
|
||||
>
|
||||
> The e2e git server runs in a container. If you are using Rancher Desktop, you will need to enable volume sharing for
|
||||
> the e2e container to access the testdata directory. To do this, add the following to
|
||||
> To enable `/tmp` sharing, add the following to
|
||||
> `~/Library/Application\ Support/rancher-desktop/lima/_config/override.yaml` and restart Rancher Desktop:
|
||||
>
|
||||
> ```yaml
|
||||
> mounts:
|
||||
> - location: /private/tmp
|
||||
> writable: true
|
||||
> - location: /private/tmp
|
||||
> writable: true
|
||||
> ```
|
||||
|
||||
## Running Tests Locally
|
||||
|
||||
### With virtualized chain
|
||||
|
||||
1. Start the e2e version `make start-e2e`
|
||||
2. Run the tests: `make test-e2e`
|
||||
|
||||
### With local chain
|
||||
|
||||
1. Start the e2e version `make start-e2e-local`
|
||||
2. Run the tests: `make test-e2e-local`
|
||||
|
||||
@@ -37,32 +41,32 @@ You can observe the tests by using the UI [http://localhost:4000/applications](h
|
||||
|
||||
The Makefile's `start-e2e` target starts instances of ArgoCD on your local machine, most of which will require a network listener. If, for any reason, your machine already has network services listening on the same ports, then the e2e tests will not run. You can deviate from the defaults by setting the following environment variables before you run `make start-e2e`:
|
||||
|
||||
* `ARGOCD_E2E_APISERVER_PORT`: Listener port for `argocd-server` (default: `8080`)
|
||||
* `ARGOCD_E2E_REPOSERVER_PORT`: Listener port for `argocd-reposerver` (default: `8081`)
|
||||
* `ARGOCD_E2E_DEX_PORT`: Listener port for `dex` (default: `5556`)
|
||||
* `ARGOCD_E2E_REDIS_PORT`: Listener port for `redis` (default: `6379`)
|
||||
* `ARGOCD_E2E_YARN_CMD`: Command to use for starting the UI via Yarn (default: `yarn`)
|
||||
- `ARGOCD_E2E_APISERVER_PORT`: Listener port for `argocd-server` (default: `8080`)
|
||||
- `ARGOCD_E2E_REPOSERVER_PORT`: Listener port for `argocd-reposerver` (default: `8081`)
|
||||
- `ARGOCD_E2E_DEX_PORT`: Listener port for `dex` (default: `5556`)
|
||||
- `ARGOCD_E2E_REDIS_PORT`: Listener port for `redis` (default: `6379`)
|
||||
- `ARGOCD_E2E_YARN_CMD`: Command to use for starting the UI via Yarn (default: `yarn`)
|
||||
- `ARGOCD_E2E_DIR`: Local path to the repository to use for ephemeral test data
|
||||
|
||||
If you have changed the port for `argocd-server`, be sure to also set `ARGOCD_SERVER` environment variable to point to that port, e.g. `export ARGOCD_SERVER=localhost:8888` before running `make test-e2e` so that the test will communicate to the correct server component.
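For illustration, a possible sequence when the default API server port is already taken (the port value is just an example):

```bash
# Start the e2e Argo CD instance with an alternative API server port.
export ARGOCD_E2E_APISERVER_PORT=8888
make start-e2e

# In another shell, point the tests at the same port before running them.
export ARGOCD_SERVER=localhost:8888
make test-e2e
```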
|
||||
|
||||
|
||||
## Test Isolation
|
||||
|
||||
Some effort has been made to balance test isolation with speed. Tests are isolated as follows; each test gets:
|
||||
|
||||
* A random 5 character ID.
|
||||
* A unique Git repository containing the `testdata` in `/tmp/argo-e2e/${id}`.
|
||||
* A namespace `argocd-e2e-ns-${id}`.
|
||||
* A primary name for the app `argocd-e2e-${id}`.
|
||||
- A random 5 character ID.
|
||||
- A unique Git repository containing the `testdata` in `/tmp/argo-e2e/${id}`.
|
||||
- A namespace `argocd-e2e-ns-${id}`.
|
||||
- A primary name for the app `argocd-e2e-${id}`.
|
||||
|
||||
## Run only a subset of tests
|
||||
|
||||
Running all tests locally is a time-consuming process. To run only a subset of tests, you can set the `TEST_MODULE` environment variable.
|
||||
For example, to run only the OCI tests, you can set the variable as follows: `make TEST_MODULE=./test/e2e/oci_test.go test-e2e-local`
|
||||
|
||||
|
||||
If you want more fine-grained control over which tests to run, you can also try `make TEST_FLAGS="-run <TEST_METHOD_NAME_REGEXP>" test-e2e-local`.
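For example, to run only tests whose names match a pattern (the test name below is a hypothetical placeholder):

```bash
# -run is passed through to `go test`; any test-name regular expression works here.
make TEST_FLAGS="-run TestSyncWithConfirmation" test-e2e-local
```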
|
||||
Individual tests can also be run using your IDE's run-test feature.
|
||||
|
||||
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Tests fail to delete `argocd-e2e-ns-*` namespaces.**
|
||||
|
||||
@@ -62,21 +62,30 @@ K3d is a minimal Kubernetes distribution, in docker. Because it's running in a d
|
||||
|
||||
The configuration you will need for Argo CD virtualized toolchain:
|
||||
|
||||
1. Find your host IP by executing `ifconfig` on Mac/Linux and `ipconfig` on Windows. For most users, the following command works to find the IP address.
|
||||
1. For most users, the following command works to find the host IP address.
|
||||
|
||||
* For Mac:
|
||||
* If you have perl
|
||||
|
||||
```
|
||||
IP=`ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}'`
|
||||
echo $IP
|
||||
```
|
||||
```pl
|
||||
perl -e '
|
||||
use strict;
|
||||
use Socket;
|
||||
|
||||
* For Linux:
|
||||
my $target = sockaddr_in(53, inet_aton("8.8.8.8"));
|
||||
socket(my $s, AF_INET, SOCK_DGRAM, getprotobyname("udp")) or die $!;
|
||||
connect($s, $target) or die $!;
|
||||
my $local_addr = getsockname($s) or die $!;
|
||||
my (undef, $ip) = sockaddr_in($local_addr);
|
||||
print "IP: ", inet_ntoa($ip), "\n";
|
||||
'
|
||||
```
|
||||
|
||||
```
|
||||
IP=`ifconfig eth0 | grep inet | grep -v inet6 | awk '{print $2}'`
|
||||
echo $IP
|
||||
```
|
||||
* If you don't have perl
|
||||
|
||||
* Try `ip route get 8.8.8.8` on Linux (see the sketch after this list)
|
||||
* Try `ifconfig`/`ipconfig` (and pick the ip address that feels right -- look for `192.168.x.x` or `10.x.x.x` addresses)
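A minimal sketch of extracting just the source address from the `ip route get` output (assumes `iproute2` and `awk` are available):

```bash
# Print the local address Linux would use to reach 8.8.8.8.
IP=$(ip route get 8.8.8.8 | awk '{for (i = 1; i <= NF; i++) if ($i == "src") print $(i + 1)}')
echo "$IP"
```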
|
||||
|
||||
Note that `8.8.8.8` is Google's Public DNS server; in most places it's likely to be accessible and thus is a good proxy for "which outbound address would my computer use", but you can replace it with a different IP address if necessary.
|
||||
|
||||
Keep in mind that this IP is dynamically assigned by the router so if your router restarts for any reason, your IP might change.
|
||||
|
||||
|
||||
30
docs/faq.md
@@ -441,3 +441,33 @@ If you can avoid using these features, you can avoid triggering the error. The o
|
||||
Excluding mutation webhooks from the diff could cause undesired diffing behavior.
|
||||
3. **Disable mutation webhooks when using server-side diff**: see [server-side diff docs](user-guide/diff-strategies.md#mutation-webhooks)
|
||||
for details about that feature. Disabling mutation webhooks may have undesired effects on sync behavior.
|
||||
|
||||
### How do I fix `grpc: error while marshaling: string field contains invalid UTF-8`?
|
||||
|
||||
On Kubernetes v1.34.x clusters, Argo CD components may stop working and pods may
|
||||
fail to start with errors such as:
|
||||
|
||||
```
|
||||
Error: grpc: error while marshaling: string field contains invalid UTF-8
|
||||
```
|
||||
This issue typically affects pods that reference Kubernetes secrets via environment variables, e.g.
|
||||
```yaml
|
||||
env:
|
||||
- name: REDIS_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: argocd-redis
|
||||
key: auth
|
||||
optional: false
|
||||
```
|
||||
Kubernetes environment variables must be valid UTF-8 strings. In affected clusters, the argocd-redis Secret contained non-UTF-8 (binary) data, while other clusters used
|
||||
ASCII-only values.
|
||||
|
||||
#### How do I fix the issue?
|
||||
Inspect the decoded Redis password:
|
||||
```bash
|
||||
kubectl get -n argocd secret argocd-redis -o json \
|
||||
| jq -r '.data.auth' | base64 --decode | xxd
|
||||
```
|
||||
If the output contains non-printable characters or bytes outside the UTF-8 range, the Secret is invalid for use as an
|
||||
environment variable. It is recommended to regenerate the secret using a UTF-8-safe password.
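One possible way to do that is sketched below with an ASCII-only random value; coordinate the change with however the Redis password is provisioned in your installation, and adjust the list of restarted workloads to match it:

```bash
# Sketch only: generate an ASCII-safe password and write it back to the secret.
NEW_PASSWORD=$(openssl rand -hex 16)   # hex output is always valid UTF-8
kubectl patch -n argocd secret argocd-redis --type merge \
  -p "{\"stringData\": {\"auth\": \"${NEW_PASSWORD}\"}}"

# Restart the workloads that consume the secret so they pick up the new value
# (the exact set depends on your installation).
kubectl rollout restart -n argocd deployment argocd-redis argocd-server argocd-repo-server
```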
|
||||
@@ -80,7 +80,7 @@ For additional details, see [architecture overview](operator-manual/architecture
|
||||
* CLI for automation and CI integration
|
||||
* Webhook integration (GitHub, BitBucket, GitLab)
|
||||
* Access tokens for automation
|
||||
* PreSync, Sync, PostSync hooks to support complex application rollouts (e.g.blue/green & canary upgrades)
|
||||
* PreSync, Sync, PostSync hooks to support complex application rollouts (e.g. blue/green & canary upgrades)
|
||||
* Audit trails for application events and API calls
|
||||
* Prometheus metrics
|
||||
* Parameter overrides for overriding helm parameters in Git
|
||||
|
||||
@@ -217,7 +217,7 @@ If you want to access a private repository, you must also provide the credential
|
||||
In case of Bitbucket App Token, go with `bearerToken` section.
|
||||
* `tokenRef`: A `Secret` name and key containing the app token to use for requests.
|
||||
|
||||
In case self-signed BitBucket Server certificates, the following options can be usefully:
|
||||
In case of self-signed BitBucket Server certificates, the following options can be useful:
|
||||
* `insecure`: By default (false) - Skip checking the validity of the SCM's certificate - useful for self-signed TLS certificates.
|
||||
* `caRef`: Optional `ConfigMap` name and key containing the BitBucket server certificates to trust - useful for self-signed TLS certificates. Possibly reference the ArgoCD CM holding the trusted certs.
|
||||
|
||||
|
||||
@@ -221,7 +221,7 @@ If you want to access a private repository, you must also provide the credential
|
||||
In case of Bitbucket App Token, go with `bearerToken` section.
|
||||
* `tokenRef`: A `Secret` name and key containing the app token to use for requests.
|
||||
|
||||
In case self-signed BitBucket Server certificates, the following options can be usefully:
|
||||
In case of self-signed BitBucket Server certificates, the following options can be useful:
|
||||
* `insecure`: By default (false) - Skip checking the validity of the SCM's certificate - useful for self-signed TLS certificates.
|
||||
* `caRef`: Optional `ConfigMap` name and key containing the BitBucket server certificates to trust - useful for self-signed TLS certificates. Possibly reference the ArgoCD CM holding the trusted certs.
|
||||
|
||||
|
||||
@@ -155,7 +155,7 @@ In this example, when applications are deleted:
|
||||
|
||||
This deletion order is useful for scenarios where you need to tear down dependent services in the correct sequence, such as deleting frontend services before backend dependencies.
|
||||
|
||||
#### Example
|
||||
### Example
|
||||
|
||||
The following example illustrates how to stage a progressive sync over Applications with explicitly configured environment labels.
|
||||
|
||||
|
||||
@@ -56,9 +56,12 @@ spec:
|
||||
path: guestbook
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps
|
||||
targetRevision: HEAD
|
||||
```
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
```
|
||||
|
||||
The sync policy to automated + prune, so that child apps are automatically created, synced, and deleted when the manifest is changed, but you may wish to disable this. I've also added the finalizer, which will ensure that your apps are deleted correctly.
|
||||
This example sets the sync policy to automated with pruning enabled, so child apps are automatically created, synced, and deleted when the parent app's manifest changes. You may wish to disable automated sync for more control over when changes are applied. The finalizer ensures that child app resources are properly cleaned up on deletion.
|
||||
|
||||
Fix the revision to a specific Git commit SHA to make sure that, even if the child apps' repo changes, the app will only change when the parent app changes that revision. Alternatively, you can set it to HEAD or a branch name.
|
||||
|
||||
|
||||
@@ -504,7 +504,7 @@ stringData:
|
||||
username: my-username
|
||||
```
|
||||
|
||||
A note on noProxy: Argo CD uses exec to interact with different tools such as helm and kustomize. Not all of these tools support the same noProxy syntax as the [httpproxy go package](https://cs.opensource.google/go/x/net/+/internal-branch.go1.21-vendor:http/httpproxy/proxy.go;l=38-50) does. In case you run in trouble with noProxy not beeing respected you might want to try using the full domain instead of a wildcard pattern or IP range to find a common syntax that all tools support.
|
||||
A note on noProxy: Argo CD uses exec to interact with different tools such as helm and kustomize. Not all of these tools support the same noProxy syntax as the [httpproxy go package](https://cs.opensource.google/go/x/net/+/internal-branch.go1.21-vendor:http/httpproxy/proxy.go;l=38-50) does. In case you run into trouble with noProxy not being respected, you might want to try using the full domain instead of a wildcard pattern or IP range to find a common syntax that all tools support.
|
||||
|
||||
## Clusters
|
||||
|
||||
@@ -630,7 +630,7 @@ This setup requires:
|
||||
3. A role created for each cluster being added to Argo CD that is assumable by the Argo CD management role
|
||||
4. An [Access Entry](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html) within each EKS cluster added to Argo CD that gives the cluster's role (from point 3) RBAC permissions
|
||||
to perform actions within the cluster
|
||||
- Or, alternatively, an entry within the `aws-auth` ConfigMap within the cluster added to Argo CD ([depreciated by EKS](https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html))
|
||||
- Or, alternatively, an entry within the `aws-auth` ConfigMap within the cluster added to Argo CD ([deprecated by EKS](https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html))
|
||||
|
||||
#### Argo CD Management Role
|
||||
|
||||
@@ -880,7 +880,7 @@ associated EKS cluster.
|
||||
|
||||
**AWS Auth (Deprecated)**
|
||||
|
||||
Instead of using Access Entries, you may need to use the depreciated `aws-auth`.
|
||||
Instead of using Access Entries, you may need to use the deprecated `aws-auth`.
|
||||
|
||||
If so, the `roleARN` of each managed cluster needs to be added to each respective cluster's `aws-auth` config map (see
|
||||
[Enabling IAM principal access to your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)), as
|
||||
@@ -1368,7 +1368,7 @@ data:
|
||||
|
||||
## Resource Custom Labels
|
||||
|
||||
Custom Labels configured with `resource.customLabels` (comma separated string) will be displayed in the UI (for any resource that defines them).
|
||||
Custom Labels configured with `resource.customLabels` (comma separated string) will be displayed in the UI (for any resource that defines them). Note that this requires a restart of the Argo CD Application Controller to take effect.
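A minimal sketch of applying this with `kubectl` (the label keys are illustrative, and the command assumes a default installation where the Application Controller runs as a StatefulSet):

```bash
# Add two illustrative label keys to be shown in the UI.
kubectl patch -n argocd configmap argocd-cm --type merge \
  -p '{"data": {"resource.customLabels": "team,app.kubernetes.io/version"}}'

# Restart the Application Controller so the change takes effect.
kubectl rollout restart -n argocd statefulset argocd-application-controller
```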
|
||||
|
||||
## Labels on Application Events
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ There are several ways how Ingress can be configured.
|
||||
|
||||
The Ambassador Edge Stack can be used as a Kubernetes ingress controller with [automatic TLS termination](https://www.getambassador.io/docs/latest/topics/running/tls/#host) and routing capabilities for both the CLI and the UI.
|
||||
|
||||
The API server should be run with TLS disabled. Edit the `argocd-server` deployment to add the `--insecure` flag to the argocd-server command, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md). Given the `argocd` CLI includes the port number in the request `host` header, 2 Mappings are required.
|
||||
The API server should be run with TLS disabled. Edit the `argocd-server` deployment to add the `--insecure` flag to the argocd-server command, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md). Given the `argocd` CLI includes the port number in the request `host` header, 2 Mappings are required.
|
||||
Note: Disabling TLS is not required if you are using grpc-web
|
||||
|
||||
### Option 1: Mapping CRD for Host-based Routing
|
||||
@@ -415,7 +415,7 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
annotations:
|
||||
alb.ingress.kubernetes.io/backend-protocol-version: GRPC # This tells AWS to send traffic from the ALB using GRPC. Plain HTTP2 can be used, but the health checks wont be available because argo currently downgrade non-grpc calls to HTTP1
|
||||
alb.ingress.kubernetes.io/backend-protocol-version: GRPC # This tells AWS to send traffic from the ALB using GRPC. Plain HTTP2 can be used, but the health checks won't be available because argo currently downgrades non-grpc calls to HTTP1
|
||||
labels:
|
||||
app: argogrpc
|
||||
name: argogrpc
|
||||
@@ -494,7 +494,7 @@ resources:
|
||||
|
||||
patches:
|
||||
- path: ./patch.yml
|
||||
```
|
||||
```
|
||||
|
||||
And following lines as patch.yml
|
||||
|
||||
@@ -878,15 +878,15 @@ http {
|
||||
}
|
||||
```
|
||||
|
||||
## Cilium Gateway API Example
|
||||
## Gateway API Example
|
||||
|
||||
This section provides a working example of using Cilium Gateway API with Argo CD, including HTTP and gRPC routes.
|
||||
This section discusses using Gateway API to expose the Argo CD server in various TLS configurations,
|
||||
accommodating both HTTP and gRPC traffic, possibly using HTTP/2.
|
||||
|
||||
### Prerequisites
|
||||
### TLS termination at the Gateway
|
||||
|
||||
- API server run with TLS disabled (set `server.insecure: "true"` in argocd-cmd-params-cm ConfigMap)
|
||||
|
||||
### Gateway Example
|
||||
Assume the following cluster-wide `Gateway` resource,
|
||||
that terminates the TLS connection with a certificate stored in a `Secret` in the same namespace:
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
@@ -894,17 +894,12 @@ kind: Gateway
|
||||
metadata:
|
||||
name: cluster-gateway
|
||||
namespace: gateway
|
||||
annotations:
|
||||
cert-manager.io/issuer: cloudflare-dns-issuer
|
||||
spec:
|
||||
gatewayClassName: cilium
|
||||
addresses:
|
||||
- type: IPAddress
|
||||
value: "192.168.0.130"
|
||||
gatewayClassName: example
|
||||
listeners:
|
||||
- protocol: HTTPS
|
||||
port: 443
|
||||
name: https-cluster
|
||||
name: https
|
||||
hostname: "*.local.example.com"
|
||||
allowedRoutes:
|
||||
namespaces:
|
||||
@@ -917,7 +912,38 @@ spec:
|
||||
group: ""
|
||||
```
|
||||
|
||||
### HTTPRoute Example
|
||||
To automate certificate management, `cert-manager` supports [gateway annotations](https://cert-manager.io/docs/usage/gateway/).
|
||||
|
||||
#### Securing traffic between Argo CD and the gateway
|
||||
|
||||
If your security requirements allow it, the Argo CD API server can be run with TLS disabled: pass the `--insecure` flag to the `argocd-server` command,
|
||||
or set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md).
|
||||
|
||||
It is also possible to keep TLS enabled, encrypting traffic between the gateway and the Argo CD API server, by using a [BackendTLSPolicy](https://gateway-api.sigs.k8s.io/api-types/backendtlspolicy/).
|
||||
Consult the [Upstream TLS](https://gateway-api.sigs.k8s.io/guides/tls/#upstream-tls) documentation for more details.
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: BackendTLSPolicy
|
||||
metadata:
|
||||
name: tls-upstream-auth
|
||||
namespace: argocd
|
||||
spec:
|
||||
targetRefs:
|
||||
- kind: Service
|
||||
name: argocd-server
|
||||
group: ""
|
||||
validation:
|
||||
caCertificateRefs:
|
||||
- kind: ConfigMap
|
||||
name: argocd-server-ca-cert
|
||||
group: ""
|
||||
hostname: argocd-server.argocd.svc.cluster.local
|
||||
```
|
||||
|
||||
|
||||
#### Routing HTTP requests
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: HTTPRoute
|
||||
@@ -928,6 +954,7 @@ spec:
|
||||
parentRefs:
|
||||
- name: cluster-gateway
|
||||
namespace: gateway
|
||||
sectionName: https
|
||||
hostnames:
|
||||
- "argocd.local.example.com"
|
||||
rules:
|
||||
@@ -940,7 +967,45 @@ spec:
|
||||
value: /
|
||||
```
|
||||
|
||||
### GRPCRoute Example
|
||||
#### Routing gRPC requests
|
||||
|
||||
The `argocd` CLI operates at full capability when using gRPC over HTTP/2 to communicate with the API server; it can fall back to HTTP/1.1 via the `--grpc-web` flag.
|
||||
|
||||
gRPC can be configured using a `GRPCRoute`, and HTTP/2 requested as the application protocol on the `argocd-server` service:
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: GRPCRoute
|
||||
metadata:
|
||||
name: argocd-grpc-route
|
||||
namespace: argocd
|
||||
spec:
|
||||
parentRefs:
|
||||
- name: cluster-gateway
|
||||
namespace: gateway
|
||||
sectionName: https
|
||||
hostnames:
|
||||
- "grpc.argocd.local.example.com"
|
||||
rules:
|
||||
- backendRefs:
|
||||
- name: argocd-server
|
||||
port: 443
|
||||
```
|
||||
|
||||
And in Argo CD's `values.yaml` (or [directly](https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol) in the service manifest):
|
||||
```yaml
|
||||
server:
|
||||
service:
|
||||
# Enable gRPC over HTTP/2
|
||||
servicePortHttpsAppProtocol: kubernetes.io/h2c
|
||||
```
|
||||
|
||||
##### Routing gRPC and HTTP through the same domain
|
||||
|
||||
Although officially [discouraged](https://gateway-api.sigs.k8s.io/api-types/grpcroute/#cross-serving),
|
||||
attaching the `HTTPRoute` and `GRPCRoute` to the same domain may be supported by some implementations.
|
||||
Matching request headers becomes necessary to disambiguate the destination, as shown below:
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: GRPCRoute
|
||||
@@ -952,7 +1017,7 @@ spec:
|
||||
- name: cluster-gateway
|
||||
namespace: gateway
|
||||
hostnames:
|
||||
- "argocd.local.example.com"
|
||||
- "grpc.argocd.local.example.com"
|
||||
rules:
|
||||
- backendRefs:
|
||||
- name: argocd-server
|
||||
@@ -962,4 +1027,57 @@ spec:
|
||||
- name: Content-Type
|
||||
type: RegularExpression
|
||||
value: "^application/grpc.*$"
|
||||
```
|
||||
```
|
||||
|
||||
### TLS passthrough
|
||||
|
||||
TLS can also be configured to terminate at the Argo CD API server.
|
||||
|
||||
This requires attaching a `TLSRoute` to the gateway,
|
||||
which is part of the [Experimental](https://gateway-api.sigs.k8s.io/reference/1.4/specx/) Gateway API CRDs.
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
|
||||
metadata:
|
||||
name: cluster-gateway
|
||||
namespace: gateway
|
||||
spec:
|
||||
gatewayClassName: example
|
||||
listeners:
|
||||
- name: tls
|
||||
port: 443
|
||||
protocol: TLS
|
||||
hostname: "argocd.example.com"
|
||||
allowedRoutes:
|
||||
namespaces:
|
||||
from: All
|
||||
kinds:
|
||||
- kind: TLSRoute
|
||||
tls:
|
||||
mode: Passthrough
|
||||
```
|
||||
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1alpha2
|
||||
kind: TLSRoute
|
||||
metadata:
|
||||
namespace: argocd
|
||||
name: argocd-server-tlsroute
|
||||
spec:
|
||||
parentRefs:
|
||||
- name: cluster-gateway
|
||||
namespace: gateway
|
||||
sectionName: tls
|
||||
hostnames:
|
||||
- argocd.example.com
|
||||
rules:
|
||||
- backendRefs:
|
||||
- name: argocd-server
|
||||
port: 443
|
||||
```
|
||||
|
||||
The TLS certificates are implicit here,
|
||||
and found by the Argo CD server in the `argocd-server-tls` secret.
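If you manage the certificate yourself, creating that secret from existing PEM files could look like the following sketch (file paths are placeholders):

```bash
# Create the TLS secret the Argo CD API server uses for its own endpoint.
kubectl create -n argocd secret tls argocd-server-tls \
  --cert=/path/to/tls.crt \
  --key=/path/to/tls.key
```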
|
||||
|
||||
Note that `cert-manager` does not support generating certificates for passthrough gateway listeners.
|
||||
|
||||
@@ -264,7 +264,7 @@ Scraped at the `argocd-repo-server:8084/metrics` endpoint.
|
||||
| `argocd_redis_request_total` | counter | Number of Kubernetes requests executed during application reconciliation. |
|
||||
| `argocd_repo_pending_request_total` | gauge | Number of pending requests requiring repository lock |
|
||||
| `argocd_oci_request_total` | counter | Number of OCI requests performed by repo server |
|
||||
| `argocd_oci_request_duration_seconds` | histogram | Number of OCI fetch requests failures by repo server |
|
||||
| `argocd_oci_request_duration_seconds` | histogram | Duration of OCI requests performed by the repo server. |
|
||||
| `argocd_oci_test_repo_fail_total` | counter | Number of OCI test repo requests failures by repo server |
|
||||
| `argocd_oci_get_tags_fail_total` | counter | Number of OCI get tags requests failures by repo server |
|
||||
| `argocd_oci_digest_metadata_fail_total` | counter | Number of OCI digest metadata failures by repo server |
|
||||
|
||||
@@ -47,7 +47,8 @@ metadata:
|
||||
* [Grafana](./grafana.md)
|
||||
* [Webhook](./webhook.md)
|
||||
* [Telegram](./telegram.md)
|
||||
* [Teams](./teams.md)
|
||||
* [Teams (Office 365 Connectors)](./teams.md) - Legacy service (deprecated, retires March 31, 2026)
|
||||
* [Teams Workflows](./teams-workflows.md) - Recommended replacement for Office 365 Connectors
|
||||
* [Google Chat](./googlechat.md)
|
||||
* [Rocket.Chat](./rocketchat.md)
|
||||
* [Pushover](./pushover.md)
|
||||
|
||||
370
docs/operator-manual/notifications/services/teams-workflows.md
Executable file
@@ -0,0 +1,370 @@
|
||||
# Teams Workflows
|
||||
|
||||
## Overview
|
||||
|
||||
The Teams Workflows notification service sends message notifications using Microsoft Teams Workflows (Power Automate). This is the recommended replacement for the legacy Office 365 Connectors service, which will be retired on March 31, 2026.
|
||||
|
||||
## Parameters
|
||||
|
||||
The Teams Workflows notification service requires specifying the following settings:
|
||||
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://api.powerautomate.com/webhook/...`
|
||||
|
||||
## Supported Webhook URL Formats
|
||||
|
||||
The service supports the following Microsoft Teams Workflows webhook URL patterns:
|
||||
|
||||
- `https://api.powerautomate.com/...`
|
||||
- `https://api.powerplatform.com/...`
|
||||
- `https://flow.microsoft.com/...`
|
||||
- URLs containing `/powerautomate/` in the path
|
||||
|
||||
## Configuration
|
||||
|
||||
1. Open `Teams` and go to the channel you wish to set notifications for
|
||||
2. Click on the 3 dots next to the channel name
|
||||
3. Select `Workflows`
|
||||
4. Click on `Manage`
|
||||
5. Click `New flow`
|
||||
6. Write `Send webhook alerts to a channel` in the search bar or select it from the template list
|
||||
7. Choose your team and channel
|
||||
8. Configure the webhook name and settings
|
||||
9. Copy the webhook URL (it will be from `api.powerautomate.com`, `api.powerplatform.com`, or `flow.microsoft.com`)
|
||||
10. Store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
data:
|
||||
service.teams-workflows: |
|
||||
recipientUrls:
|
||||
channelName: $channel-workflows-url
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
channel-workflows-url: https://api.powerautomate.com/webhook/your-webhook-id
|
||||
```
|
||||
|
||||
11. Create subscription for your Teams Workflows integration:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams-workflows: channelName
|
||||
```
|
||||
|
||||
## Channel Support
|
||||
|
||||
- ✅ Standard Teams channels
|
||||
- ✅ Shared channels (as of December 2025)
|
||||
- ✅ Private channels (as of December 2025)
|
||||
|
||||
Teams Workflows provides enhanced channel support compared to Office 365 Connectors, allowing you to post to shared and private channels in addition to standard channels.
|
||||
|
||||
## Adaptive Card Format
|
||||
|
||||
The Teams Workflows service uses **Adaptive Cards** exclusively, which is the modern, flexible card format for Microsoft Teams. All notifications are automatically converted to Adaptive Card format and wrapped in the required message envelope.
|
||||
|
||||
### Option 1: Using Template Fields (Recommended)
|
||||
|
||||
The service automatically converts template fields to Adaptive Card format. This is the simplest and most maintainable approach:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams-workflows:
|
||||
# ThemeColor supports Adaptive Card semantic colors: "Good", "Warning", "Attention", "Accent"
|
||||
# or hex colors like "#000080"
|
||||
themeColor: "Good"
|
||||
title: Application {{.app.metadata.name}} has been successfully synced
|
||||
text: Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.
|
||||
summary: "{{.app.metadata.name}} sync succeeded"
|
||||
facts: |
|
||||
[{
|
||||
"name": "Sync Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
}, {
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}]
|
||||
sections: |
|
||||
[{
|
||||
"facts": [
|
||||
{
|
||||
"name": "Namespace",
|
||||
"value": "{{.app.metadata.namespace}}"
|
||||
},
|
||||
{
|
||||
"name": "Cluster",
|
||||
"value": "{{.app.spec.destination.server}}"
|
||||
}
|
||||
]
|
||||
}]
|
||||
potentialAction: |-
|
||||
[{
|
||||
"@type": "OpenUri",
|
||||
"name": "View in Argo CD",
|
||||
"targets": [{
|
||||
"os": "default",
|
||||
"uri": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
|
||||
**How it works:**
|
||||
- `title` → Converted to a large, bold TextBlock
|
||||
- `text` → Converted to a regular TextBlock
|
||||
- `facts` → Converted to a FactSet element
|
||||
- `sections` → Facts within sections are extracted and converted to FactSet elements
|
||||
- `potentialAction` → OpenUri actions are converted to Action.OpenUrl
|
||||
- `themeColor` → Applied to the title TextBlock (supports semantic colors like "Good", "Warning", "Attention", "Accent" or hex colors)
|
||||
|
||||
### Option 2: Custom Adaptive Card JSON
|
||||
|
||||
For full control and advanced features, you can provide a complete Adaptive Card JSON template:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams-workflows:
|
||||
adaptiveCard: |
|
||||
{
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.4",
|
||||
"body": [
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}} synced successfully",
|
||||
"size": "Large",
|
||||
"weight": "Bolder",
|
||||
"color": "Good"
|
||||
},
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.",
|
||||
"wrap": true
|
||||
},
|
||||
{
|
||||
"type": "FactSet",
|
||||
"facts": [
|
||||
{
|
||||
"title": "Sync Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
},
|
||||
{
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"type": "Action.OpenUrl",
|
||||
"title": "View in Argo CD",
|
||||
"url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** When using `adaptiveCard`, you only need to provide the AdaptiveCard JSON structure (not the full message envelope). The service automatically wraps it in the required `message` + `attachments` format for Teams Workflows.
|
||||
|
||||
**Important:** If you provide `adaptiveCard`, it takes precedence over all other template fields (`title`, `text`, `facts`, etc.).
|
||||
|
||||
## Template Fields
|
||||
|
||||
The Teams Workflows service supports the following template fields, which are automatically converted to Adaptive Card format:
|
||||
|
||||
### Standard Fields
|
||||
|
||||
- `title` - Message title (converted to large, bold TextBlock)
|
||||
- `text` - Message text content (converted to TextBlock)
|
||||
- `summary` - Summary text (currently not used in Adaptive Cards, but preserved for compatibility)
|
||||
- `themeColor` - Color for the title. Supports:
|
||||
- Semantic colors: `"Good"` (green), `"Warning"` (yellow), `"Attention"` (red), `"Accent"` (blue)
|
||||
- Hex colors: `"#000080"`, `"#FF0000"`, etc.
|
||||
- `facts` - JSON array of fact key-value pairs (converted to FactSet)
|
||||
```yaml
|
||||
facts: |
|
||||
[{
|
||||
"name": "Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
}]
|
||||
```
|
||||
- `sections` - JSON array of sections containing facts (facts are extracted and converted to FactSet)
|
||||
```yaml
|
||||
sections: |
|
||||
[{
|
||||
"facts": [{
|
||||
"name": "Namespace",
|
||||
"value": "{{.app.metadata.namespace}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
- `potentialAction` - JSON array of action buttons (OpenUri actions converted to Action.OpenUrl)
|
||||
```yaml
|
||||
potentialAction: |-
|
||||
[{
|
||||
"@type": "OpenUri",
|
||||
"name": "View Details",
|
||||
"targets": [{
|
||||
"os": "default",
|
||||
"uri": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
|
||||
### Advanced Fields
|
||||
|
||||
- `adaptiveCard` - Complete Adaptive Card JSON template (takes precedence over all other fields)
|
||||
- Only provide the AdaptiveCard structure, not the message envelope
|
||||
- Supports full Adaptive Card 1.4 specification
|
||||
- Allows access to all Adaptive Card features (containers, columns, images, etc.)
|
||||
|
||||
- `template` - Raw JSON template (legacy, use `adaptiveCard` instead)
|
||||
|
||||
### Field Conversion Details
|
||||
|
||||
| Template Field | Adaptive Card Element | Notes |
|
||||
|---------------|----------------------|-------|
|
||||
| `title` | `TextBlock` with `size: "Large"`, `weight: "Bolder"` | ThemeColor applied to this element |
|
||||
| `text` | `TextBlock` with `wrap: true` | Uses `n.Message` if `text` is empty |
|
||||
| `facts` | `FactSet` | Each fact becomes a `title`/`value` pair |
|
||||
| `sections[].facts` | `FactSet` | Facts extracted from sections |
|
||||
| `potentialAction[OpenUri]` | `Action.OpenUrl` | Only OpenUri actions are converted |
|
||||
| `themeColor` | Applied to title `TextBlock.color` | Supports semantic and hex colors |
|
||||
|
||||
## Migration from Office 365 Connectors
|
||||
|
||||
If you're currently using the `teams` service with Office 365 Connectors, follow these steps to migrate:
|
||||
|
||||
1. **Create a new Workflows webhook** using the configuration steps above
|
||||
|
||||
2. **Update your service configuration:**
|
||||
- Change from `service.teams` to `service.teams-workflows`
|
||||
- Update the webhook URL to your new Workflows webhook URL
|
||||
|
||||
3. **Update your templates:**
|
||||
- Change `teams:` to `teams-workflows:` in your templates
|
||||
- Your existing template fields (`title`, `text`, `facts`, `sections`, `potentialAction`) will automatically be converted to Adaptive Card format
|
||||
- No changes needed to your template structure - the conversion is automatic
|
||||
|
||||
4. **Update your subscriptions:**
|
||||
```yaml
|
||||
# Old
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName
|
||||
|
||||
# New
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams-workflows: channelName
|
||||
```
|
||||
|
||||
5. **Test and verify:**
|
||||
- Send a test notification to verify it works correctly
|
||||
- Once verified, you can remove the old Office 365 Connector configuration
|
||||
|
||||
**Note:** Your existing templates will work without modification. The service automatically converts your template fields to Adaptive Card format, so you get the benefits of modern cards without changing your templates.
|
||||
|
||||
## Differences from Office 365 Connectors
|
||||
|
||||
| Feature | Office 365 Connectors | Teams Workflows |
|
||||
|---------|----------------------|-----------------|
|
||||
| Service Name | `teams` | `teams-workflows` |
|
||||
| Standard Channels | ✅ | ✅ |
|
||||
| Shared Channels | ❌ | ✅ (Dec 2025+) |
|
||||
| Private Channels | ❌ | ✅ (Dec 2025+) |
|
||||
| Card Format | messageCard (legacy) | Adaptive Cards (modern) |
|
||||
| Template Conversion | N/A | Automatic conversion from template fields |
|
||||
| Retirement Date | March 31, 2026 | Active |
|
||||
|
||||
## Adaptive Card Features
|
||||
|
||||
The Teams Workflows service leverages Adaptive Cards, which provide:
|
||||
|
||||
- **Rich Content**: Support for text, images, fact sets, and more
|
||||
- **Flexible Layout**: Containers, columns, and adaptive layouts
|
||||
- **Interactive Elements**: Action buttons, input fields, and more
|
||||
- **Semantic Colors**: Built-in color schemes (Good, Warning, Attention, Accent)
|
||||
- **Cross-Platform**: Works across Teams, Outlook, and other Microsoft 365 apps
|
||||
|
||||
### Example: Advanced Adaptive Card Template
|
||||
|
||||
For complex notifications, you can use the full Adaptive Card specification:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded-advanced: |
|
||||
teams-workflows:
|
||||
adaptiveCard: |
|
||||
{
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.4",
|
||||
"body": [
|
||||
{
|
||||
"type": "Container",
|
||||
"items": [
|
||||
{
|
||||
"type": "ColumnSet",
|
||||
"columns": [
|
||||
{
|
||||
"type": "Column",
|
||||
"width": "auto",
|
||||
"items": [
|
||||
{
|
||||
"type": "Image",
|
||||
"url": "https://example.com/success-icon.png",
|
||||
"size": "Small"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "Column",
|
||||
"width": "stretch",
|
||||
"items": [
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}}",
|
||||
"weight": "Bolder",
|
||||
"size": "Large"
|
||||
},
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Successfully synced",
|
||||
"spacing": "None",
|
||||
"isSubtle": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "FactSet",
|
||||
"facts": [
|
||||
{
|
||||
"title": "Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
},
|
||||
{
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"type": "Action.OpenUrl",
|
||||
"title": "View in Argo CD",
|
||||
"url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -1,18 +1,46 @@
|
||||
# Teams
|
||||
# Teams (Office 365 Connectors)
|
||||
|
||||
## ⚠️ Deprecation Notice
|
||||
|
||||
**Office 365 Connectors are being retired by Microsoft.**
|
||||
|
||||
Microsoft is retiring the Office 365 Connectors service in Teams. The service will be fully retired by **March 31, 2026** (extended from the original timeline of December 2025).
|
||||
|
||||
### What this means:
|
||||
- **Old Office 365 Connectors** (webhook URLs from `webhook.office.com`) will stop working after the retirement date
|
||||
- **New Power Automate Workflows** (webhook URLs from `api.powerautomate.com`, `api.powerplatform.com`, or `flow.microsoft.com`) are the recommended replacement
|
||||
|
||||
### Migration Required:
|
||||
If you are currently using Office 365 Connectors (Incoming Webhook), you should migrate to Power Automate Workflows before the retirement date. The notifications-engine automatically detects the webhook type and handles both formats, but you should plan your migration.
|
||||
|
||||
**Migration Resources:**
|
||||
- [Microsoft Deprecation Notice](https://devblogs.microsoft.com/microsoft365dev/retirement-of-office-365-connectors-within-microsoft-teams/)
|
||||
- [Create incoming webhooks with Workflows for Microsoft Teams](https://support.microsoft.com/en-us/office/create-incoming-webhooks-with-workflows-for-microsoft-teams-4b3b0b0e-0b5a-4b5a-9b5a-0b5a-4b5a-9b5a)
|
||||
|
||||
---
|
||||
|
||||
## Parameters
|
||||
|
||||
The Teams notification service send message notifications using Teams bot and requires specifying the following settings:
|
||||
The Teams notification service sends message notifications using Office 365 Connectors and requires specifying the following settings:
|
||||
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://example.com`
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://outlook.office.com/webhook/...`
|
||||
|
||||
> **⚠️ Deprecation Notice:** Office 365 Connectors will be retired by Microsoft on **March 31, 2026**. We recommend migrating to the [Teams Workflows service](./teams-workflows.md) for continued support and enhanced features.
|
||||
|
||||
## Configuration
|
||||
|
||||
> **💡 For Power Automate Workflows (Recommended):** See the [Teams Workflows documentation](./teams-workflows.md) for detailed configuration instructions.
|
||||
|
||||
### Office 365 Connectors (Deprecated - Retiring March 31, 2026)
|
||||
|
||||
> **⚠️ Warning:** This method is deprecated and will stop working after March 31, 2026. Please migrate to Power Automate Workflows.
|
||||
|
||||
1. Open `Teams` and go to `Apps`
|
||||
2. Find `Incoming Webhook` microsoft app and click on it
|
||||
3. Press `Add to a team` -> select team and channel -> press `Set up a connector`
|
||||
4. Enter webhook name and upload image (optional)
|
||||
5. Press `Create` then copy webhook url and store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
5. Press `Create` then copy webhook url (it will be from `webhook.office.com`)
|
||||
6. Store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -31,10 +59,20 @@ kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
channel-teams-url: https://example.com
|
||||
channel-teams-url: https://webhook.office.com/webhook/your-webhook-id # Office 365 Connector (deprecated)
|
||||
```
|
||||
|
||||
6. Create subscription for your Teams integration:
|
||||
> **Note:** For Power Automate Workflows webhooks, use the [Teams Workflows service](./teams-workflows.md) instead.
|
||||
|
||||
### Webhook Type Detection
|
||||
|
||||
The `teams` service supports Office 365 Connectors (deprecated):
|
||||
|
||||
- **Office 365 Connectors**: URLs from `webhook.office.com` (deprecated)
|
||||
- Requires response body to be exactly `"1"` for success
|
||||
- Will stop working after March 31, 2026
|
||||
|
||||
7. Create subscription for your Teams integration:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
@@ -44,12 +82,20 @@ metadata:
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName
|
||||
```
|
||||
|
||||
## Channel Support
|
||||
|
||||
- ✅ Standard Teams channels only
|
||||
|
||||
> **Note:** Office 365 Connectors only support standard Teams channels. For shared channels or private channels, use the [Teams Workflows service](./teams-workflows.md).
|
||||
|
||||
## Templates
|
||||
|
||||

|
||||
|
||||
[Notification templates](../templates.md) can be customized to leverage teams message sections, facts, themeColor, summary and potentialAction [feature](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using).
|
||||
|
||||
The Teams service uses the **messageCard** format (MessageCard schema) which is compatible with Office 365 Connectors.
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams:
|
||||
@@ -124,3 +170,7 @@ template.app-sync-succeeded: |
|
||||
teams:
|
||||
summary: "Sync Succeeded"
|
||||
```
|
||||
|
||||
## Migration to Teams Workflows
|
||||
|
||||
If you're currently using Office 365 Connectors, see the [Teams Workflows documentation](./teams-workflows.md) for migration instructions and enhanced features.
|
||||
|
||||
@@ -126,7 +126,7 @@ data:
|
||||
|
||||
## Ignoring updates for untracked resources
|
||||
|
||||
ArgoCD will only apply `ignoreResourceUpdates` configuration to tracked resources of an application. This means dependant resources, such as a `ReplicaSet` and `Pod` created by a `Deployment`, will not ignore any updates and trigger a reconcile of the application for any changes.
|
||||
ArgoCD will only apply `ignoreResourceUpdates` configuration to tracked resources of an application. This means dependent resources, such as a `ReplicaSet` and `Pod` created by a `Deployment`, will not ignore any updates and trigger a reconcile of the application for any changes.
|
||||
|
||||
If you want to apply the `ignoreResourceUpdates` configuration to an untracked resource, you can add the
|
||||
`argocd.argoproj.io/ignore-resource-updates=true` annotation in the dependent resources manifest.
|
||||
|
||||
46
docs/operator-manual/templates/minor_version_upgrade.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# vX.Y to X.Z
|
||||
|
||||
## Breaking Changes
|
||||
|
||||
<!-- What changed that could affect your setup:
|
||||
Deprecated flags / behavior removals
|
||||
Deprecated fields
|
||||
Other incompatible behavior or semantics changes between versions
|
||||
(Add list here) -->
|
||||
|
||||
## Behavioral Improvements / Fixes
|
||||
|
||||
<!-- Operational stability and performance fixes:
|
||||
Minor stability, reconciler improvements, bug fixes -->
|
||||
|
||||
## API Changes
|
||||
|
||||
<!-- Changes in API surface:
|
||||
Any removal of fields or changed API defaults -->
|
||||
|
||||
## Security Changes
|
||||
|
||||
<!-- Changes in security behavior:
|
||||
Any removal of sensitive fields or changed authentication defaults -->
|
||||
|
||||
## Deprecated Items
|
||||
|
||||
<!-- Flags / behavior to be removed in future releases
|
||||
Any deprecated config API or CLI flags -->
|
||||
|
||||
## Kustomize Upgraded
|
||||
|
||||
<!-- Bundled Kustomize version bump:
|
||||
Any breaking behavior
|
||||
Notes about specific upstream behavior changes (e.g., namespace propagation fixes). -->
|
||||
|
||||
## Helm Upgraded
|
||||
|
||||
<!-- Bundled Helm version bump:
|
||||
Any breaking behavior
|
||||
Verify if your charts depend on any features tied to Helm versions. -->
|
||||
|
||||
## Custom Healthchecks Added
|
||||
|
||||
<!-- New built-in health checks added in this release:
|
||||
Add any new CRD health support added in this version. -->
|
||||
@@ -31,8 +31,10 @@ their own dedicated Certificate Authority.
|
||||
| Connection | Strict TLS Parameter | Plain Text Parameter | Default Behavior |
|
||||
|------------|---------------------|---------------------|------------------|
|
||||
| `argocd-server` → `argocd-repo-server` | `--repo-server-strict-tls` | `--repo-server-plaintext` | Non-validating TLS |
|
||||
| `argocd-application-controller` → `argocd-repo-server` | `--repo-server-strict-tls` | `--repo-server-plaintext` | Non-validating TLS |
|
||||
| `argocd-server` → `argocd-dex-server` | `--dex-server-strict-tls` | `--dex-server-plaintext` | Non-validating TLS |
|
||||
| `argocd-application-controller` → `argocd-repo-server` | `--repo-server-strict-tls` | `--repo-server-plaintext` | Non-validating TLS |
|
||||
| `argocd-applicationset-controller` → `argocd-repo-server` | `--repo-server-strict-tls` | `--repo-server-plaintext` | Non-validating TLS |
|
||||
| `argocd-notifications-controller` → `argocd-repo-server` | `--argocd-repo-server-strict-tls` | `--argocd-repo-server-plaintext` | Non-validating TLS |
|
||||
|
||||
### Certificate Priority (argocd-server only)
|
||||
|
||||
@@ -180,27 +182,28 @@ on how your workloads connect to the repository server.
|
||||
|
||||
### Configuring TLS to argocd-repo-server
|
||||
|
||||
Both `argocd-server` and `argocd-application-controller` communicate with the
|
||||
`argocd-repo-server` using a gRPC API over TLS. By default,
|
||||
`argocd-repo-server` generates a non-persistent, self-signed certificate
|
||||
to use for its gRPC endpoint on startup. Because the `argocd-repo-server` has
|
||||
no means to connect to the K8s control plane API, this certificate is not available
|
||||
to outside consumers for verification. Both,
|
||||
`argocd-server` and `argocd-application-server` will use a non-validating
|
||||
connection to the `argocd-repo-server` for this reason.
|
||||
The components `argocd-server`, `argocd-application-controller`, `argocd-notifications-controller`,
|
||||
and `argocd-applicationset-controller` communicate with the `argocd-repo-server`
|
||||
using a gRPC API over TLS. By default, `argocd-repo-server` generates a non-persistent,
|
||||
self-signed certificate to use for its gRPC endpoint on startup. Because the
|
||||
`argocd-repo-server` has no means to connect to the K8s control plane API, this certificate
|
||||
is not available to outside consumers for verification. These components will use a
|
||||
non-validating connection to the `argocd-repo-server` for this reason.
|
||||
|
||||
To change this behavior to be more secure by having the `argocd-server` and
|
||||
`argocd-application-controller` validate the TLS certificate of the
|
||||
To change this behavior to be more secure by having these components validate the TLS certificate of the
|
||||
`argocd-repo-server` endpoint, the following steps need to be performed:
|
||||
|
||||
* Create a persistent TLS certificate to be used by `argocd-repo-server`, as
|
||||
shown above
|
||||
* Restart the `argocd-repo-server` pod(s)
|
||||
* Modify the pod startup parameters for `argocd-server` and
|
||||
`argocd-application-controller` to include the `--repo-server-strict-tls`
|
||||
parameter.
|
||||
* Modify the pod startup parameters for `argocd-server`, `argocd-application-controller`,
|
||||
and `argocd-applicationset-controller` to include the
|
||||
`--repo-server-strict-tls` parameter.
|
||||
* Modify the pod startup parameters for `argocd-notifications-controller` to include the
|
||||
`--argocd-repo-server-strict-tls` parameter
|
||||
|
||||
The `argocd-server` and `argocd-application-controller` workloads will now
|
||||
The `argocd-server`, `argocd-application-controller`, `argocd-notifications-controller`,
|
||||
and `argocd-applicationset-controller` workloads will now
|
||||
validate the TLS certificate of the `argocd-repo-server` by using the
|
||||
certificate stored in the `argocd-repo-server-tls` secret.
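As an illustration, one way to add the parameter to a workload is shown below; it assumes the flags are passed via the container's `args` field at index 0, so verify against your manifests (some installations pass flags via `command` or via the `argocd-cmd-params-cm` ConfigMap instead):

```bash
# Append --repo-server-strict-tls to the argocd-server container arguments.
kubectl patch -n argocd deployment argocd-server --type json \
  -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--repo-server-strict-tls"}]'
```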
|
||||
|
||||
@@ -245,7 +248,8 @@ secret.

In some scenarios where mTLS through sidecar proxies is involved (e.g.
in a service mesh), you may want to configure the connections between
`argocd-server`, `argocd-application-controller`, `argocd-notifications-controller`,
and `argocd-applicationset-controller` to `argocd-repo-server`
to not use TLS at all.

In this case, you will need to:

@@ -255,14 +259,18 @@ In this case, you will need to:

Also, consider restricting listening addresses to the loopback interface by specifying the
`--listen 127.0.0.1` parameter, so that the insecure endpoint is not exposed on
the pod's network interfaces, but is still available to the sidecar container.
* Configure `argocd-server`, `argocd-application-controller`,
  and `argocd-applicationset-controller` to not use TLS
  for connections to the `argocd-repo-server` by specifying the
  `--repo-server-plaintext` parameter in the pod container's startup arguments
* Modify the pod startup parameters for `argocd-notifications-controller` to include the
  `--argocd-repo-server-plaintext` parameter
* Configure `argocd-server` and `argocd-application-controller` to connect to
  the sidecar instead of directly to the `argocd-repo-server` service by
  specifying its address via the `--repo-server <address>` parameter

After this change, `argocd-server`, `argocd-application-controller`, `argocd-notifications-controller`,
and `argocd-applicationset-controller` will
use a plain text connection to the sidecar proxy, which will handle all aspects
of TLS to `argocd-repo-server`'s TLS sidecar proxy.

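For example, the relevant container arguments might end up looking like the sketch below; the sidecar address `localhost:8081` is an assumption for a typical mesh setup, not a value taken from this guide:

```yaml
# Fragment of the argocd-server Deployment spec (sketch only)
containers:
  - name: argocd-server
    args:
      - /usr/local/bin/argocd-server
      - --repo-server-plaintext   # plain text from this container; the sidecar adds TLS
      - --repo-server
      - localhost:8081            # assumed address of the local sidecar proxy
```
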
@@ -6,7 +6,7 @@

### Hydration paths must now be non-root

Source hydration (with the [Source Hydrator](../../../user-guide/source-hydrator/)) now requires that every application specify a non-root path.
Using the repository root (for example, "" or ".") is no longer supported. This change ensures
that hydration outputs are isolated to a dedicated subdirectory and prevents accidental overwrites
or deletions of important files stored at the root, such as CI pipelines, documentation, or configuration files.

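As a sketch (field names follow the Source Hydrator user guide; the repository URL and paths are placeholders), a valid configuration writes hydrated manifests to a dedicated subdirectory:

```yaml
spec:
  sourceHydrator:
    drySource:
      repoURL: https://github.com/example-org/example-repo.git  # placeholder
      targetRevision: main
      path: apps/my-app
    syncSource:
      targetBranch: environments/prod
      path: apps/my-app   # must be non-root; "" or "." is rejected
```
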
@@ -14,6 +14,7 @@ The `--force-conflicts` flag allows the apply operation to take ownership of fie

When Argo CD is upgraded using an Argo CD Application, it is required to enable Server-Side Apply in the Application spec:

``` yaml
syncPolicy:
  syncOptions:
    - ServerSideApply=true
```

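In context, the option sits under `spec.syncPolicy` of the Application that manages Argo CD itself (names below are placeholders):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: argocd        # placeholder name for the app managing Argo CD
  namespace: argocd
spec:
  project: default
  syncPolicy:
    syncOptions:
      - ServerSideApply=true
```
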
@@ -89,6 +90,8 @@ removed in a future release.
Argo CD v3.3 upgrades the bundled Helm version to 3.19.2. There are no breaking changes in Helm 3.19.2 according to the
[release notes](https://github.com/helm/helm/releases/tag/v3.19.0).

Helm 2.x is no longer supported, as we are dropping the `--client` flag in calls to `helm version`.

## Kustomize Upgraded to 5.8.0

Argo CD v3.3 upgrades the bundled Kustomize version from v5.7.0 to v5.8.0. According to the
@@ -111,4 +114,5 @@ If you rely on Helm charts within kustomization files, please review the details
* [keda.sh/ScaledJob](https://github.com/argoproj/argo-cd/commit/e58bdf2f87b5b60a05fde0b7837779061b170c08)
* [services.cloud.sap.com/ServiceBinding](https://github.com/argoproj/argo-cd/commit/51c9add05d9bc8f8fafc1631968eb853db53a904)
* [services.cloud.sap.com/ServiceInstance](https://github.com/argoproj/argo-cd/commit/51c9add05d9bc8f8fafc1631968eb853db53a904)
* [\_.cnrm.cloud.google.com/\_](https://github.com/argoproj/argo-cd/commit/30abebda3d930d93065eec8864aac7e0d56ae119)
* [grafana-org-operator.kubitus-project.gitlab.io/\_](https://github.com/argoproj/argo-cd/commit/ac1a2f8536701cc23684632a2d1bc3b20408b1a8)

19
docs/operator-manual/upgrading/3.3-3.4.md
Normal file
@@ -0,0 +1,19 @@
# v3.3 to v3.4

## Applications with `Missing` health status

The behavior of the Application health status has changed to be more consistent and informative. Previously, Applications would show a `Missing` health status inconsistently, depending on whether the missing resources had built-in or custom health checks defined.

**New behavior:**

- Applications now show `Missing` health **only when ALL resources are missing** (e.g., before the first sync)
- Individual missing resources no longer affect the Application's overall health status
- The health status now reflects the aggregated health of **existing resources**
- The `OutOfSync` status already indicates when resources are missing, making the health status redundant for individual missing resources
- If a defined resource health check explicitly returns `Missing` for an existing resource, that is still reflected in the overall Application health

**Impact:**

- Applications with some missing resources will now show the health of their existing resources (e.g., `Healthy`, `Progressing`, `Degraded`) instead of `Missing`
- Automation relying on the Application health status to detect missing resources should now check the sync status for `OutOfSync` instead, and optionally inspect individual resource health if needed
- Users can now distinguish between an Application that has never been synced (all resources missing = `Missing` health) and an Application with some resources deleted (which shows the health of the remaining resources)

@@ -39,6 +39,8 @@ kubectl apply -n argocd --server-side --force-conflicts -f https://raw.githubuse

<hr/>

- [v3.3 to v3.4](./3.3-3.4.md)
- [v3.2 to v3.3](./3.2-3.3.md)
- [v3.1 to v3.2](./3.1-3.2.md)
- [v3.0 to v3.1](./3.0-3.1.md)
- [v2.14 to v3.0](./2.14-3.0.md)

@@ -137,4 +137,4 @@ More info: [RBAC Configuration](../rbac.md)
> [!NOTE]
> Defining policies is not supported on Argo CD v2.
> To define policies, please [upgrade](../upgrading/overview.md)
> to v3.0.0 or later.

83
docs/operator-manual/user-management/gitlab-ci.md
Normal file
@@ -0,0 +1,83 @@
# GitLab CI

GitLab is an OAuth identity provider that can be used in GitLab CI
to generate tokens that identify the repository and where it runs.

See: <https://docs.gitlab.com/ci/secrets/id_token_authentication>

You need to use OAuth 2.0 Token Exchange. Some identity providers, such as Dex, support this
out of the box.

## Using Dex

Edit the `argocd-cm` ConfigMap and configure the `dex.config` section:

```yaml
dex.config: |
  connectors:
    - type: oidc
      id: gitlab-ci
      name: GitLab CI
      config:
        issuer: https://gitlab.com
        # If using GitLab self-hosted, then use your GitLab issuer
        scopes: [openid]
        userNameKey: sub
        insecureSkipEmailVerified: true
```

Argo CD automatically generates a static client named `argo-cd-cli` that you can use to get your token from GitLab CI.

Here is an example of a GitLab CI job that retrieves a valid Argo CD authentication token from Dex and uses it to perform operations with the CLI:

```yaml
deploy:
  id_tokens:
    GITLAB_OIDC_TOKEN:
      aud: https://argocd.example.com # Your Argo CD URL

  script:
    - apt-get update && apt-get install -y jq curl
    - curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64
    - install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
    - rm argocd-linux-amd64
    - |
      # Exchange the GitLab ID token for a Dex token
      DEX_URL="https://argocd.example.com/api/dex/token"
      DEX_TOKEN_RESPONSE=$(curl -sSf \
        "$DEX_URL" \
        --user argo-cd-cli: \
        --data-urlencode "connector_id=gitlab-ci" \
        --data-urlencode "grant_type=urn:ietf:params:oauth:grant-type:token-exchange" \
        --data-urlencode "scope=openid email profile federated:id" \
        --data-urlencode "requested_token_type=urn:ietf:params:oauth:token-type:access_token" \
        --data-urlencode "subject_token=$GITLAB_OIDC_TOKEN" \
        --data-urlencode "subject_token_type=urn:ietf:params:oauth:token-type:id_token")

      DEX_TOKEN=$(echo "$DEX_TOKEN_RESPONSE" | jq -r .access_token)

      # Use with the Argo CD CLI
      export ARGOCD_SERVER="argocd.example.com"
      export ARGOCD_OPTS="--grpc-web"
      export ARGOCD_AUTH_TOKEN="$DEX_TOKEN"
      argocd version
      argocd account get-user-info
      argocd app list
```

## Configuring RBAC

When using the Argo CD global RBAC config map, you can define your `policy.csv` like so:

```yaml
configs:
  rbac:
    policy.csv: |
      # Specific project (infra) for specific apps
      p, project_path:my-repo/my-project:*, applications, get, infra/*, allow
      # Only the main branch can sync under the production project
      p, project_path:my-repo/my-project:ref_type:branch:ref:main, applications, sync, production/*, allow
```

More info: [RBAC Configuration](../rbac.md)

@@ -25,7 +25,12 @@ Kubernetes), then the user effectively has the same privileges as that ServiceAc
   exec.enabled: "true"
   ```

2. Restart Argo CD

### Permissions for Kubernetes <1.31

Starting in Kubernetes 1.31, the `get` privilege is enough to exec into a container, so no additional permissions are required. Enabling the web terminal before Kubernetes 1.31 requires adding additional RBAC permissions.

1. Patch the `argocd-server` Role (if using namespaced Argo) or ClusterRole (if using clustered Argo) to allow `argocd-server`
to `exec` into pods

   - apiGroups:
@@ -45,7 +50,7 @@ to `exec` into pods
   kubectl patch clusterrole <argocd-server-clusterrole-name> --type='json' -p='[{"op": "add", "path": "/rules/-", "value": {"apiGroups": ["*"], "resources": ["pods/exec"], "verbs": ["create"]}}]'
   ```

2. Add RBAC rules to allow your users to `create` the `exec` resource, i.e.

   p, role:myrole, exec, create, */*, allow

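The first hunk above elides the body of the Role rule (only `- apiGroups:` is visible); written out, it corresponds to the `kubectl patch` shown, i.e. roughly:

```yaml
# Rule appended to the argocd-server Role/ClusterRole (mirrors the kubectl patch above)
- apiGroups:
    - "*"
  resources:
    - pods/exec
  verbs:
    - create
```
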
@@ -55,7 +55,7 @@ to reduce amount of data returned by the API server and improve the UI responsiv
**Pagination Cursor**

It is proposed to add `offset` and `limit` fields for pagination support in the Application List API.
The Watch API is a bit more complex. Both the Argo CD user interface and the CLI rely on the Watch API to display real-time updates of Argo CD applications.
The Watch API currently supports filtering by a project and an application name. In order to effectively
implement server-side pagination for the Watch API, we cannot rely on the order of the applications returned by the API server. Instead of
relying on the order, it is proposed to rely on the application name and use it as a cursor for pagination. Both the Applications List and Watch

@@ -81,7 +81,7 @@ When you are running `v1.4.x`, you can upgrade to `v1.4.3` by simply changing th
tags for `argocd-server`, `argocd-repo-server` and `argocd-controller` to `v1.4.3`.
The `v1.4.3` release does not contain additional functional bug fixes.

Likewise, when you are running `v1.5.x`, you can upgrade to `v1.5.2` by simply changing
the image tags for `argocd-server`, `argocd-repo-server` and `argocd-controller` to `v1.5.2`.
The `v1.5.2` release does not contain additional functional bug fixes.

@@ -13,51 +13,64 @@ recent minor releases.

|    | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](master/argocd-test.html) | 0 | 0 | 0 | 0 |
| [ui/yarn.lock](master/argocd-test.html) | 0 | 0 | 1 | 2 |
| [dex:v2.43.0](master/ghcr.io_dexidp_dex_v2.43.0.html) | 0 | 0 | 0 | 5 |
| [haproxy:3.0.8-alpine](master/public.ecr.aws_docker_library_haproxy_3.0.8-alpine.html) | 0 | 0 | 0 | 5 |
| [redis:8.2.3-alpine](master/public.ecr.aws_docker_library_redis_8.2.3-alpine.html) | 0 | 0 | 0 | 2 |
| [argocd:latest](master/quay.io_argoproj_argocd_latest.html) | 0 | 0 | 8 | 8 |
| [install.yaml](master/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](master/argocd-iac-namespace-install.html) | - | - | - | - |

### v3.3.0-rc3

|    | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](v3.3.0-rc3/argocd-test.html) | 0 | 0 | 0 | 0 |
| [ui/yarn.lock](v3.3.0-rc3/argocd-test.html) | 0 | 1 | 1 | 2 |
| [dex:v2.43.0](v3.3.0-rc3/ghcr.io_dexidp_dex_v2.43.0.html) | 0 | 0 | 0 | 5 |
| [haproxy:3.0.8-alpine](v3.3.0-rc3/public.ecr.aws_docker_library_haproxy_3.0.8-alpine.html) | 0 | 0 | 0 | 5 |
| [redis:8.2.3-alpine](v3.3.0-rc3/public.ecr.aws_docker_library_redis_8.2.3-alpine.html) | 0 | 0 | 0 | 2 |
| [argocd:v3.3.0-rc3](v3.3.0-rc3/quay.io_argoproj_argocd_v3.3.0-rc3.html) | 0 | 1 | 6 | 11 |
| [install.yaml](v3.3.0-rc3/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v3.3.0-rc3/argocd-iac-namespace-install.html) | - | - | - | - |

### v3.2.5

|    | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](v3.2.5/argocd-test.html) | 0 | 0 | 0 | 0 |
| [ui/yarn.lock](v3.2.5/argocd-test.html) | 0 | 1 | 3 | 2 |
| [dex:v2.43.0](v3.2.5/ghcr.io_dexidp_dex_v2.43.0.html) | 0 | 0 | 0 | 5 |
| [haproxy:3.0.8-alpine](v3.2.5/public.ecr.aws_docker_library_haproxy_3.0.8-alpine.html) | 0 | 0 | 0 | 5 |
| [redis:8.2.2-alpine](v3.2.5/public.ecr.aws_docker_library_redis_8.2.2-alpine.html) | 0 | 0 | 0 | 2 |
| [argocd:v3.2.5](v3.2.5/quay.io_argoproj_argocd_v3.2.5.html) | 0 | 0 | 6 | 11 |
| [install.yaml](v3.2.5/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v3.2.5/argocd-iac-namespace-install.html) | - | - | - | - |

### v3.1.11

|    | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](v3.1.11/argocd-test.html) | 0 | 0 | 0 | 0 |
| [ui/yarn.lock](v3.1.11/argocd-test.html) | 1 | 1 | 3 | 2 |
| [dex:v2.43.0](v3.1.11/ghcr.io_dexidp_dex_v2.43.0.html) | 0 | 0 | 0 | 5 |
| [haproxy:3.0.8-alpine](v3.1.11/public.ecr.aws_docker_library_haproxy_3.0.8-alpine.html) | 0 | 0 | 0 | 5 |
| [redis:7.2.11-alpine](v3.1.11/public.ecr.aws_docker_library_redis_7.2.11-alpine.html) | 0 | 0 | 0 | 2 |
| [argocd:v3.1.11](v3.1.11/quay.io_argoproj_argocd_v3.1.11.html) | 0 | 0 | 7 | 15 |
| [install.yaml](v3.1.11/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v3.1.11/argocd-iac-namespace-install.html) | - | - | - | - |

### v3.0.22

|    | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](v3.0.22/argocd-test.html) | 0 | 0 | 0 | 0 |
| [ui/yarn.lock](v3.0.22/argocd-test.html) | 1 | 2 | 4 | 4 |
| [dex:v2.41.1](v3.0.22/ghcr.io_dexidp_dex_v2.41.1.html) | 0 | 2 | 0 | 8 |
| [haproxy:3.0.8-alpine](v3.0.22/public.ecr.aws_docker_library_haproxy_3.0.8-alpine.html) | 0 | 0 | 0 | 5 |
| [redis:7.2.11-alpine](v3.0.22/public.ecr.aws_docker_library_redis_7.2.11-alpine.html) | 0 | 0 | 0 | 2 |
| [argocd:v3.0.22](v3.0.22/quay.io_argoproj_argocd_v3.0.22.html) | 0 | 0 | 7 | 15 |
| [redis:7.2.11-alpine](v3.0.22/redis_7.2.11-alpine.html) | 0 | 0 | 0 | 2 |
| [install.yaml](v3.0.22/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v3.0.22/argocd-iac-namespace-install.html) | - | - | - | - |

[The remaining hunks in this comparison touch only the generated Snyk test report pages. The changes are mechanical: report timestamps move from December 14th 2025 to January 18th 2026; line-number references in the image reports shift by ten lines; a `.card .label--exploit` style and "Exploit: Proof of Concept" / "Exploit: Not Defined" labels are added to the remaining vulnerability cards; and the go.mod report drops its previously listed MPL-2.0 license findings (github.com/r3labs/diff/v3, github.com/hashicorp/go-version, github.com/hashicorp/go-retryablehttp, github.com/hashicorp/go-cleanhttp, github.com/gosimple/slug), reducing its summary from 8 known vulnerabilities in 29 dependency paths (2868 dependencies) to 3 known vulnerabilities in 6 dependency paths (1016 dependencies). Several oversized report diffs are suppressed in this view, and some files are not shown because too many files changed.]