Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-21 18:18:48 +01:00)
Compare commits
73 Commits
| SHA1 |
|---|
| e9d03a633e |
| 030b4f982b |
| fafbd44489 |
| d7d9674e33 |
| e6f54030f0 |
| b4146969ed |
| 51c6375130 |
| b67eb40a45 |
| 8a0633b74a |
| 0d4f505954 |
| 2b6251dfed |
| 8f903c3a11 |
| 8d0dde1388 |
| 784f62ca6d |
| 33b5043405 |
| 88fe638aff |
| a29703877e |
| 95e7cdb16f |
| 122f4db3db |
| 2d65b26420 |
| 0ace9bb9a3 |
| 6398ec3dcb |
| 732b16fb2a |
| 024c7e6020 |
| 26b7fb2c61 |
| 8c4ab63a9c |
| 29f869c82f |
| c11e67d4bf |
| a0a18438ab |
| dabdf39772 |
| cd8df1721c |
| 27c5065308 |
| 1545390cd8 |
| 7bd02d7f02 |
| 86c9994394 |
| 6dd5e7a6d2 |
| 66b2f302d9 |
| a1df57df93 |
| 8884b27381 |
| be8e79eb31 |
| 6aa9c20e47 |
| 1963030721 |
| b227ef1559 |
| d1251f407a |
| 3db95b1fbe |
| 7628473802 |
| 059e8d220e |
| 1ba3929520 |
| a42ccaeeca |
| d75bcfd7b2 |
| 35e3897f61 |
| dc309cbe0d |
| a1f42488d9 |
| 973eccee0a |
| 8f8a1ecacb |
| 46409ae734 |
| 5f5d46c78b |
| 722036d447 |
| 001bfda068 |
| 4821d71e3d |
| ef8ac49807 |
| feab307df3 |
| 087378c669 |
| f3c8e1d5e3 |
| 28510cdda6 |
| 6a2df4380a |
| cd87a13a0d |
| 1453367645 |
| 50531e6ab3 |
| bf9f927d55 |
| ee0de13be4 |
| 4ac3f920d5 |
| 06ef059f9f |
85 .github/ISSUE_TEMPLATE/release.md (vendored)

@@ -9,79 +9,18 @@ assignees: ''
Target RC1 date: ___. __, ____
Target GA date: ___. __, ____

## RC1 Release Checklist

- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can't merge
- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can’t merge
- [ ] At least two days before RC1 date, draft RC blog post and submit it for review (or delegate this task)
- [ ] Create new release branch (or delegate this task to an Approver)
- [ ] Add the release branch to ReadTheDocs
- [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing)
- [ ] Run the [Init ArgoCD Release workflow](https://github.com/argoproj/argo-cd/actions/workflows/init-release.yaml) from the release branch
- [ ] Review and merge the generated version bump PR
- [ ] Run `./hack/trigger-release.sh` to push the release tag
- [ ] Monitor the [Publish ArgoCD Release workflow](https://github.com/argoproj/argo-cd/actions/workflows/release.yaml)
- [ ] Verify the release on [GitHub releases](https://github.com/argoproj/argo-cd/releases)
- [ ] Verify the container image on [Quay.io](https://quay.io/repository/argoproj/argocd?tab=tags)
- [ ] Confirm the new version appears in [Read the Docs](https://argo-cd.readthedocs.io/)
- [ ] Verify the docs release build in https://app.readthedocs.org/projects/argo-cd/ succeeded and retry if failed (requires an Approver with admin creds to readthedocs)
- [ ] Announce RC1 release
- [ ] Confirm that tweet and blog post are ready
- [ ] Publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements requesting help testing:
```
:mega: Argo CD v{MAJOR}.{MINOR}.{PATCH}-rc{RC_NUMBER} is OUT NOW! :argocd::tada:

Please go through the following resources to know more about the release:

Release notes: https://github.com/argoproj/argo-cd/releases/tag/v{VERSION}
Blog: {BLOG_POST_URL}

We'd love your help testing this release candidate! Please try it out in your environments and report any issues you find. This helps us ensure a stable GA release.

Thanks to all the folks who spent their time contributing to this release in any way possible!
```
- [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate during the RC period (or delegate this task to an Approver and coordinate timing)
- [ ] After creating the RC, open a documentation PR for the next minor version using [this](../../docs/operator-manual/templates/minor_version_upgrade.md) template.

## GA Release Checklist

- [ ] At GA release date, evaluate if any bugs justify delaying the release
- [ ] Prepare for EOL version (version that is 3 releases old)
- [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing)
- [ ] Edit the final patch release on GitHub and add the following notice at the top:
```markdown
> [!IMPORTANT]
> **END OF LIFE NOTICE**
>
> This is the final release of the {EOL_SERIES} release series. As of {GA_DATE}, this version has reached end of life and will no longer receive bug fixes or security updates.
>
> **Action Required**: Please upgrade to a [supported version](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) (v{SUPPORTED_VERSION_1}, v{SUPPORTED_VERSION_2}, or v{NEW_VERSION}).
```
- [ ] Cut GA release (or delegate this task to an Approver and coordinate timing)
- [ ] Run the [Init ArgoCD Release workflow](https://github.com/argoproj/argo-cd/actions/workflows/init-release.yaml) from the release branch
- [ ] Review and merge the generated version bump PR
- [ ] Run `./hack/trigger-release.sh` to push the release tag
- [ ] Monitor the [Publish ArgoCD Release workflow](https://github.com/argoproj/argo-cd/actions/workflows/release.yaml)
- [ ] Verify the release on [GitHub releases](https://github.com/argoproj/argo-cd/releases)
- [ ] Verify the container image on [Quay.io](https://quay.io/repository/argoproj/argocd?tab=tags)
- [ ] Verify the `stable` tag has been updated
- [ ] Confirm the new version appears in [Read the Docs](https://argo-cd.readthedocs.io/)
- [ ] Verify the docs release build in https://app.readthedocs.org/projects/argo-cd/ succeeded and retry if failed (requires an Approver with admin creds to readthedocs)
- [ ] Announce GA release with EOL notice
- [ ] Confirm that tweet and blog post are ready
- [ ] Publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements announcing the release and EOL:
```
:mega: Argo CD v{MAJOR}.{MINOR} is OUT NOW! :argocd::tada:

Please go through the following resources to know more about the release:

Upgrade instructions: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/{PREV_MINOR}-{MAJOR}.{MINOR}/
Blog: {BLOG_POST_URL}

:warning: IMPORTANT: With the release of Argo CD v{MAJOR}.{MINOR}, support for Argo CD v{EOL_VERSION} has officially reached End of Life (EOL).

Thanks to all the folks who spent their time contributing to this release in any way possible!
```
- [ ] Create new release branch
- [ ] Add the release branch to ReadTheDocs
- [ ] Confirm that tweet and blog post are ready
- [ ] Trigger the release
- [ ] After the release is finished, publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements with lots of emojis announcing the release and requesting help testing
- [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate (or delegate this task to an Approver and coordinate timing)
- [ ] At release date, evaluate if any bugs justify delaying the release. If not, cut the release (or delegate this task to an Approver and coordinate timing)
- [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing)
- [ ] After the release, post in #argo-cd that the {current minor version minus 3} has reached EOL (example: https://cloud-native.slack.com/archives/C01TSERG0KZ/p1667336234059729)
- [ ] (For the next release champion) Review the [items scheduled for the next release](https://github.com/orgs/argoproj/projects/25). If any item does not have an assignee who can commit to finish the feature, move it to the next release.
- [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again.
- [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again.
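Several checklist items above boil down to confirming that the tag, the GitHub release and the Quay image all exist for the version that was just cut. A hedged sketch of those spot checks (VERSION is a placeholder; the commands are standard `gh` and `docker` usage, not a script that exists in this repository):

```bash
VERSION=3.3.0-rc1   # placeholder for the version being verified

# GitHub release exists for the tag
gh release view "v${VERSION}" --repo argoproj/argo-cd

# Container image was published to Quay with the matching tag
docker pull "quay.io/argoproj/argocd:v${VERSION}"
```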
1 .github/configs/renovate-config.js (vendored)

@@ -4,7 +4,6 @@ module.exports = {
autodiscover: false,
allowPostUpgradeCommandTemplating: true,
allowedPostUpgradeCommands: ["make mockgen"],
binarySource: 'install',
extends: [
"github>argoproj/argo-cd//renovate-presets/commons.json5",
"github>argoproj/argo-cd//renovate-presets/custom-managers/shell.json5",
26 .github/pr-title-checker-config.json (vendored)

@@ -1,15 +1,15 @@
{
"LABEL": {
"name": "title needs formatting",
"color": "EEEEEE"
},
"CHECKS": {
"prefixes": ["[Bot] docs: "],
"regexp": "^(refactor|feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
},
"MESSAGES": {
"success": "PR title is valid",
"failure": "PR title is invalid",
"notice": "PR Title needs to pass regex '^(refactor|feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
"LABEL": {
"name": "title needs formatting",
"color": "EEEEEE"
},
"CHECKS": {
"prefixes": ["[Bot] docs: "],
"regexp": "^(feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
},
"MESSAGES": {
"success": "PR title is valid",
"failure": "PR title is invalid",
"notice": "PR Title needs to pass regex '^(feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
}
}
}
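The only difference between the two `CHECKS.regexp` variants above is whether `refactor` is an allowed type. For reviewers who want to sanity-check a title against the pattern locally, a minimal shell sketch (the helper name and sample titles are made up for illustration; it uses the shorter type list):

```bash
# Check a PR title against the pattern from pr-title-checker-config.json.
check_title() {
  if echo "$1" | grep -Eq '^(feat|fix|docs|test|ci|chore)!?(\(.*\))?!?:.*'; then
    echo "valid:   $1"
  else
    echo "invalid: $1"
  fi
}

check_title 'fix(appset): handle nil generator'   # valid
check_title 'feat!: drop legacy API'               # valid (breaking-change marker)
check_title 'Update README'                        # invalid, missing type prefix
```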
1 .github/workflows/README.md (vendored)

@@ -11,7 +11,6 @@
| release.yaml | Build images, cli-binaries, provenances, and post actions |
| scorecard.yaml | Generate scorecard for supply-chain security |
| update-snyk.yaml | Scheduled snyk reports |
| stale.yaml | Labels stale issues and PRs |

# Reusable workflows
8 .github/workflows/bump-major-version.yaml (vendored)

@@ -10,10 +10,10 @@ jobs:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
name: Automatically update major version
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -37,7 +37,7 @@ jobs:
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd

- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Add ~/go/bin to PATH
@@ -74,7 +74,7 @@ jobs:
rsync -a --exclude=.git /home/runner/go/src/github.com/argoproj/argo-cd/ ../argo-cd

- name: Create pull request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
commit-message: "Bump major version to ${{ steps.get-target-version.outputs.TARGET_VERSION }}"
title: "Bump major version to ${{ steps.get-target-version.outputs.TARGET_VERSION }}"
23 .github/workflows/cherry-pick-single.yml (vendored)

@@ -28,7 +28,7 @@ on:
jobs:
cherry-pick:
name: Cherry Pick to ${{ inputs.version_number }}
runs-on: ubuntu-24.04
runs-on: ubuntu-latest
steps:
- name: Generate a token
id: generate-token
@@ -38,7 +38,7 @@ jobs:
private-key: ${{ secrets.CHERRYPICK_APP_PRIVATE_KEY }}

- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
with:
fetch-depth: 0
token: ${{ steps.generate-token.outputs.token }}
@@ -91,17 +91,11 @@ jobs:
- name: Create Pull Request
run: |
# Create cherry-pick PR
TITLE="${PR_TITLE} (cherry-pick #${{ inputs.pr_number }} for ${{ inputs.version_number }})"
BODY=$(cat <<EOF
Cherry-picked ${PR_TITLE} (#${{ inputs.pr_number }})

${{ steps.cherry-pick.outputs.signoff }}
EOF
)

gh pr create \
--title "$TITLE" \
--body "$BODY" \
--title "${{ inputs.pr_title }} (cherry-pick #${{ inputs.pr_number }} for ${{ inputs.version_number }})" \
--body "Cherry-picked ${{ inputs.pr_title }} (#${{ inputs.pr_number }})

${{ steps.cherry-pick.outputs.signoff }}" \
--base "${{ steps.cherry-pick.outputs.target_branch }}" \
--head "${{ steps.cherry-pick.outputs.branch_name }}"

@@ -109,13 +103,12 @@ jobs:
gh pr comment ${{ inputs.pr_number }} \
--body "🍒 Cherry-pick PR created for ${{ inputs.version_number }}: #$(gh pr list --head ${{ steps.cherry-pick.outputs.branch_name }} --json number --jq '.[0].number')"
env:
PR_TITLE: ${{ inputs.pr_title }}
GH_TOKEN: ${{ steps.generate-token.outputs.token }}

- name: Comment on failure
if: failure()
run: |
gh pr comment ${{ inputs.pr_number }} \
--body "❌ Cherry-pick failed for ${{ inputs.version_number }}. Please check the [workflow logs](https://github.com/argoproj/argo-cd/actions/runs/${{ github.run_id }}) for details."
--body "❌ Cherry-pick failed for ${{ inputs.version_number }}. Please check the workflow logs for details."
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
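One side of this hunk builds the PR title and body from a `PR_TITLE` environment variable and a heredoc instead of interpolating `${{ inputs.pr_title }}` directly into the script, which keeps arbitrary characters in a PR title from being parsed as shell syntax. A standalone sketch of that pattern (the plain variables replace the workflow's `${{ ... }}` expressions and are assumed to be provided by the caller):

```bash
#!/usr/bin/env bash
# PR_TITLE, PR_NUMBER, VERSION_NUMBER, SIGNOFF, TARGET_BRANCH and BRANCH_NAME
# stand in for values the workflow passes via `env:` and step outputs.

TITLE="${PR_TITLE} (cherry-pick #${PR_NUMBER} for ${VERSION_NUMBER})"
BODY=$(cat <<EOF
Cherry-picked ${PR_TITLE} (#${PR_NUMBER})

${SIGNOFF}
EOF
)

# Quoting the variables keeps titles containing quotes, backticks or dollar
# signs from being re-interpreted by the shell.
gh pr create \
  --title "$TITLE" \
  --body "$BODY" \
  --base "$TARGET_BRANCH" \
  --head "$BRANCH_NAME"
```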
2 .github/workflows/cherry-pick.yml (vendored)

@@ -14,7 +14,7 @@ jobs:
(github.event.action == 'labeled' && startsWith(github.event.label.name, 'cherry-pick/')) ||
(github.event.action == 'closed' && contains(toJSON(github.event.pull_request.labels.*.name), 'cherry-pick/'))
)
runs-on: ubuntu-24.04
runs-on: ubuntu-latest
outputs:
labels: ${{ steps.extract-labels.outputs.labels }}
steps:
152 .github/workflows/ci-build.yaml (vendored)

@@ -14,7 +14,7 @@ on:
env:
# Golang version to use across CI steps
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.26.0'
GOLANG_VERSION: '1.25.5'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,14 +25,14 @@ permissions:

jobs:
changes:
runs-on: ubuntu-24.04
runs-on: ubuntu-latest
outputs:
backend: ${{ steps.filter.outputs.backend_any_changed }}
frontend: ${{ steps.filter.outputs.frontend_any_changed }}
docs: ${{ steps.filter.outputs.docs_any_changed }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
id: filter
with:
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
@@ -50,14 +50,14 @@ jobs:
check-go:
name: Ensure Go modules synchronicity
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -67,21 +67,22 @@ jobs:
run: |
go mod tidy
git diff --exit-code -- .

build-go:
name: Build & cache Go code
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -97,27 +98,27 @@ jobs:
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: Lint Go code
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with:
# renovate: datasource=go packageName=github.com/golangci/golangci-lint/v2 versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
version: v2.9.0
# renovate: datasource=go packageName=github.com/golangci/golangci-lint versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
version: v2.4.0
args: --verbose

test-go:
name: Run unit tests for Go packages
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- build-go
- changes
@@ -128,11 +129,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -152,7 +153,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -173,7 +174,7 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate test results artifacts
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: test-results
path: test-results
@@ -181,7 +182,7 @@ jobs:
test-go-race:
name: Run unit tests with -race for Go packages
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- build-go
- changes
@@ -192,11 +193,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -216,7 +217,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -237,7 +238,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: race-results
path: test-results/
@@ -245,21 +246,20 @@ jobs:
codegen:
name: Check changes to generated code
if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.docs == 'true'}}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
# generalizing repo name for forks: ${{ github.event.repository.name }}
run: |
mkdir -p ~/go/src/github.com/argoproj
cp -a ../${{ github.event.repository.name }} ~/go/src/github.com/argoproj
cp -a ../argo-cd ~/go/src/github.com/argoproj
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
@@ -271,14 +271,12 @@ jobs:
# We need to vendor go modules for codegen yet
go mod download
go mod vendor -v
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Install toolchain for codegen
run: |
make install-codegen-tools-local
make install-go-tools-local
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
@@ -289,33 +287,31 @@ jobs:
export GOPATH=$(go env GOPATH)
git checkout -- go.mod go.sum
make codegen-local
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Check nothing has changed
run: |
set -xo pipefail
git diff --exit-code -- . ':!go.sum' ':!go.mod' ':!assets/swagger.json' | tee codegen.patch
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd

build-ui:
name: Build, test & lint UI code
# We run UI logic for backend changes so that we have a complete set of coverage documents to send to codecov.
if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.frontend == 'true' }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup NodeJS
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
with:
# renovate: datasource=node-version packageName=node versioning=node
node-version: '22.9.0'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -338,9 +334,9 @@ jobs:
working-directory: ui/

shellcheck:
runs-on: ubuntu-24.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- run: |
sudo apt-get install shellcheck
shellcheck -e SC2059 -e SC2154 -e SC2034 -e SC2016 -e SC1091 $(find . -type f -name '*.sh' | grep -v './ui/node_modules') | tee sc.log
@@ -349,7 +345,7 @@ jobs:
analyze:
name: Process & analyze test artifacts
if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.frontend == 'true' }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- test-go
- build-ui
@@ -357,15 +353,14 @@ jobs:
- test-e2e
env:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
codecov_secret: ${{ secrets.CODECOV_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -373,12 +368,12 @@ jobs:
run: |
rm -rf ui/node_modules/argo-ui/node_modules
- name: Get e2e code coverage
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
name: e2e-code-coverage
path: e2e-code-coverage
- name: Get unit test code coverage
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
name: test-results
path: test-results
@@ -390,52 +385,52 @@ jobs:
run: |
go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller,e2e-code-coverage/commit-server -o test-results/full-coverage.out
- name: Upload code coverage information to codecov.io
# Only run when the workflow is for upstream (PR target or push is in argoproj/argo-cd).
if: github.repository == 'argoproj/argo-cd'
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
with:
files: test-results/full-coverage.out
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Upload test results to Codecov
# Codecov uploads test results to Codecov.io on upstream master branch.
if: github.repository == 'argoproj/argo-cd' && github.ref == 'refs/heads/master' && github.event_name == 'push'
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
if: github.ref == 'refs/heads/master' && github.event_name == 'push' && github.repository == 'argoproj/argo-cd'
uses: codecov/test-results-action@47f89e9acb64b76debcd5ea40642d25a4adced9f # v1.1.1
with:
files: test-results/junit.xml
file: test-results/junit.xml
fail_ci_if_error: true
token: ${{ secrets.CODECOV_TOKEN }}
report_type: test_results
- name: Perform static code analysis using SonarCloud
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
uses: SonarSource/sonarqube-scan-action@a31c9398be7ace6bbfaf30c0bd5d415f843d45e9 # v7.0.0
uses: SonarSource/sonarqube-scan-action@1a6d90ebcb0e6a6b1d87e37ba693fe453195ae25 # v5.3.1
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ${{ github.repository == 'argoproj/argo-cd' && 'oracle-vm-16cpu-64gb-x86-64' || 'ubuntu-24.04' }}
runs-on: oracle-vm-16cpu-64gb-x86-64
strategy:
fail-fast: false
matrix:
# latest: true means that this version must upload the coverage report to codecov.io
# We designate the latest version because we only collect code coverage for that version.
k3s:
- version: v1.35.0
latest: true
- version: v1.34.2
latest: false
latest: true
- version: v1.33.1
latest: false
- version: v1.32.1
latest: false
- version: v1.31.0
latest: false
needs:
- build-go
- changes
env:
GOPATH: /home/ubuntu/go
ARGOCD_FAKE_IN_CLUSTER: 'true'
ARGOCD_SSH_DATA_PATH: '/tmp/argo-e2e/app/config/ssh'
ARGOCD_TLS_DATA_PATH: '/tmp/argo-e2e/app/config/tls'
ARGOCD_E2E_SSH_KNOWN_HOSTS: '../fixture/certs/ssh_known_hosts'
ARGOCD_E2E_K3S: 'true'
ARGOCD_IN_CI: 'true'
ARGOCD_E2E_APISERVER_PORT: '8088'
@@ -452,14 +447,11 @@ jobs:
swap-storage: false
tool-cache: false
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Set GOPATH
run: |
echo "GOPATH=$HOME/go" >> $GITHUB_ENV
- name: GH actions workaround - Kill XSP4 process
run: |
sudo pkill mono || true
@@ -470,19 +462,19 @@ jobs:
set -x
curl -sfL https://get.k3s.io | sh -
sudo chmod -R a+rw /etc/rancher/k3s
sudo mkdir -p $HOME/.kube && sudo chown -R $(whoami) $HOME/.kube
sudo mkdir -p $HOME/.kube && sudo chown -R ubuntu $HOME/.kube
sudo k3s kubectl config view --raw > $HOME/.kube/config
sudo chown $(whoami) $HOME/.kube/config
sudo chown ubuntu $HOME/.kube/config
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Add ~/go/bin to PATH
run: |
echo "$HOME/go/bin" >> $GITHUB_PATH
echo "/home/ubuntu/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
@@ -502,13 +494,13 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.44.0
docker pull ghcr.io/dexidp/dex:v2.43.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:8.2.3-alpine
docker pull redis:8.2.2-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
chown $(whoami) dist
chown ubuntu dist
- name: Run E2E server and wait for it being available
timeout-minutes: 30
run: |
@@ -534,13 +526,13 @@ jobs:
goreman run stop-all || echo "goreman trouble"
sleep 30
- name: Upload e2e coverage report
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: e2e-code-coverage
path: /tmp/coverage
if: ${{ matrix.k3s.latest }}
- name: Upload e2e-server logs
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: e2e-server-k8s${{ matrix.k3s.version }}.log
path: /tmp/e2e-server.log
@@ -558,7 +550,7 @@ jobs:
needs:
- test-e2e
- changes
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
steps:
- run: |
result="${{ needs.test-e2e.result }}"
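The analyze job above merges the binary-format coverage directories from the unit tests and the instrumented e2e services with `go tool covdata percent`. For anyone reproducing that step locally, a hedged sketch of a follow-up conversion (the directory names come from the workflow; producing a text profile with `covdata textfmt` is standard Go tooling, not something the workflow itself does):

```bash
# Same input directories as the workflow's `go tool covdata percent` step,
# but emitted as a classic text profile that `go tool cover` understands.
go tool covdata textfmt \
  -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller,e2e-code-coverage/commit-server \
  -o full-coverage.txt

# Per-function summary; the last line is the overall total.
go tool cover -func=full-coverage.txt | tail -n 1
```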
6 .github/workflows/codeql.yml (vendored)

@@ -26,14 +26,14 @@ jobs:
if: github.repository == 'argoproj/argo-cd' || vars.enable_codeql

# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0

# Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version-file: go.mod
22 .github/workflows/image-reuse.yaml (vendored)

@@ -51,32 +51,32 @@ jobs:
contents: read
packages: write # Used to push images to `ghcr.io` if used.
id-token: write # Needed to create an OIDC token for keyless signing
runs-on: ubuntu-24.04
outputs:
runs-on: ubuntu-22.04
outputs:
image-digest: ${{ steps.image.outputs.digest }}
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
if: ${{ github.ref_type == 'tag'}}

- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
if: ${{ github.ref_type != 'tag'}}

- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ inputs.go-version }}
cache: false

- name: Install cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0

- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
- uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

- name: Setup tags for container image as a CSV type
run: |
@@ -103,7 +103,7 @@ jobs:
echo 'EOF' >> $GITHUB_ENV

- name: Login to Quay.io
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
with:
registry: quay.io
username: ${{ secrets.quay_username }}
@@ -111,7 +111,7 @@ jobs:
if: ${{ inputs.quay_image_name && inputs.push }}

- name: Login to GitHub Container Registry
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
with:
registry: ghcr.io
username: ${{ secrets.ghcr_username }}
@@ -119,7 +119,7 @@ jobs:
if: ${{ inputs.ghcr_image_name && inputs.push }}

- name: Login to dockerhub Container Registry
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
with:
username: ${{ secrets.docker_username }}
password: ${{ secrets.docker_password }}
@@ -142,7 +142,7 @@ jobs:

- name: Build and push container image
id: image
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 #v6.19.2
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 #v6.18.0
with:
context: .
platforms: ${{ inputs.platforms }}
64 .github/workflows/image.yaml (vendored)

@@ -19,49 +19,16 @@ jobs:
set-vars:
permissions:
contents: read
# Always run to calculate variables - other jobs check outputs
runs-on: ubuntu-24.04
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
outputs:
image-tag: ${{ steps.image.outputs.tag}}
platforms: ${{ steps.platforms.outputs.platforms }}
image_namespace: ${{ steps.image.outputs.image_namespace }}
image_repository: ${{ steps.image.outputs.image_repository }}
quay_image_name: ${{ steps.image.outputs.quay_image_name }}
ghcr_image_name: ${{ steps.image.outputs.ghcr_image_name }}
ghcr_provenance_image: ${{ steps.image.outputs.ghcr_provenance_image }}
allow_ghcr_publish: ${{ steps.image.outputs.allow_ghcr_publish }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0

- name: Set image tag and names
run: |
# Calculate image tag
TAG="$(cat ./VERSION)-${GITHUB_SHA::8}"
echo "tag=$TAG" >> $GITHUB_OUTPUT

# Calculate image names with defaults
IMAGE_NAMESPACE="${{ vars.IMAGE_NAMESPACE || 'argoproj' }}"
IMAGE_REPOSITORY="${{ vars.IMAGE_REPOSITORY || 'argocd' }}"
GHCR_NAMESPACE="${{ vars.GHCR_NAMESPACE || github.repository }}"
GHCR_REPOSITORY="${{ vars.GHCR_REPOSITORY || 'argocd' }}"

echo "image_namespace=$IMAGE_NAMESPACE" >> $GITHUB_OUTPUT
echo "image_repository=$IMAGE_REPOSITORY" >> $GITHUB_OUTPUT

# Construct image name
echo "quay_image_name=quay.io/$IMAGE_NAMESPACE/$IMAGE_REPOSITORY:latest" >> $GITHUB_OUTPUT

ALLOW_GHCR_PUBLISH=false
if [[ "${{ github.repository }}" == "argoproj/argo-cd" || "$GHCR_NAMESPACE" != argoproj/* ]]; then
ALLOW_GHCR_PUBLISH=true
echo "ghcr_image_name=ghcr.io/$GHCR_NAMESPACE/$GHCR_REPOSITORY:$TAG" >> $GITHUB_OUTPUT
echo "ghcr_provenance_image=ghcr.io/$GHCR_NAMESPACE/$GHCR_REPOSITORY" >> $GITHUB_OUTPUT
else
echo "GhCR publish skipped: refusing to push to namespace '$GHCR_NAMESPACE'. Please override GHCR_* for forks." >&2
echo "ghcr_image_name=" >> $GITHUB_OUTPUT
echo "ghcr_provenance_image=" >> $GITHUB_OUTPUT
fi
echo "allow_ghcr_publish=$ALLOW_GHCR_PUBLISH" >> $GITHUB_OUTPUT
- name: Set image tag for ghcr
run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
id: image

- name: Determine image platforms to use
@@ -81,12 +48,12 @@ jobs:
contents: read
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
id-token: write # for creating OIDC tokens for signing.
if: ${{ (github.repository == 'argoproj/argo-cd' || needs.set-vars.outputs.image_namespace != 'argoproj') && github.event_name != 'push' }}
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name != 'push' }}
uses: ./.github/workflows/image-reuse.yaml
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.26.0
go-version: 1.25.5
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false

@@ -96,14 +63,14 @@ jobs:
contents: read
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
id-token: write # for creating OIDC tokens for signing.
if: ${{ (github.repository == 'argoproj/argo-cd' || needs.set-vars.outputs.image_namespace != 'argoproj') && github.event_name == 'push' }}
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
uses: ./.github/workflows/image-reuse.yaml
with:
quay_image_name: ${{ needs.set-vars.outputs.quay_image_name }}
ghcr_image_name: ${{ needs.set-vars.outputs.ghcr_image_name }}
quay_image_name: quay.io/argoproj/argocd:latest
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.26.0
go-version: 1.25.5
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:
@@ -114,17 +81,16 @@ jobs:

build-and-publish-provenance: # Push attestations to GHCR, latest image is polluting quay.io
needs:
- set-vars
- build-and-publish
permissions:
actions: read # for detecting the Github Actions environment.
id-token: write # for creating OIDC tokens for signing.
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
if: ${{ (github.repository == 'argoproj/argo-cd' || needs.set-vars.outputs.image_namespace != 'argoproj') && github.event_name == 'push' && needs.set-vars.outputs.allow_ghcr_publish == 'true'}}
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
with:
image: ${{ needs.set-vars.outputs.ghcr_provenance_image }}
image: ghcr.io/argoproj/argo-cd/argocd
digest: ${{ needs.build-and-publish.outputs.image-digest }}
registry-username: ${{ github.actor }}
secrets:
@@ -138,9 +104,9 @@ jobs:
contents: write # for git to push upgrade commit if not already deployed
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
env:
TOKEN: ${{ secrets.TOKEN }}
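The `Set image tag and names` step above derives the image tag from the VERSION file plus the first eight characters of the commit SHA, and only allows GHCR pushes for upstream or for forks whose GHCR_NAMESPACE points away from argoproj. A standalone sketch of that logic (GITHUB_SHA, GITHUB_REPOSITORY and the GHCR_* values are placeholders for what the workflow reads from its context and repository variables; the example VERSION is invented):

```bash
#!/usr/bin/env bash
set -euo pipefail

# e.g. VERSION contains "3.3.0" and GITHUB_SHA is the full 40-char commit SHA.
TAG="$(cat ./VERSION)-${GITHUB_SHA::8}"       # -> 3.3.0-e9d03a63
echo "tag=$TAG"

GHCR_NAMESPACE="${GHCR_NAMESPACE:-$GITHUB_REPOSITORY}"
GHCR_REPOSITORY="${GHCR_REPOSITORY:-argocd}"

ALLOW_GHCR_PUBLISH=false
# Upstream always publishes; forks must not push into the argoproj/* namespace.
if [[ "$GITHUB_REPOSITORY" == "argoproj/argo-cd" || "$GHCR_NAMESPACE" != argoproj/* ]]; then
  ALLOW_GHCR_PUBLISH=true
  echo "ghcr_image_name=ghcr.io/$GHCR_NAMESPACE/$GHCR_REPOSITORY:$TAG"
fi
echo "allow_ghcr_publish=$ALLOW_GHCR_PUBLISH"
```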
12 .github/workflows/init-release.yaml (vendored)

@@ -20,16 +20,10 @@ jobs:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
name: Automatically generate version and manifests on ${{ inputs.TARGET_BRANCH }}
runs-on: ubuntu-24.04
env:
# Calculate image names with defaults, this will be used in the make manifests-local command
# to generate the correct image name in the manifests
IMAGE_REGISTRY: ${{ vars.IMAGE_REGISTRY || 'quay.io' }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE || 'argoproj' }}
IMAGE_REPOSITORY: ${{ vars.IMAGE_REPOSITORY || 'argocd' }}
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -70,7 +64,7 @@ jobs:
git stash pop

- name: Create pull request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
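The env block added here feeds registry, namespace and repository defaults into manifest generation, so forks can regenerate manifests that point at their own images. A hedged local equivalent (the variable names come from the diff; that `make manifests-local` honours these exact overrides is an assumption based on the workflow comment):

```bash
# Regenerate installation manifests with a fork's image coordinates.
IMAGE_REGISTRY="${IMAGE_REGISTRY:-quay.io}" \
IMAGE_NAMESPACE="${IMAGE_NAMESPACE:-argoproj}" \
IMAGE_REPOSITORY="${IMAGE_REPOSITORY:-argocd}" \
  make manifests-local
```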
2 .github/workflows/pr-title-check.yml (vendored)

@@ -21,7 +21,7 @@ jobs:
contents: read
pull-requests: read
name: Validate PR Title
runs-on: ubuntu-24.04
runs-on: ubuntu-latest
steps:
- uses: thehanimo/pr-title-checker@7fbfe05602bdd86f926d3fb3bccb6f3aed43bc70 # v1.4.3
with:
84
.github/workflows/release.yaml
vendored
84
.github/workflows/release.yaml
vendored
@@ -11,22 +11,21 @@ permissions: {}
|
||||
|
||||
env:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.26.0' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.25.5' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
needs: [setup-variables]
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # used to push images to `ghcr.io` if used.
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
uses: ./.github/workflows/image-reuse.yaml
|
||||
with:
|
||||
quay_image_name: ${{ needs.setup-variables.outputs.quay_image_name }}
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.26.0
|
||||
go-version: 1.25.5
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
@@ -35,20 +34,14 @@ jobs:
|
||||
|
||||
setup-variables:
|
||||
name: Setup Release Variables
|
||||
if: github.repository == 'argoproj/argo-cd' || (github.repository_owner != 'argoproj' && vars.ENABLE_FORK_RELEASES == 'true' && vars.IMAGE_NAMESPACE && vars.IMAGE_NAMESPACE != 'argoproj')
|
||||
runs-on: ubuntu-24.04
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
outputs:
|
||||
is_pre_release: ${{ steps.var.outputs.is_pre_release }}
|
||||
is_latest_release: ${{ steps.var.outputs.is_latest_release }}
|
||||
enable_fork_releases: ${{ steps.var.outputs.enable_fork_releases }}
|
||||
image_namespace: ${{ steps.var.outputs.image_namespace }}
|
||||
image_repository: ${{ steps.var.outputs.image_repository }}
|
||||
quay_image_name: ${{ steps.var.outputs.quay_image_name }}
|
||||
provenance_image: ${{ steps.var.outputs.provenance_image }}
|
||||
allow_fork_release: ${{ steps.var.outputs.allow_fork_release }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -74,36 +67,18 @@ jobs:
|
||||
fi
|
||||
echo "is_pre_release=$PRE_RELEASE" >> $GITHUB_OUTPUT
|
||||
echo "is_latest_release=$IS_LATEST" >> $GITHUB_OUTPUT
|
||||
|
||||
# Calculate configuration with defaults
|
||||
ENABLE_FORK_RELEASES="${{ vars.ENABLE_FORK_RELEASES || 'false' }}"
|
||||
IMAGE_NAMESPACE="${{ vars.IMAGE_NAMESPACE || 'argoproj' }}"
|
||||
IMAGE_REPOSITORY="${{ vars.IMAGE_REPOSITORY || 'argocd' }}"
|
||||
|
||||
echo "enable_fork_releases=$ENABLE_FORK_RELEASES" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "image_namespace=$IMAGE_NAMESPACE" >> $GITHUB_OUTPUT
|
||||
echo "image_repository=$IMAGE_REPOSITORY" >> $GITHUB_OUTPUT
|
||||
echo "quay_image_name=quay.io/$IMAGE_NAMESPACE/$IMAGE_REPOSITORY:${{ github.ref_name }}" >> $GITHUB_OUTPUT
|
||||
echo "provenance_image=quay.io/$IMAGE_NAMESPACE/$IMAGE_REPOSITORY" >> $GITHUB_OUTPUT
|
||||
|
||||
ALLOW_FORK_RELEASE=false
|
||||
if [[ "${{ github.repository_owner }}" != "argoproj" && "$ENABLE_FORK_RELEASES" == "true" && "$IMAGE_NAMESPACE" != "argoproj" && "${{ github.ref }}" == refs/tags/* ]]; then
|
||||
ALLOW_FORK_RELEASE=true
|
||||
fi
|
||||
echo "allow_fork_release=$ALLOW_FORK_RELEASE" >> $GITHUB_OUTPUT
|
||||
|
||||
argocd-image-provenance:
|
||||
needs: [setup-variables, argocd-image]
|
||||
needs: [argocd-image]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment.
|
||||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
|
||||
with:
|
||||
image: ${{ needs.setup-variables.outputs.provenance_image }}
|
||||
image: quay.io/argoproj/argocd
|
||||
digest: ${{ needs.argocd-image.outputs.image-digest }}
|
||||
secrets:
|
||||
registry-username: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
@@ -116,15 +91,15 @@ jobs:
|
||||
- argocd-image-provenance
|
||||
permissions:
|
||||
contents: write # used for uploading assets
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
runs-on: ubuntu-24.04
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
GORELEASER_MAKE_LATEST: ${{ needs.setup-variables.outputs.is_latest_release }}
|
||||
outputs:
|
||||
hashes: ${{ steps.hash.outputs.hashes }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -133,7 +108,7 @@ jobs:
|
||||
run: git fetch --force --tags
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
cache: false
|
||||
@@ -168,8 +143,6 @@ jobs:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }}
|
||||
GIT_TREE_STATE: ${{ env.GIT_TREE_STATE }}
|
||||
# Used to determine the current repository in the goreleaser config to display correct manifest links
|
||||
GORELEASER_CURRENT_REPOSITORY: ${{ github.repository }}
|
||||
|
||||
- name: Generate subject for provenance
|
||||
id: hash
|
||||
@@ -186,12 +159,12 @@ jobs:
|
||||
echo "hashes=$hashes" >> $GITHUB_OUTPUT
|
||||
|
||||
goreleaser-provenance:
|
||||
needs: [goreleaser, setup-variables]
|
||||
needs: [goreleaser]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment
|
||||
id-token: write # Needed for provenance signing and ID
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
|
||||
with:
|
||||
@@ -204,22 +177,21 @@ jobs:
|
||||
needs:
|
||||
- argocd-image
|
||||
- goreleaser
|
||||
- setup-variables
|
||||
permissions:
|
||||
contents: write # Needed for release uploads
|
||||
outputs:
|
||||
hashes: ${{ steps.sbom-hash.outputs.hashes }}
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
runs-on: ubuntu-24.04
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
cache: false
|
||||
@@ -235,7 +207,7 @@ jobs:
|
||||
# managers (gomod, yarn, npm).
|
||||
PROJECT_FOLDERS: '.,./ui'
|
||||
# full qualified name of the docker image to be inspected
|
||||
DOCKER_IMAGE: ${{ needs.setup-variables.outputs.quay_image_name }}
|
||||
DOCKER_IMAGE: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
run: |
|
||||
yarn install --cwd ./ui
|
||||
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
|
||||
@@ -264,7 +236,7 @@ jobs:
|
||||
echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Upload SBOM
|
||||
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2.5.0
|
||||
uses: softprops/action-gh-release@6cbd405e2c4e67a21c47fa9e383d020e4e28b836 # v2.3.3
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
@@ -272,12 +244,12 @@ jobs:
|
||||
/tmp/sbom.tar.gz
|
||||
|
||||
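The `hashes` output computed above is what the generic SLSA generator later consumes as its subject list: base64-encoded `sha256sum` output. A minimal sketch for reproducing that subject string locally, assuming a released `sbom.tar.gz` has been downloaded to the current directory (hypothetical path):

```shell
# Recompute the provenance subject for a downloaded SBOM archive.
# The workflow does the same for /tmp/sbom.tar.gz before handing the value
# to generator_generic_slsa3.yml.
sha256sum sbom.tar.gz | base64 -w0
```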
sbom-provenance:
|
||||
needs: [generate-sbom, setup-variables]
|
||||
needs: [generate-sbom]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment
|
||||
id-token: write # Needed for provenance signing and ID
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
|
||||
with:
|
||||
@@ -294,13 +266,13 @@ jobs:
|
||||
permissions:
|
||||
contents: write # Needed to push commit to update stable tag
|
||||
pull-requests: write # Needed to create PR for VERSION update.
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
runs-on: ubuntu-24.04
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
TAG_STABLE: ${{ needs.setup-variables.outputs.is_latest_release }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -344,7 +316,7 @@ jobs:
|
||||
if: ${{ env.UPDATE_VERSION == 'true' }}
|
||||
|
||||
- name: Create PR to update VERSION on master branch
|
||||
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
|
||||
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
|
||||
with:
|
||||
commit-message: Bump version in master
|
||||
title: 'chore: Bump version in master'
|
||||
|
||||
7 .github/workflows/renovate.yaml vendored
@@ -9,8 +9,7 @@ permissions:
|
||||
|
||||
jobs:
|
||||
renovate:
|
||||
runs-on: ubuntu-24.04
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Get token
|
||||
id: get_token
|
||||
@@ -20,10 +19,10 @@ jobs:
|
||||
private-key: ${{ secrets.RENOVATE_APP_PRIVATE_KEY }}
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # 6.0.2
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
|
||||
|
||||
- name: Self-hosted Renovate
|
||||
uses: renovatebot/github-action@d65ef9e20512193cc070238b49c3873a361cd50c #46.1.1
|
||||
uses: renovatebot/github-action@f8af9272cd94a4637c29f60dea8731afd3134473 #43.0.12
|
||||
with:
|
||||
configurationFile: .github/configs/renovate-config.js
|
||||
token: '${{ steps.get_token.outputs.token }}'
|
||||
|
||||
8 .github/workflows/scorecard.yaml vendored
@@ -17,7 +17,7 @@ permissions: read-all
|
||||
jobs:
|
||||
analysis:
|
||||
name: Scorecards analysis
|
||||
runs-on: ubuntu-24.04
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
# Needed to upload the results to code-scanning dashboard.
|
||||
security-events: write
|
||||
@@ -30,12 +30,12 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
|
||||
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
@@ -54,7 +54,7 @@ jobs:
|
||||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
|
||||
33 .github/workflows/stale.yaml vendored
@@ -1,33 +0,0 @@
|
||||
name: "Label stale issues and PRs"
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * *" #Runs midnight 12AM UTC
|
||||
|
||||
#Added Recommended permissions
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
stale-issue-message: >
|
||||
This issue has been marked as stale because it has had no activity for 90 days. Please comment if this is still relevant.
|
||||
|
||||
stale-pr-message: >
|
||||
This pull request has been marked as stale because it has had no activity for 90 days. Please comment if this is still relevant.
|
||||
|
||||
days-before-stale: 90
|
||||
days-before-close: -1 # Auto-close disabled
|
||||
|
||||
exempt-issue-labels: >
|
||||
bug, security, breaking/high, breaking/medium, breaking/low
|
||||
|
||||
# General configuration
|
||||
operations-per-run: 200
|
||||
remove-stale-when-updated: true #Remove stale label when issue/pr is updated
|
||||
4 .github/workflows/update-snyk.yaml vendored
@@ -14,10 +14,10 @@ jobs:
|
||||
pull-requests: write
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
name: Update Snyk report in the docs directory
|
||||
runs-on: ubuntu-24.04
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Build reports
|
||||
|
||||
@@ -22,8 +22,6 @@ linters:
|
||||
- govet
|
||||
- importas
|
||||
- misspell
|
||||
- modernize
|
||||
- noctx
|
||||
- perfsprint
|
||||
- revive
|
||||
- staticcheck
|
||||
@@ -122,13 +120,6 @@ linters:
|
||||
- pkg: github.com/argoproj/argo-cd/v3/util/io
|
||||
alias: utilio
|
||||
|
||||
modernize:
|
||||
disable:
|
||||
# Suggest replacing omitempty with omitzero for struct fields.
|
||||
- omitzero
|
||||
# Simplify code by using go1.26's new(expr). - generates lots of false positives.
|
||||
- newexpr
|
||||
|
||||
nolintlint:
|
||||
require-specific: true
|
||||
|
||||
|
||||
@@ -66,14 +66,14 @@ release:
|
||||
|
||||
```shell
|
||||
kubectl create namespace argocd
|
||||
kubectl apply -n argocd --server-side --force-conflicts -f https://raw.githubusercontent.com/{{ .Env.GORELEASER_CURRENT_REPOSITORY }}/{{.Tag}}/manifests/install.yaml
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/install.yaml
|
||||
```
|
||||
|
||||
### HA:
|
||||
|
||||
```shell
|
||||
kubectl create namespace argocd
|
||||
kubectl apply -n argocd --server-side --force-conflicts -f https://raw.githubusercontent.com/{{ .Env.GORELEASER_CURRENT_REPOSITORY }}/{{.Tag}}/manifests/ha/install.yaml
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/ha/install.yaml
|
||||
```
|
||||
|
||||
## Release Signatures and Provenance
|
||||
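For readers of the generated notes, a rough sketch of how the container provenance produced by `generator_container_slsa3.yml` could be checked. The tag, digest placeholder, and the use of `slsa-verifier` here are illustrative assumptions, not the project's documented procedure; the release notes themselves remain the authoritative source:

```shell
# Hypothetical verification of the image provenance (substitute real tag and digest).
slsa-verifier verify-image quay.io/argoproj/argocd@sha256:<digest> \
  --source-uri github.com/argoproj/argo-cd \
  --source-tag <tag>
```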
@@ -87,7 +87,7 @@ release:
|
||||
|
||||
If upgrading from a different minor version, be sure to read the [upgrading](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) documentation.
|
||||
footer: |
|
||||
**Full Changelog**: https://github.com/{{ .Env.GORELEASER_CURRENT_REPOSITORY }}/compare/{{ .PreviousTag }}...{{ .Tag }}
|
||||
**Full Changelog**: https://github.com/argoproj/argo-cd/compare/{{ .PreviousTag }}...{{ .Tag }}
|
||||
|
||||
<a href="https://argoproj.github.io/cd/"><img src="https://raw.githubusercontent.com/argoproj/argo-site/master/content/pages/cd/gitops-cd.png" width="25%" ></a>
|
||||
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
dir: '{{.InterfaceDir}}/mocks'
|
||||
structname: '{{.InterfaceName}}'
|
||||
filename: '{{.InterfaceName}}.go'
|
||||
include-auto-generated: true # Needed since mockery 3.6.1
|
||||
pkgname: mocks
|
||||
|
||||
template-data:
|
||||
unroll-variadic: true
|
||||
|
||||
packages:
|
||||
github.com/argoproj/argo-cd/v3/applicationset/generators:
|
||||
interfaces:
|
||||
@@ -9,10 +14,11 @@ packages:
|
||||
interfaces:
|
||||
Repos: {}
|
||||
github.com/argoproj/argo-cd/v3/applicationset/services/scm_provider:
|
||||
config:
|
||||
dir: applicationset/services/scm_provider/aws_codecommit/mocks
|
||||
interfaces:
|
||||
AWSCodeCommitClient: {}
|
||||
AWSTaggingClient: {}
|
||||
AzureDevOpsClientFactory: {}
|
||||
github.com/argoproj/argo-cd/v3/applicationset/utils:
|
||||
interfaces:
|
||||
Renderer: {}
|
||||
@@ -32,9 +38,6 @@ packages:
|
||||
github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster:
|
||||
interfaces:
|
||||
ClusterServiceServer: {}
|
||||
github.com/argoproj/argo-cd/v3/pkg/apiclient/project:
|
||||
interfaces:
|
||||
ProjectServiceClient: {}
|
||||
github.com/argoproj/argo-cd/v3/pkg/apiclient/session:
|
||||
interfaces:
|
||||
SessionServiceClient: {}
|
||||
@@ -44,8 +47,8 @@ packages:
|
||||
AppProjectInterface: {}
|
||||
github.com/argoproj/argo-cd/v3/reposerver/apiclient:
|
||||
interfaces:
|
||||
RepoServerServiceClient: {}
|
||||
RepoServerService_GenerateManifestWithFilesClient: {}
|
||||
RepoServerServiceClient: {}
|
||||
github.com/argoproj/argo-cd/v3/server/application:
|
||||
interfaces:
|
||||
Broadcaster: {}
|
||||
@@ -60,37 +63,26 @@ packages:
|
||||
github.com/argoproj/argo-cd/v3/util/db:
|
||||
interfaces:
|
||||
ArgoDB: {}
|
||||
RepoCredsDB: {}
|
||||
github.com/argoproj/argo-cd/v3/util/git:
|
||||
interfaces:
|
||||
Client: {}
|
||||
github.com/argoproj/argo-cd/v3/util/helm:
|
||||
interfaces:
|
||||
Client: {}
|
||||
github.com/argoproj/argo-cd/v3/util/oci:
|
||||
interfaces:
|
||||
Client: {}
|
||||
github.com/argoproj/argo-cd/v3/util/io:
|
||||
interfaces:
|
||||
TempPaths: {}
|
||||
github.com/argoproj/argo-cd/v3/util/notification/argocd:
|
||||
interfaces:
|
||||
Service: {}
|
||||
github.com/argoproj/argo-cd/v3/util/oci:
|
||||
interfaces:
|
||||
Client: {}
|
||||
github.com/argoproj/argo-cd/v3/util/workloadidentity:
|
||||
interfaces:
|
||||
TokenProvider: {}
|
||||
github.com/argoproj/argo-cd/gitops-engine/pkg/cache:
|
||||
interfaces:
|
||||
ClusterCache: {}
|
||||
github.com/argoproj/argo-cd/gitops-engine/pkg/diff:
|
||||
interfaces:
|
||||
ServerSideDryRunner: {}
|
||||
github.com/microsoft/azure-devops-go-api/azuredevops/v7/git:
|
||||
config:
|
||||
dir: applicationset/services/scm_provider/azure_devops/git/mocks
|
||||
interfaces:
|
||||
Client: {}
|
||||
pkgname: mocks
|
||||
structname: '{{.InterfaceName}}'
|
||||
template-data:
|
||||
unroll-variadic: true
|
||||
|
||||
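After editing this mockery config, the mocks have to be regenerated. A small sketch, assuming the repository's existing `mockgen` Make target (a prerequisite of `codegen-local` in the Makefile changes further down) is the intended entry point:

```shell
# Regenerate mocks from .mockery.yaml (assumes the existing mockgen target).
make mockgen
# Or run the whole local code generation pipeline.
make codegen-local
```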
@@ -16,5 +16,4 @@
|
||||
# CLI
|
||||
/cmd/argocd/** @argoproj/argocd-approvers @argoproj/argocd-approvers-cli
|
||||
/cmd/main.go @argoproj/argocd-approvers @argoproj/argocd-approvers-cli
|
||||
# Also include @argoproj/argocd-approvers-docs to avoid requiring CLI approvers for docs-only PRs.
|
||||
/docs/operator-manual/ @argoproj/argocd-approvers @argoproj/argocd-approvers-docs @argoproj/argocd-approvers-cli
|
||||
/docs/operator-manual/ @argoproj/argocd-approvers @argoproj/argocd-approvers-cli
|
||||
28 Dockerfile
@@ -1,10 +1,10 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:25.10@sha256:4a9232cc47bf99defcc8860ef6222c99773330367fcecbf21ba2edb0b810a31e
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:10bb10bb062de665d4dc3e0ea36715270ead632cfcb74d08ca2273712a0dfb42
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.26.0@sha256:c83e68f3ebb6943a2904fa66348867d108119890a2c6a2e6f07b38d0eb6c25c5 AS builder
|
||||
FROM docker.io/library/golang:1.25.5@sha256:36b4f45d2874905b9e8573b783292629bcb346d0a70d8d7150b6df545234818f AS builder
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
@@ -16,6 +16,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
unzip \
|
||||
fcgiwrap \
|
||||
git \
|
||||
git-lfs \
|
||||
make \
|
||||
wget \
|
||||
gcc \
|
||||
@@ -28,8 +29,7 @@ COPY hack/install.sh hack/tool-versions.sh ./
|
||||
COPY hack/installers installers
|
||||
|
||||
RUN ./install.sh helm && \
|
||||
INSTALL_PATH=/usr/local/bin ./install.sh kustomize && \
|
||||
./install.sh git-lfs
|
||||
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
|
||||
|
||||
####################################################################################################
|
||||
# Argo CD Base - used as the base for both the release and dev argocd images
|
||||
@@ -50,10 +50,10 @@ RUN groupadd -g $ARGOCD_USER_ID argocd && \
|
||||
chmod g=u /home/argocd && \
|
||||
apt-get update && \
|
||||
apt-get dist-upgrade -y && \
|
||||
apt-get install --no-install-recommends -y \
|
||||
git tini ca-certificates gpg gpg-agent tzdata connect-proxy openssh-client && \
|
||||
apt-get install -y \
|
||||
git git-lfs tini gpg tzdata connect-proxy && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/doc/*
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
COPY hack/gpg-wrapper.sh \
|
||||
hack/git-verify-wrapper.sh \
|
||||
@@ -61,7 +61,6 @@ COPY hack/gpg-wrapper.sh \
|
||||
/usr/local/bin/
|
||||
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
|
||||
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
|
||||
COPY --from=builder /usr/local/bin/git-lfs /usr/local/bin/git-lfs
|
||||
|
||||
# keep uid_entrypoint.sh for backward compatibility
|
||||
RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh
|
||||
@@ -80,19 +79,13 @@ RUN mkdir -p tls && \
|
||||
|
||||
ENV USER=argocd
|
||||
|
||||
# Disable gRPC service config lookups via DNS TXT records to prevent excessive
|
||||
# DNS queries for _grpc_config.<hostname> which can cause timeouts in dual-stack
|
||||
# environments. This can be overridden via argocd-cmd-params-cm ConfigMap.
|
||||
# See https://github.com/argoproj/argo-cd/issues/24991
|
||||
ENV GRPC_ENABLE_TXT_SERVICE_CONFIG=false
|
||||
|
||||
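If the TXT service-config lookups are ever needed again, the image default can be overridden per component. The `kubectl set env` call below is only an illustration of one way to do that; the comment above names `argocd-cmd-params-cm` as the supported route, and its exact key is not shown in this diff:

```shell
# Illustrative override: re-enable gRPC TXT service-config lookups on one component.
kubectl -n argocd set env deployment/argocd-repo-server \
  GRPC_ENABLE_TXT_SERVICE_CONFIG=true
```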
USER $ARGOCD_USER_ID
|
||||
WORKDIR /home/argocd
|
||||
|
||||
####################################################################################################
|
||||
# Argo CD UI stage
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.0.0@sha256:9d09fa506f5b8465c5221cbd6f980e29ae0ce9a3119e2b9bc0842e6a3f37bb59 AS argocd-ui
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.0.0@sha256:e643c0b70dca9704dff42e12b17f5b719dbe4f95e6392fc2dfa0c5f02ea8044d AS argocd-ui
|
||||
|
||||
WORKDIR /src
|
||||
COPY ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
@@ -110,13 +103,10 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.26.0@sha256:c83e68f3ebb6943a2904fa66348867d108119890a2c6a2e6f07b38d0eb6c25c5 AS argocd-build
|
||||
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.5@sha256:36b4f45d2874905b9e8573b783292629bcb346d0a70d8d7150b6df545234818f AS argocd-build
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
COPY go.* ./
|
||||
RUN mkdir -p gitops-engine
|
||||
COPY gitops-engine/go.* ./gitops-engine
|
||||
RUN go mod download
|
||||
|
||||
# Perform the build
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM docker.io/library/golang:1.26.0@sha256:c83e68f3ebb6943a2904fa66348867d108119890a2c6a2e6f07b38d0eb6c25c5
|
||||
FROM docker.io/library/golang:1.25.5@sha256:36b4f45d2874905b9e8573b783292629bcb346d0a70d8d7150b6df545234818f
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
@@ -11,6 +11,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
unzip \
|
||||
fcgiwrap \
|
||||
git \
|
||||
git-lfs \
|
||||
make \
|
||||
wget \
|
||||
gcc \
|
||||
@@ -27,8 +28,7 @@ COPY hack/install.sh hack/tool-versions.sh ./
|
||||
COPY hack/installers installers
|
||||
|
||||
RUN ./install.sh helm && \
|
||||
INSTALL_PATH=/usr/local/bin ./install.sh kustomize && \
|
||||
./install.sh git-lfs
|
||||
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
|
||||
|
||||
COPY hack/gpg-wrapper.sh \
|
||||
hack/git-verify-wrapper.sh \
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
# Argo CD Maintainers
|
||||
|
||||
This document lists the maintainers of the Argo CD project.
|
||||
|
||||
## Maintainers
|
||||
|
||||
| Maintainer | GitHub ID | Project Roles | Affiliation |
|
||||
|---------------------------|---------------------------------------------------------|----------------------|-------------------------------------------------|
|
||||
| Zach Aller | [zachaller](https://github.com/zachaller) | Reviewer | [Intuit](https://www.github.com/intuit/) |
|
||||
| Leonardo Luz Almeida | [leoluz](https://github.com/leoluz) | Approver | [Intuit](https://www.github.com/intuit/) |
|
||||
| Chetan Banavikalmutt | [chetan-rns](https://github.com/chetan-rns) | Reviewer | [Red Hat](https://redhat.com/) |
|
||||
| Keith Chong | [keithchong](https://github.com/keithchong) | Approver | [Red Hat](https://redhat.com/) |
|
||||
| Alex Collins | [alexec](https://github.com/alexec) | Approver | [Intuit](https://www.github.com/intuit/) |
|
||||
| Michael Crenshaw | [crenshaw-dev](https://github.com/crenshaw-dev) | Lead | [Intuit](https://www.github.com/intuit/) |
|
||||
| Soumya Ghosh Dastidar | [gdsoumya](https://github.com/gdsoumya) | Approver | [Akuity](https://akuity.io/) |
|
||||
| Eugene Doudine | [dudinea](https://github.com/dudinea) | Reviewer | [Octopus Deploy](https://octopus.com/) |
|
||||
| Jann Fischer | [jannfis](https://github.com/jannfis) | Approver | [Red Hat](https://redhat.com/) |
|
||||
| Dan Garfield | [todaywasawesome](https://github.com/todaywasawesome) | Approver(docs) | [Octopus Deploy](https://octopus.com/) |
|
||||
| Alexandre Gaudreault | [agaudreault](https://github.com/agaudreault) | Approver | [Intuit](https://www.github.com/intuit/) |
|
||||
| Christian Hernandez | [christianh814](https://github.com/christianh814) | Reviewer(docs) | [Akuity](https://akuity.io/) |
|
||||
| Peter Jiang | [pjiang-dev](https://github.com/pjiang-dev) | Approver(docs) | [Intuit](https://www.intuit.com/) |
|
||||
| Andrii Korotkov | [andrii-korotkov](https://github.com/andrii-korotkov) | Reviewer | [Verkada](https://www.verkada.com/) |
|
||||
| Pasha Kostohrys | [pasha-codefresh](https://github.com/pasha-codefresh) | Approver | [Codefresh](https://www.github.com/codefresh/) |
|
||||
| Nitish Kumar | [nitishfy](https://github.com/nitishfy) | Approver(cli,docs) | [Akuity](https://akuity.io/) |
|
||||
| Justin Marquis | [34fathombelow](https://github.com/34fathombelow) | Approver(docs/ci) | [Akuity](https://akuity.io/) |
|
||||
| Alexander Matyushentsev | [alexmt](https://github.com/alexmt) | Lead | [Akuity](https://akuity.io/) |
|
||||
| Nicholas Morey | [morey-tech](https://github.com/morey-tech) | Reviewer(docs) | [Akuity](https://akuity.io/) |
|
||||
| Papapetrou Patroklos | [ppapapetrou76](https://github.com/ppapapetrou76) | Approver(docs,cli) | [Octopus Deploy](https://octopus.com/) |
|
||||
| Blake Pettersson | [blakepettersson](https://github.com/blakepettersson) | Approver | [Akuity](https://akuity.io/) |
|
||||
| Ishita Sequeira | [ishitasequeira](https://github.com/ishitasequeira) | Approver | [Red Hat](https://redhat.com/) |
|
||||
| Ashutosh Singh | [ashutosh16](https://github.com/ashutosh16) | Approver(docs) | [Intuit](https://www.github.com/intuit/) |
|
||||
| Linghao Su | [linghaoSu](https://github.com/linghaoSu) | Reviewer | [DaoCloud](https://daocloud.io) |
|
||||
| Jesse Suen | [jessesuen](https://github.com/jessesuen) | Approver | [Akuity](https://akuity.io/) |
|
||||
| Yuan Tang | [terrytangyuan](https://github.com/terrytangyuan) | Reviewer | [Red Hat](https://redhat.com/) |
|
||||
| William Tam | [wtam2018](https://github.com/wtam2018) | Reviewer | [Red Hat](https://redhat.com/) |
|
||||
| Ryan Umstead | [rumstead](https://github.com/rumstead) | Approver | [Black Rock](https://www.github.com/blackrock/) |
|
||||
| Regina Voloshin | [reggie-k](https://github.com/reggie-k) | Approver | [Octopus Deploy](https://octopus.com/) |
|
||||
| Hong Wang | [wanghong230](https://github.com/wanghong230) | Reviewer | [Akuity](https://akuity.io/) |
|
||||
| Jonathan West | [jgwest](https://github.com/jgwest) | Approver | [Red Hat](https://redhat.com/) |
|
||||
| Jaewoo Choi | [choejwoo](https://github.com/choejwoo) | Reviewer | [Hyundai-Autoever](https://www.hyundai-autoever.com/eng/) |
|
||||
| Alexy Mantha | [alexymantha](https://github.com/alexymantha) | Reviewer | GoTo |
|
||||
| Kanika Rana | [ranakan19](https://github.com/ranakan19) | Reviewer | [Red Hat](https://redhat.com/) |
|
||||
| Jonathan Winters | [jwinters01](https://github.com/jwinters01) | Reviewer | [Intuit](https://www.github.com/intuit/) |
|
||||
103 Makefile
@@ -56,8 +56,8 @@ endif
|
||||
|
||||
ARGOCD_PROCFILE?=Procfile
|
||||
|
||||
# pointing to python 3.12 to match https://github.com/argoproj/argo-cd/blob/master/.readthedocs.yaml
|
||||
MKDOCS_DOCKER_IMAGE?=python:3.12-alpine
|
||||
# pointing to python 3.7 to match https://github.com/argoproj/argo-cd/blob/master/.readthedocs.yml
|
||||
MKDOCS_DOCKER_IMAGE?=python:3.7-alpine
|
||||
MKDOCS_RUN_ARGS?=
|
||||
|
||||
# Configuration for building argocd-test-tools image
|
||||
@@ -76,15 +76,15 @@ ARGOCD_E2E_REDIS_PORT?=6379
|
||||
ARGOCD_E2E_DEX_PORT?=5556
|
||||
ARGOCD_E2E_YARN_HOST?=localhost
|
||||
ARGOCD_E2E_DISABLE_AUTH?=
|
||||
ARGOCD_E2E_DIR?=/tmp/argo-e2e
|
||||
|
||||
ARGOCD_E2E_TEST_TIMEOUT?=90m
|
||||
ARGOCD_E2E_RERUN_FAILS?=5
|
||||
|
||||
ARGOCD_IN_CI?=false
|
||||
ARGOCD_TEST_E2E?=true
|
||||
ARGOCD_BIN_MODE?=true
|
||||
|
||||
ARGOCD_LINT_GOGC?=20
|
||||
|
||||
# Depending on where we are (legacy or non-legacy pwd), we need to use
|
||||
# different Docker volume mounts for our source tree
|
||||
LEGACY_PATH=$(GOPATH)/src/github.com/argoproj/argo-cd
|
||||
@@ -144,6 +144,7 @@ define run-in-test-client
|
||||
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
|
||||
-e GITHUB_TOKEN \
|
||||
-e GOCACHE=/tmp/go-build-cache \
|
||||
-e ARGOCD_LINT_GOGC=$(ARGOCD_LINT_GOGC) \
|
||||
-v ${DOCKER_SRC_MOUNT} \
|
||||
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
|
||||
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
|
||||
@@ -197,40 +198,19 @@ endif
|
||||
|
||||
ifneq (${GIT_TAG},)
|
||||
IMAGE_TAG=${GIT_TAG}
|
||||
override LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
|
||||
LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
|
||||
else
|
||||
IMAGE_TAG?=latest
|
||||
endif
|
||||
|
||||
# defaults for building images and manifests
|
||||
ifeq (${DOCKER_PUSH},true)
|
||||
ifndef IMAGE_NAMESPACE
|
||||
$(error IMAGE_NAMESPACE must be set to push images (e.g. IMAGE_NAMESPACE=argoproj))
|
||||
endif
|
||||
endif
|
||||
|
||||
# Construct prefix for docker image
|
||||
# Note: keeping same logic as in hack/update-manifests.sh
|
||||
ifdef IMAGE_REGISTRY
|
||||
ifdef IMAGE_NAMESPACE
|
||||
IMAGE_PREFIX=${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/
|
||||
else
|
||||
$(error IMAGE_NAMESPACE must be set when IMAGE_REGISTRY is set (e.g. IMAGE_NAMESPACE=argoproj))
|
||||
endif
|
||||
else
|
||||
ifdef IMAGE_NAMESPACE
|
||||
# for backwards compatibility with the old style, e.g. IMAGE_NAMESPACE='quay.io/argoproj'
|
||||
IMAGE_PREFIX=${IMAGE_NAMESPACE}/
|
||||
else
|
||||
# Neither namespace nor registry given - apply the default values
|
||||
IMAGE_REGISTRY="quay.io"
|
||||
IMAGE_NAMESPACE="argoproj"
|
||||
IMAGE_PREFIX=${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/
|
||||
endif
|
||||
endif
|
||||
|
||||
ifndef IMAGE_REPOSITORY
|
||||
IMAGE_REPOSITORY=argocd
|
||||
endif
|
||||
|
||||
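To make the prefix logic above concrete, a few hypothetical invocations and the image names they would produce under this scheme:

```shell
# Defaults: no registry or namespace given -> quay.io/argoproj/argocd:<tag>
make image

# Explicit registry and namespace (values are placeholders)
make image IMAGE_REGISTRY=ghcr.io IMAGE_NAMESPACE=myorg IMAGE_TAG=dev
# -> ghcr.io/myorg/argocd:dev

# Legacy form: the namespace already carries the registry
make image IMAGE_NAMESPACE=quay.io/argoproj IMAGE_TAG=dev
# -> quay.io/argoproj/argocd:dev
```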
.PHONY: all
|
||||
@@ -281,12 +261,8 @@ clidocsgen:
|
||||
actionsdocsgen:
|
||||
hack/generate-actions-list.sh
|
||||
|
||||
.PHONY: resourceiconsgen
|
||||
resourceiconsgen:
|
||||
hack/generate-icons-typescript.sh
|
||||
|
||||
.PHONY: codegen-local
|
||||
codegen-local: mod-vendor-local mockgen gogen protogen clientgen openapigen clidocsgen actionsdocsgen resourceiconsgen manifests-local notification-docs notification-catalog
|
||||
codegen-local: mod-vendor-local mockgen gogen protogen clientgen openapigen clidocsgen actionsdocsgen manifests-local notification-docs notification-catalog
|
||||
rm -rf vendor/
|
||||
|
||||
.PHONY: codegen-local-fast
|
||||
@@ -328,11 +304,12 @@ endif
|
||||
.PHONY: manifests-local
|
||||
manifests-local:
|
||||
./hack/update-manifests.sh
|
||||
|
||||
.PHONY: manifests
|
||||
manifests: test-tools-image
|
||||
$(call run-in-test-client,make manifests-local IMAGE_REGISTRY='${IMAGE_REGISTRY}' IMAGE_NAMESPACE='${IMAGE_NAMESPACE}' IMAGE_REPOSITORY='${IMAGE_REPOSITORY}' IMAGE_TAG='${IMAGE_TAG}')
|
||||
# consolidated binary for cli, util, server, repo-server, controller
|
||||
$(call run-in-test-client,make manifests-local IMAGE_NAMESPACE='${IMAGE_NAMESPACE}' IMAGE_TAG='${IMAGE_TAG}')
|
||||
|
||||
# consolidated binary for cli, util, server, repo-server, controller
|
||||
.PHONY: argocd-all
|
||||
argocd-all: clean-debug
|
||||
CGO_ENABLED=${CGO_FLAG} GOOS=${GOOS} GOARCH=${GOARCH} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
@@ -353,7 +330,7 @@ controller:
|
||||
build-ui:
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build -t argocd-ui --platform=$(TARGET_ARCH) --target argocd-ui .
|
||||
find ./ui/dist -type f -not -name gitkeep -delete
|
||||
$(DOCKER) run -u $(CONTAINER_UID):$(CONTAINER_GID) -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
$(DOCKER) run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
|
||||
.PHONY: image
|
||||
ifeq ($(DEV_IMAGE), true)
|
||||
@@ -363,23 +340,23 @@ ifeq ($(DEV_IMAGE), true)
|
||||
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
|
||||
image: build-ui
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t argocd-base --target argocd-base .
|
||||
GOOS=linux GOARCH=$(TARGET_ARCH:linux/%=%) GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
CGO_ENABLED=${CGO_FLAG} GOOS=linux GOARCH=amd64 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex
|
||||
cp Dockerfile.dev dist
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
else
|
||||
image:
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build -t $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
|
||||
endif
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then $(DOCKER) push $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) ; fi
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then $(DOCKER) push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
|
||||
|
||||
.PHONY: armimage
|
||||
armimage:
|
||||
$(DOCKER) build -t $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG)-arm .
|
||||
$(DOCKER) build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm .
|
||||
|
||||
.PHONY: builder-image
|
||||
builder-image:
|
||||
@@ -411,7 +388,9 @@ lint: test-tools-image
|
||||
.PHONY: lint-local
|
||||
lint-local:
|
||||
golangci-lint --version
|
||||
golangci-lint run --fix --verbose
|
||||
# NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC
|
||||
# See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
|
||||
GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose
|
||||
|
||||
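Since `ARGOCD_LINT_GOGC` is an overridable Make variable (default 20, defined earlier in this diff), memory pressure can be traded for lint speed from the command line, e.g.:

```shell
# Lower GOGC further if golangci-lint still gets OOM-killed.
make lint ARGOCD_LINT_GOGC=10
# Same knob for the non-containerized variant.
make lint-local ARGOCD_LINT_GOGC=10
```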
.PHONY: lint-ui
|
||||
lint-ui: test-tools-image
|
||||
@@ -443,24 +422,12 @@ test: test-tools-image
|
||||
|
||||
# Run all unit tests (local version)
|
||||
.PHONY: test-local
|
||||
test-local: test-gitops-engine
|
||||
# run if TEST_MODULE is empty or does not point to gitops-engine tests
|
||||
ifneq ($(if $(TEST_MODULE),,ALL)$(filter-out github.com/argoproj/argo-cd/gitops-engine% ./gitops-engine%,$(TEST_MODULE)),)
|
||||
test-local:
|
||||
if test "$(TEST_MODULE)" = ""; then \
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... | grep -v 'test/e2e'` ./hack/test.sh -args -test.gocoverdir="$(PWD)/test-results"; \
|
||||
else \
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -args -test.gocoverdir="$(PWD)/test-results" "$(TEST_MODULE)"; \
|
||||
fi
|
||||
endif
|
||||
|
||||
# Run gitops-engine unit tests
|
||||
.PHONY: test-gitops-engine
|
||||
test-gitops-engine:
|
||||
# run if TEST_MODULE is empty or points to gitops-engine tests
|
||||
ifneq ($(if $(TEST_MODULE),,ALL)$(filter github.com/argoproj/argo-cd/gitops-engine% ./gitops-engine%,$(TEST_MODULE)),)
|
||||
mkdir -p $(PWD)/test-results
|
||||
cd gitops-engine && go test -race -cover ./... -args -test.gocoverdir="$(PWD)/test-results"
|
||||
endif
|
||||
|
||||
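A short sketch of how the `TEST_MODULE` filter above behaves; the module path used here is a hypothetical example:

```shell
# Run everything: argo-cd packages plus the vendored gitops-engine module.
make test-local

# Run a single module; the gitops-engine target is skipped because the
# filter only matches github.com/argoproj/argo-cd/gitops-engine... paths.
make test-local TEST_MODULE=./util/argo/...
```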
.PHONY: test-race
|
||||
test-race: test-tools-image
|
||||
@@ -487,7 +454,7 @@ test-e2e:
|
||||
test-e2e-local: cli-local
|
||||
# NO_PROXY ensures tests don't go through a proxy if one is configured on the test system
|
||||
export GO111MODULE=off
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=$(ARGOCD_E2E_RERUN_FAILS) PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=5 PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
|
||||
|
||||
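The usual local loop pairs this target with `start-e2e-local` further down; a sketch, with the rerun override shown as an optional tweak:

```shell
# Terminal 1: bring up the local E2E fixture.
make start-e2e-local

# Terminal 2: run the suite; reruns default to ARGOCD_E2E_RERUN_FAILS (5).
make test-e2e-local ARGOCD_E2E_RERUN_FAILS=0
```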
# Spawns a shell in the test server container for debugging purposes
|
||||
debug-test-server: test-tools-image
|
||||
@@ -511,13 +478,13 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
kubectl create ns argocd-e2e-external || true
|
||||
kubectl create ns argocd-e2e-external-2 || true
|
||||
kubectl config set-context --current --namespace=argocd-e2e
|
||||
kustomize build test/manifests/base | kubectl apply --server-side --force-conflicts -f -
|
||||
kustomize build test/manifests/base | kubectl apply -f -
|
||||
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
|
||||
# Create GPG keys and source directories
|
||||
if test -d $(ARGOCD_E2E_DIR)/app/config/gpg; then rm -rf $(ARGOCD_E2E_DIR)/app/config/gpg/*; fi
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/keys && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/keys
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/source && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/source
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/plugin && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/plugin
|
||||
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
|
||||
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
|
||||
mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source
|
||||
mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin
|
||||
# create folders to hold go coverage results for each component
|
||||
mkdir -p /tmp/coverage/app-controller
|
||||
mkdir -p /tmp/coverage/api-server
|
||||
@@ -526,15 +493,13 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
mkdir -p /tmp/coverage/notification
|
||||
mkdir -p /tmp/coverage/commit-server
|
||||
# set paths for locally managed ssh known hosts and tls certs data
|
||||
ARGOCD_E2E_DIR=$(ARGOCD_E2E_DIR) \
|
||||
ARGOCD_SSH_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/ssh \
|
||||
ARGOCD_TLS_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/tls \
|
||||
ARGOCD_GPG_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/gpg/source \
|
||||
ARGOCD_GNUPGHOME=$(ARGOCD_E2E_DIR)/app/config/gpg/keys \
|
||||
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
|
||||
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
|
||||
ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \
|
||||
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
|
||||
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
|
||||
ARGOCD_PLUGINCONFIGFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
|
||||
ARGOCD_PLUGINSOCKFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
|
||||
ARGOCD_GIT_CONFIG=$(PWD)/test/e2e/fixture/gitconfig \
|
||||
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_E2E_DISABLE_AUTH=false \
|
||||
ARGOCD_ZJWT_FEATURE_FLAG=always \
|
||||
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
|
||||
@@ -611,7 +576,7 @@ build-docs-local:
|
||||
|
||||
.PHONY: build-docs
|
||||
build-docs:
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build'
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install mkdocs; pip install $$(mkdocs get-deps); mkdocs build'
|
||||
|
||||
.PHONY: serve-docs-local
|
||||
serve-docs-local:
|
||||
@@ -619,7 +584,7 @@ serve-docs-local:
|
||||
|
||||
.PHONY: serve-docs
|
||||
serve-docs:
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install mkdocs; pip install $$(mkdocs get-deps); mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
|
||||
|
||||
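Both docs targets run mkdocs inside the pinned Python image, so no local toolchain is needed; typical usage:

```shell
make build-docs   # one-off build of the documentation site
make serve-docs   # live preview served on port 8000
```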
# Verify that kubectl can connect to your K8s cluster from Docker
|
||||
.PHONY: verify-kube-connect
|
||||
|
||||
6 Procfile
@@ -2,13 +2,13 @@ controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run
|
||||
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --hydrator-enabled=${ARGOCD_HYDRATOR_ENABLED:='false'}"
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v3/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
|
||||
redis: hack/start-redis-with-password.sh
|
||||
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "export PATH=./dist:\$PATH && [ -n \"\$ARGOCD_GIT_CONFIG\" ] && export GIT_CONFIG_GLOBAL=\$ARGOCD_GIT_CONFIG && export GIT_CONFIG_NOSYSTEM=1; GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
commit-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/commit-server} FORCE_LOG_COLORS=1 ARGOCD_BINARY_NAME=argocd-commit-server $COMMAND --loglevel debug --port ${ARGOCD_E2E_COMMITSERVER_PORT:-8086}"
|
||||
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
|
||||
git-server: test/fixture/testrepos/start-git.sh
|
||||
helm-registry: test/fixture/testrepos/start-helm-registry.sh
|
||||
oci-registry: test/fixture/testrepos/start-authenticated-helm-registry.sh
|
||||
dev-mounter: [ "$ARGOCD_E2E_TEST" != "true" ] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
|
||||
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
|
||||
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/applicationset-controller} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
|
||||
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
|
||||
@@ -13,7 +13,6 @@
|
||||
[](https://twitter.com/argoproj)
|
||||
[](https://argoproj.github.io/community/join-slack)
|
||||
[](https://www.linkedin.com/company/argoproj/)
|
||||
[](https://bsky.app/profile/argoproj.bsky.social)
|
||||
|
||||
# Argo CD - Declarative Continuous Delivery for Kubernetes
|
||||
|
||||
|
||||
@@ -3,9 +3,9 @@ header:
|
||||
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
|
||||
last-updated: '2023-10-27'
|
||||
last-reviewed: '2023-10-27'
|
||||
commit-hash: 814db444c36503851dc3d45cf9c44394821ca1a4
|
||||
commit-hash: 320f46f06beaf75f9c406e3a47e2e09d36e2047a
|
||||
project-url: https://github.com/argoproj/argo-cd
|
||||
project-release: v3.4.0
|
||||
project-release: v3.2.0
|
||||
changelog: https://github.com/argoproj/argo-cd/releases
|
||||
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
|
||||
project-lifecycle:
|
||||
|
||||
25 Tiltfile
@@ -60,7 +60,7 @@ k8s_yaml(kustomize('manifests/dev-tilt'))
|
||||
|
||||
# build dev image
|
||||
docker_build_with_restart(
|
||||
'quay.io/argoproj/argocd:latest',
|
||||
'argocd',
|
||||
context='.',
|
||||
dockerfile='Dockerfile.tilt',
|
||||
entrypoint=[
|
||||
@@ -123,7 +123,6 @@ k8s_resource(
|
||||
'9345:2345',
|
||||
'8083:8083'
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track crds
|
||||
@@ -149,7 +148,6 @@ k8s_resource(
|
||||
'9346:2345',
|
||||
'8084:8084'
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-redis resources and port forward
|
||||
@@ -164,7 +162,6 @@ k8s_resource(
|
||||
port_forwards=[
|
||||
'6379:6379',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-applicationset-controller resources
|
||||
@@ -183,7 +180,6 @@ k8s_resource(
|
||||
'8085:8080',
|
||||
'7000:7000'
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-application-controller resources
|
||||
@@ -201,7 +197,6 @@ k8s_resource(
|
||||
'9348:2345',
|
||||
'8086:8082',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-notifications-controller resources
|
||||
@@ -219,7 +214,6 @@ k8s_resource(
|
||||
'9349:2345',
|
||||
'8087:9001',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-dex-server resources
|
||||
@@ -231,7 +225,6 @@ k8s_resource(
|
||||
'argocd-dex-server:role',
|
||||
'argocd-dex-server:rolebinding',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-commit-server resources
|
||||
@@ -246,19 +239,6 @@ k8s_resource(
|
||||
'8088:8087',
|
||||
'8089:8086',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# ui dependencies
|
||||
local_resource(
|
||||
'node-modules',
|
||||
'yarn',
|
||||
dir='ui',
|
||||
deps = [
|
||||
'ui/package.json',
|
||||
'ui/yarn.lock',
|
||||
],
|
||||
allow_parallel=True,
|
||||
)
|
||||
|
||||
# docker for ui
|
||||
@@ -280,7 +260,6 @@ k8s_resource(
|
||||
port_forwards=[
|
||||
'4000:4000',
|
||||
],
|
||||
resource_deps=['node-modules'],
|
||||
)
|
||||
|
||||
# linting
|
||||
@@ -299,7 +278,6 @@ local_resource(
|
||||
'ui',
|
||||
],
|
||||
allow_parallel=True,
|
||||
resource_deps=['node-modules'],
|
||||
)
|
||||
|
||||
local_resource(
|
||||
@@ -309,6 +287,5 @@ local_resource(
|
||||
'go.mod',
|
||||
'go.sum',
|
||||
],
|
||||
allow_parallel=True,
|
||||
)
|
||||
|
||||
|
||||
16 USERS.md
@@ -31,7 +31,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [Ant Group](https://www.antgroup.com/)
|
||||
1. [AppDirect](https://www.appdirect.com)
|
||||
1. [Arcadia](https://www.arcadia.io)
|
||||
1. [Arctiq Inc.](https://www.arctiq.ca)
|
||||
1. [Artemis Health by Nomi Health](https://www.artemishealth.com/)
|
||||
1. [Arturia](https://www.arturia.com)
|
||||
@@ -64,7 +63,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Camptocamp](https://camptocamp.com)
|
||||
1. [Candis](https://www.candis.io)
|
||||
1. [Capital One](https://www.capitalone.com)
|
||||
1. [Capptain LTD](https://capptain.co/)
|
||||
1. [CARFAX Europe](https://www.carfax.eu)
|
||||
1. [CARFAX](https://www.carfax.com)
|
||||
1. [Carrefour Group](https://www.carrefour.com)
|
||||
@@ -87,7 +85,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Codefresh](https://www.codefresh.io/)
|
||||
1. [Codility](https://www.codility.com/)
|
||||
1. [Cognizant](https://www.cognizant.com/)
|
||||
1. [Collins Aerospace](https://www.collinsaerospace.com/)
|
||||
1. [Commonbond](https://commonbond.co/)
|
||||
1. [Compatio.AI](https://compatio.ai/)
|
||||
1. [Contlo](https://contlo.com/)
|
||||
@@ -101,7 +98,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Datarisk](https://www.datarisk.io/)
|
||||
1. [Daydream](https://daydream.ing)
|
||||
1. [Deloitte](https://www.deloitte.com/)
|
||||
1. [Dematic](https://www.dematic.com)
|
||||
1. [Deutsche Telekom AG](https://telekom.com)
|
||||
1. [Deutsche Bank AG](https://www.deutsche-bank.de/)
|
||||
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
|
||||
@@ -110,7 +106,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [DigitalOcean](https://www.digitalocean.com)
|
||||
1. [Divar](https://divar.ir)
|
||||
1. [Divistant](https://divistant.com)
|
||||
2. [DocNetwork](https://docnetwork.org/)
|
||||
1. [Dott](https://ridedott.com)
|
||||
1. [Doubble](https://www.doubble.app)
|
||||
1. [Doximity](https://www.doximity.com/)
|
||||
@@ -125,7 +120,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [enigmo](https://enigmo.co.jp/)
|
||||
1. [Envoy](https://envoy.com/)
|
||||
1. [eSave](https://esave.es/)
|
||||
1. [Expedia](https://www.expedia.com)
|
||||
1. [Factorial](https://factorialhr.com/)
|
||||
1. [Farfetch](https://www.farfetch.com)
|
||||
1. [Faro](https://www.faro.com/)
|
||||
@@ -178,7 +172,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [IFS](https://www.ifs.com)
|
||||
1. [IITS-Consulting](https://iits-consulting.de)
|
||||
1. [IllumiDesk](https://www.illumidesk.com)
|
||||
1. [Imagine Learning](https://www.imaginelearning.com/)
|
||||
1. [imaware](https://imaware.health)
|
||||
1. [Indeed](https://indeed.com)
|
||||
1. [Index Exchange](https://www.indexexchange.com/)
|
||||
@@ -187,7 +180,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Instruqt](https://www.instruqt.com)
|
||||
1. [Intel](https://www.intel.com)
|
||||
1. [Intuit](https://www.intuit.com/)
|
||||
1. [IQVIA](https://www.iqvia.com/)
|
||||
1. [Jellysmack](https://www.jellysmack.com)
|
||||
1. [Joblift](https://joblift.com/)
|
||||
1. [JovianX](https://www.jovianx.com/)
|
||||
@@ -209,7 +201,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Kurly](https://www.kurly.com/)
|
||||
1. [Kvist](https://kvistsolutions.com)
|
||||
1. [Kyriba](https://www.kyriba.com/)
|
||||
1. [Lattice](https://lattice.com)
|
||||
1. [LeFigaro](https://www.lefigaro.fr/)
|
||||
1. [Lely](https://www.lely.com/)
|
||||
1. [LexisNexis](https://www.lexisnexis.com/)
|
||||
@@ -240,14 +231,12 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [mixi Group](https://mixi.co.jp/)
|
||||
1. [Moengage](https://www.moengage.com/)
|
||||
1. [Money Forward](https://corp.moneyforward.com/en/)
|
||||
1. [MongoDB](https://www.mongodb.com/)
|
||||
1. [MOO Print](https://www.moo.com/)
|
||||
1. [Mozilla](https://www.mozilla.org)
|
||||
1. [MTN Group](https://www.mtn.com/)
|
||||
1. [Municipality of The Hague](https://www.denhaag.nl/)
|
||||
1. [My Job Glasses](https://myjobglasses.com)
|
||||
1. [Natura &Co](https://naturaeco.com/)
|
||||
1. [Netease Cloud Music](https://music.163.com/)
|
||||
1. [Nethopper](https://nethopper.io)
|
||||
1. [New Relic](https://newrelic.com/)
|
||||
1. [Nextbasket](https://nextbasket.com)
|
||||
@@ -320,8 +309,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Relex Solutions](https://www.relexsolutions.com/)
|
||||
1. [RightRev](https://rightrev.com/)
|
||||
1. [Rijkswaterstaat](https://www.rijkswaterstaat.nl/en)
|
||||
1. Rise
|
||||
1. [RISK IDENT](https://riskident.com/)
|
||||
1. [Rise](https://www.risecard.eu/)
|
||||
1. [Riskified](https://www.riskified.com/)
|
||||
1. [Robotinfra](https://www.robotinfra.com)
|
||||
1. [Rocket.Chat](https://rocket.chat)
|
||||
@@ -331,7 +319,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Salad Technologies](https://salad.com/)
|
||||
1. [Saloodo! GmbH](https://www.saloodo.com)
|
||||
1. [Sap Labs](http://sap.com)
|
||||
1. [SAP Signavio](https://www.signavio.com)
|
||||
1. [Sauce Labs](https://saucelabs.com/)
|
||||
1. [Schneider Electric](https://www.se.com)
|
||||
1. [Schwarz IT](https://jobs.schwarz/it-mission)
|
||||
@@ -389,7 +376,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Ticketmaster](https://ticketmaster.com)
|
||||
1. [Tiger Analytics](https://www.tigeranalytics.com/)
|
||||
1. [Tigera](https://www.tigera.io/)
|
||||
1. [Topicus.Education](https://topicus.nl/en/sectors/education)
|
||||
1. [Toss](https://toss.im/en)
|
||||
1. [Trendyol](https://www.trendyol.com/)
|
||||
1. [tru.ID](https://tru.id)
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -38,7 +37,6 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/ptr"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -48,8 +46,6 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
|
||||
"github.com/argoproj/argo-cd/gitops-engine/pkg/health"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/controllers/template"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/metrics"
|
||||
@@ -58,7 +54,6 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
applog "github.com/argoproj/argo-cd/v3/util/app/log"
|
||||
"github.com/argoproj/argo-cd/v3/util/db"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
argoutil "github.com/argoproj/argo-cd/v3/util/argo"
|
||||
@@ -77,11 +72,6 @@ const (
|
||||
AllAtOnceDeletionOrder = "AllAtOnce"
|
||||
)
|
||||
|
||||
var defaultPreservedFinalizers = []string{
|
||||
argov1alpha1.PreDeleteFinalizerName,
|
||||
argov1alpha1.PostDeleteFinalizerName,
|
||||
}
|
||||
|
||||
var defaultPreservedAnnotations = []string{
|
||||
NotifiedAnnotationKey,
|
||||
argov1alpha1.AnnotationKeyRefresh,
|
||||
@@ -112,7 +102,6 @@ type ApplicationSetReconciler struct {
|
||||
GlobalPreservedLabels []string
|
||||
Metrics *metrics.ApplicationsetMetrics
|
||||
MaxResourcesStatusCount int
|
||||
ClusterInformer *settings.ClusterInformer
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -185,16 +174,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, err
}

// ensure finalizer exists if deletionOrder is set as Reverse
if r.EnableProgressiveSyncs && isProgressiveSyncDeletionOrderReversed(&applicationSetInfo) {
if !controllerutil.ContainsFinalizer(&applicationSetInfo, argov1alpha1.ResourcesFinalizerName) {
controllerutil.AddFinalizer(&applicationSetInfo, argov1alpha1.ResourcesFinalizerName)
if err := r.Update(ctx, &applicationSetInfo); err != nil {
return ctrl.Result{}, err
}
}
}

// Log a warning if there are unrecognized generators
_ = utils.CheckInvalidGenerators(&applicationSetInfo)
// desiredApplications is the main list of all expected Applications from all generators in this appset.
@@ -244,6 +223,13 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
}

err = r.updateResourcesStatus(ctx, logCtx, &applicationSetInfo, currentApplications)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get update resources status for application set: %w", err)
}

// appMap is a name->app collection of Applications in this ApplicationSet.
appMap := map[string]argov1alpha1.Application{}
// appSyncMap tracks which apps will be synced during this reconciliation.
appSyncMap := map[string]bool{}

@@ -257,7 +243,12 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
}
} else if isRollingSyncStrategy(&applicationSetInfo) {
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, generatedApplications)
// The appset uses progressive sync with `RollingSync` strategy
for _, app := range currentApplications {
appMap[app.Name] = app
}

appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, generatedApplications, appMap)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
}
@@ -274,6 +265,13 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
}

var validApps []argov1alpha1.Application
for i := range generatedApplications {
if validateErrors[generatedApplications[i].QualifiedName()] == nil {
validApps = append(validApps, generatedApplications[i])
}
}

if len(validateErrors) > 0 {
errorApps := make([]string, 0, len(validateErrors))
for key := range validateErrors {
@@ -301,25 +299,13 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
)
}

var validApps []argov1alpha1.Application
for i := range generatedApplications {
if validateErrors[generatedApplications[i].QualifiedName()] == nil {
validApps = append(validApps, generatedApplications[i])
}
}

if r.EnableProgressiveSyncs {
// trigger appropriate application syncs if RollingSync strategy is enabled
if progressiveSyncsRollingSyncStrategyEnabled(&applicationSetInfo) {
validApps = r.syncDesiredApplications(logCtx, &applicationSetInfo, appSyncMap, validApps)
validApps = r.syncValidApplications(logCtx, &applicationSetInfo, appSyncMap, appMap, validApps)
}
}

// Sort apps by name so they are updated/created in the same order, and condition errors are the same
sort.Slice(validApps, func(i, j int) bool {
return validApps[i].Name < validApps[j].Name
})

if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowUpdate() {
err = r.createOrUpdateInCluster(ctx, logCtx, applicationSetInfo, validApps)
if err != nil {
@@ -351,7 +337,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}

if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
// Delete the generatedApplications instead of the validApps because we want to be able to delete applications in error/invalid state
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, generatedApplications)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
@@ -367,16 +352,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
}

// Update resources status after create/update/delete so it reflects the actual cluster state.
currentApplications, err = r.getCurrentApplications(ctx, applicationSetInfo)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
}
err = r.updateResourcesStatus(ctx, logCtx, &applicationSetInfo, currentApplications)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to update resources status for application set: %w", err)
}

if applicationSetInfo.RefreshRequired() {
delete(applicationSetInfo.Annotations, common.AnnotationApplicationSetRefresh)
err := r.Update(ctx, &applicationSetInfo)
@@ -756,19 +731,21 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
}
}

// Preserve deleting finalizers and avoid diff conflicts
for _, finalizer := range defaultPreservedFinalizers {
for _, f := range found.Finalizers {
// For finalizers, use prefix matching in case it contains "/" stages
if strings.HasPrefix(f, finalizer) {
generatedApp.Finalizers = append(generatedApp.Finalizers, f)
// Preserve post-delete finalizers:
// https://github.com/argoproj/argo-cd/issues/17181
for _, finalizer := range found.Finalizers {
if strings.HasPrefix(finalizer, argov1alpha1.PostDeleteFinalizerName) {
if generatedApp.Finalizers == nil {
generatedApp.Finalizers = []string{}
}
generatedApp.Finalizers = append(generatedApp.Finalizers, finalizer)
}
}

found.Annotations = generatedApp.Annotations
found.Labels = generatedApp.Labels

found.Finalizers = generatedApp.Finalizers
found.Labels = generatedApp.Labels

return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
})
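The hunk above widens a single post-delete check into a loop over `defaultPreservedFinalizers`, using prefix matching so that stage-qualified finalizers (for example `name/stage`) survive the update. A minimal, self-contained sketch of that idea; the helper name and the sample finalizer strings are illustrative, not the controller's actual identifiers:

```go
package main

import (
	"fmt"
	"strings"
)

// preserveFinalizers copies any finalizer from the live object whose name starts
// with one of the preserved prefixes; prefix matching covers "/stage" suffixes.
func preserveFinalizers(live, desired, preservedPrefixes []string) []string {
	for _, prefix := range preservedPrefixes {
		for _, f := range live {
			if strings.HasPrefix(f, prefix) {
				desired = append(desired, f)
			}
		}
	}
	return desired
}

func main() {
	live := []string{"post-delete-finalizer.argocd.argoproj.io/cleanup"}
	desired := []string{"resources-finalizer.argocd.argoproj.io"}
	// The stage-qualified post-delete finalizer is carried over onto the desired app.
	fmt.Println(preserveFinalizers(live, desired, []string{"post-delete-finalizer.argocd.argoproj.io"}))
}
```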
@@ -833,7 +810,7 @@ func (r *ApplicationSetReconciler) getCurrentApplications(ctx context.Context, a
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
// The function must be called after all generators had been called and generated applications
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
clusterList, err := utils.ListClusters(r.ClusterInformer)
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace)
if err != nil {
return fmt.Errorf("error listing clusters: %w", err)
}
@@ -899,14 +876,16 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
// Detect if the destination's server field does not match an existing cluster
matchingCluster := false
for _, cluster := range clusterList {
// A cluster matches if either the server matches OR the name matches
// This handles cases where:
// 1. The cluster is the in-cluster (server=https://kubernetes.default.svc, name=in-cluster)
// 2. A custom cluster has the same server as in-cluster but a different name
if destCluster.Server == cluster.Server || (destCluster.Name != "" && cluster.Name != "" && destCluster.Name == cluster.Name) {
matchingCluster = true
break
if destCluster.Server != cluster.Server {
continue
}

if destCluster.Name != cluster.Name {
continue
}

matchingCluster = true
break
}

if !matchingCluster {
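The two sides of this hunk check the destination differently: the commented version accepts a cluster when either the server URL or a non-empty name matches, while the other variant requires both fields to agree. A hedged sketch of the server-or-name rule described in the comments, with made-up cluster data:

```go
package main

import "fmt"

type clusterRef struct{ Name, Server string }

// matches reports whether a destination refers to a known cluster: either the
// server URLs agree, or both sides carry the same non-empty cluster name.
func matches(dest clusterRef, clusters []clusterRef) bool {
	for _, c := range clusters {
		if dest.Server == c.Server || (dest.Name != "" && c.Name != "" && dest.Name == c.Name) {
			return true
		}
	}
	return false
}

func main() {
	known := []clusterRef{{Name: "in-cluster", Server: "https://kubernetes.default.svc"}}
	fmt.Println(matches(clusterRef{Name: "in-cluster"}, known))                                     // true: name matches
	fmt.Println(matches(clusterRef{Name: "prod", Server: "https://kubernetes.default.svc"}, known)) // true: server matches
	fmt.Println(matches(clusterRef{Name: "staging", Server: "https://10.0.0.1"}, known))            // false: neither matches
}
```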
@@ -965,7 +944,7 @@ func (r *ApplicationSetReconciler) removeOwnerReferencesOnDeleteAppSet(ctx conte
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application) (map[string]bool, error) {
|
||||
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
appDependencyList, appStepMap := r.buildAppDependencyList(logCtx, appset, desiredApplications)
|
||||
|
||||
_, err := r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
|
||||
@@ -974,21 +953,21 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
|
||||
}
|
||||
|
||||
logCtx.Infof("ApplicationSet %v step list:", appset.Name)
|
||||
for stepIndex, applicationNames := range appDependencyList {
|
||||
logCtx.Infof("step %v: %+v", stepIndex+1, applicationNames)
|
||||
for i, step := range appDependencyList {
|
||||
logCtx.Infof("step %v: %+v", i+1, step)
|
||||
}
|
||||
|
||||
appsToSync := r.getAppsToSync(appset, appDependencyList, applications)
|
||||
logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appsToSync)
|
||||
appSyncMap := r.buildAppSyncMap(appset, appDependencyList, appMap)
|
||||
logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appsToSync, appStepMap)
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appSyncMap, appStepMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
|
||||
}
|
||||
|
||||
_ = r.updateApplicationSetApplicationStatusConditions(ctx, &appset)
|
||||
|
||||
return appsToSync, nil
|
||||
return appSyncMap, nil
|
||||
}
|
||||
|
||||
// this list tracks which Applications belong to each RollingUpdate step
|
||||
@@ -1052,61 +1031,65 @@ func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov
// if operator == NotIn, default to true
valueMatched := matchExpression.Operator == "NotIn"

if slices.Contains(matchExpression.Values, val) {
// first "In" match returns true
// first "NotIn" match returns false
return matchExpression.Operator == "In"
for _, value := range matchExpression.Values {
if val == value {
// first "In" match returns true
// first "NotIn" match returns false
return matchExpression.Operator == "In"
}
}
return valueMatched
}

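`labelMatchedExpression` evaluates a cluster label against an `In`/`NotIn` match expression: the default result is true only for `NotIn`, and the first value hit decides the outcome. A small stand-alone sketch of that rule using `slices.Contains`; the helper name is illustrative:

```go
package main

import (
	"fmt"
	"slices"
)

// matchesExpression evaluates a label value against an In/NotIn expression: the
// default is true only for NotIn, and a hit in Values returns true for In and
// false for NotIn.
func matchesExpression(val, operator string, values []string) bool {
	if slices.Contains(values, val) {
		return operator == "In"
	}
	return operator == "NotIn"
}

func main() {
	fmt.Println(matchesExpression("us-east-1", "In", []string{"us-east-1", "us-west-2"})) // true
	fmt.Println(matchesExpression("eu-west-1", "In", []string{"us-east-1", "us-west-2"})) // false
	fmt.Println(matchesExpression("eu-west-1", "NotIn", []string{"us-east-1"}))           // true
}
```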
// getAppsToSync returns a Map of Applications that should be synced in this progressive sync wave
func (r *ApplicationSetReconciler) getAppsToSync(applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, currentApplications []argov1alpha1.Application) map[string]bool {
// this map is used to determine which stage of Applications are ready to be updated in the reconciler loop
func (r *ApplicationSetReconciler) buildAppSyncMap(applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) map[string]bool {
appSyncMap := map[string]bool{}
currentAppsMap := map[string]bool{}
syncEnabled := true

for _, app := range currentApplications {
currentAppsMap[app.Name] = true
}
// healthy stages and the first non-healthy stage should have sync enabled
// every stage after should have sync disabled

for stepIndex := range appDependencyList {
for i := range appDependencyList {
// set the syncEnabled boolean for every Application in the current step
for _, appName := range appDependencyList[stepIndex] {
appSyncMap[appName] = true
for _, appName := range appDependencyList[i] {
appSyncMap[appName] = syncEnabled
}

// evaluate if we need to sync next waves
syncNextWave := true
for _, appName := range appDependencyList[stepIndex] {
// Check if application is created and managed by this AppSet, if it is not created yet, we cannot progress
if _, ok := currentAppsMap[appName]; !ok {
syncNextWave = false
break
}

// detect if we need to halt before progressing to the next step
for _, appName := range appDependencyList[i] {
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appName)
if idx == -1 {
// No Application status found, likely because the Application is being newly created
// This mean this wave is not yet completed
syncNextWave = false
// no Application status found, likely because the Application is being newly created
syncEnabled = false
break
}

appStatus := applicationSet.Status.ApplicationStatus[idx]
if appStatus.Status != argov1alpha1.ProgressiveSyncHealthy {
// At least one application in this wave is not yet healthy. We cannot proceed to the next wave
syncNextWave = false
app, ok := appMap[appName]
if !ok {
// application name not found in the list of applications managed by this ApplicationSet, maybe because it's being deleted
syncEnabled = false
break
}
syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus)
if !syncEnabled {
break
}
}
if !syncNextWave {
break
}
}

return appSyncMap
}

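`buildAppSyncMap` walks the ordered step list and leaves syncing enabled for every app in the healthy leading steps plus the first step that is not yet done; every later step stays gated. A hedged sketch of that gating, with a plain `healthy` map standing in for the per-app status and health checks:

```go
package main

import "fmt"

// buildSyncGate marks apps as allowed to sync up to and including the first
// step whose apps are not all healthy; every step after that stays gated.
func buildSyncGate(steps [][]string, healthy map[string]bool) map[string]bool {
	gate := map[string]bool{}
	enabled := true
	for _, step := range steps {
		for _, app := range step {
			gate[app] = enabled
		}
		for _, app := range step {
			if !healthy[app] {
				enabled = false
				break
			}
		}
	}
	return gate
}

func main() {
	steps := [][]string{{"dev"}, {"staging"}, {"prod"}}
	healthy := map[string]bool{"dev": true, "staging": false}
	fmt.Println(buildSyncGate(steps, healthy)) // dev:true staging:true prod:false
}
```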
func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool {
if progressiveSyncsRollingSyncStrategyEnabled(appset) {
// we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes
return isApplicationHealthy(app) && appStatus.Status == "Healthy"
}

return true
}

func isRollingSyncStrategy(appset *argov1alpha1.ApplicationSet) bool {
// It's only RollingSync if the type specifically sets it
return appset.Spec.Strategy != nil && appset.Spec.Strategy.Type == "RollingSync" && appset.Spec.Strategy.RollingSync != nil
@@ -1117,21 +1100,29 @@ func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.Application
return isRollingSyncStrategy(appset) && len(appset.Spec.Strategy.RollingSync.Steps) > 0
}

func isApplicationWithError(app argov1alpha1.Application) bool {
for _, condition := range app.Status.Conditions {
if condition.Type == argov1alpha1.ApplicationConditionInvalidSpecError {
return true
}
if condition.Type == argov1alpha1.ApplicationConditionUnknownError {
return true
}
func isProgressiveSyncDeletionOrderReversed(appset *argov1alpha1.ApplicationSet) bool {
// When progressive sync is enabled + deletionOrder is set to Reverse (case-insensitive)
return progressiveSyncsRollingSyncStrategyEnabled(appset) && strings.EqualFold(appset.Spec.Strategy.DeletionOrder, ReverseDeletionOrder)
}

func isApplicationHealthy(app argov1alpha1.Application) bool {
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)

if healthStatusString == "Healthy" && syncStatusString != "OutOfSync" && (operationPhaseString == "Succeeded" || operationPhaseString == "") {
return true
}
return false
}

func isProgressiveSyncDeletionOrderReversed(appset *argov1alpha1.ApplicationSet) bool {
// When progressive sync is enabled + deletionOrder is set to Reverse (case-insensitive)
return progressiveSyncsRollingSyncStrategyEnabled(appset) && strings.EqualFold(appset.Spec.Strategy.DeletionOrder, ReverseDeletionOrder)
func statusStrings(app argov1alpha1.Application) (string, string, string) {
healthStatusString := string(app.Status.Health.Status)
syncStatusString := string(app.Status.Sync.Status)
operationPhaseString := ""
if app.Status.OperationState != nil {
operationPhaseString = string(app.Status.OperationState.Phase)
}

return healthStatusString, syncStatusString, operationPhaseString
}

func getAppStep(appName string, appStepMap map[string]int) int {
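`isApplicationHealthy` and `statusStrings` reduce an Application to three strings and gate progression on them: Healthy health, a sync status other than OutOfSync, and either no operation or a succeeded one. A tiny sketch of that predicate over plain strings (values are illustrative):

```go
package main

import "fmt"

// healthGate mirrors the check above: an app counts as done for progressive
// sync when it is Healthy, not OutOfSync, and its last operation (if any)
// succeeded.
func healthGate(health, sync, operationPhase string) bool {
	return health == "Healthy" && sync != "OutOfSync" &&
		(operationPhase == "Succeeded" || operationPhase == "")
}

func main() {
	fmt.Println(healthGate("Healthy", "Synced", "Succeeded"))   // true
	fmt.Println(healthGate("Healthy", "OutOfSync", ""))         // false
	fmt.Println(healthGate("Progressing", "Synced", "Running")) // false
}
```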
@@ -1150,112 +1141,81 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))

for _, app := range applications {
appHealthStatus := app.Status.Health.Status
appSyncStatus := app.Status.Sync.Status
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)

idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)

currentAppStatus := argov1alpha1.ApplicationSetApplicationStatus{}
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)

if idx == -1 {
// AppStatus not found, set default status of "Waiting"
currentAppStatus = argov1alpha1.ApplicationSetApplicationStatus{
Application: app.Name,
TargetRevisions: app.Status.GetRevisions(),
LastTransitionTime: &now,
Message: "No Application status found, defaulting status to Waiting",
Status: argov1alpha1.ProgressiveSyncWaiting,
Message: "No Application status found, defaulting status to Waiting.",
Status: "Waiting",
Step: strconv.Itoa(getAppStep(app.Name, appStepMap)),
}
} else {
// we have an existing AppStatus
currentAppStatus = applicationSet.Status.ApplicationStatus[idx]
if !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions()) {
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
}
}

statusLogCtx := logCtx.WithFields(log.Fields{
"app.name": currentAppStatus.Application,
"app.health": appHealthStatus,
"app.sync": appSyncStatus,
"status.status": currentAppStatus.Status,
"status.message": currentAppStatus.Message,
"status.step": currentAppStatus.Step,
"status.targetRevisions": strings.Join(currentAppStatus.TargetRevisions, ","),
})

newAppStatus := currentAppStatus.DeepCopy()
newAppStatus.Step = strconv.Itoa(getAppStep(newAppStatus.Application, appStepMap))

if !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions()) {
// A new version is available in the application and we need to re-sync the application
newAppStatus.TargetRevisions = app.Status.GetRevisions()
newAppStatus.Message = "Application has pending changes, setting status to Waiting"
newAppStatus.Status = argov1alpha1.ProgressiveSyncWaiting
newAppStatus.LastTransitionTime = &now
currentAppStatus.TargetRevisions = app.Status.GetRevisions()
currentAppStatus.Status = "Waiting"
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
}

if newAppStatus.Status == argov1alpha1.ProgressiveSyncWaiting {
// App has changed to waiting because the TargetRevisions changed or it is a new selected app
// This does not mean we should always sync the app. The app may not be OutOfSync
// and may not require a sync if it does not have differences.
if appSyncStatus == argov1alpha1.SyncStatusCodeSynced {
if app.Status.Health.Status == health.HealthStatusHealthy {
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncHealthy
newAppStatus.Message = "Application resource has synced, updating status to Healthy"
} else {
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
newAppStatus.Message = "Application resource has synced, updating status to Progressing"
}
}
} else {
// The target revision is the same, so we need to evaluate the current revision progress
if currentAppStatus.Status == argov1alpha1.ProgressiveSyncPending {
// No need to evaluate status health further if the application did not change since our last transition
if app.Status.ReconciledAt == nil || (newAppStatus.LastTransitionTime != nil && app.Status.ReconciledAt.After(newAppStatus.LastTransitionTime.Time)) {
// Validate that at least one sync was trigerred after the pending transition time
if app.Status.OperationState != nil && app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
statusLogCtx = statusLogCtx.WithField("app.operation", app.Status.OperationState.Phase)
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
appOutdated := false
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
appOutdated = syncStatusString == "OutOfSync"
}

switch {
case app.Status.OperationState.Phase.Successful():
newAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing"
case app.Status.OperationState.Phase.Completed():
newAppStatus.Message = "Application resource completed a sync, updating status from Pending to Progressing"
default:
// If a sync fails or has errors, the Application should be configured with retry. It is not the appset's job to retry failed syncs
newAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing"
}
} else if isApplicationWithError(app) {
// Validate if the application has errors preventing it to be reconciled and perform syncs
// If it does, we move it to progressing.
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
newAppStatus.Message = "Application resource has error and cannot sync, updating status to Progressing"
}
}
}
if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
logCtx.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Waiting"
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
}

if currentAppStatus.Status == argov1alpha1.ProgressiveSyncProgressing {
// If the status has reached progressing, we know a sync has been triggered. No matter the result of that operation,
// we want an the app to reach the Healthy state for the current revision.
if appHealthStatus == health.HealthStatusHealthy && appSyncStatus == argov1alpha1.SyncStatusCodeSynced {
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncHealthy
newAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy"
}
if currentAppStatus.Status == "Pending" {
if !appOutdated && operationPhaseString == "Succeeded" {
logCtx.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
}
}

if newAppStatus.LastTransitionTime == &now {
statusLogCtx.WithFields(log.Fields{
"new_status.status": newAppStatus.Status,
"new_status.message": newAppStatus.Message,
"new_status.step": newAppStatus.Step,
"new_status.targetRevisions": strings.Join(newAppStatus.TargetRevisions, ","),
}).Info("Progressive sync application changed status")
if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
logCtx.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
}
appStatuses = append(appStatuses, *newAppStatus)

if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
logCtx.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
}

appStatuses = append(appStatuses, currentAppStatus)
}

err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
@@ -1267,7 +1227,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}

// check Applications that are in Waiting status and promote them to Pending if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appsToSync map[string]bool, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()

appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
@@ -1283,20 +1243,12 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
for _, appStatus := range applicationSet.Status.ApplicationStatus {
totalCountMap[appStepMap[appStatus.Application]]++

if appStatus.Status == argov1alpha1.ProgressiveSyncPending || appStatus.Status == argov1alpha1.ProgressiveSyncProgressing {
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
updateCountMap[appStepMap[appStatus.Application]]++
}
}

for _, appStatus := range applicationSet.Status.ApplicationStatus {
statusLogCtx := logCtx.WithFields(log.Fields{
"app.name": appStatus.Application,
"status.status": appStatus.Status,
"status.message": appStatus.Message,
"status.step": appStatus.Step,
"status.targetRevisions": strings.Join(appStatus.TargetRevisions, ","),
})

maxUpdateAllowed := true
maxUpdate := &intstr.IntOrString{}
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
@@ -1307,7 +1259,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if maxUpdate != nil {
maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
if err != nil {
statusLogCtx.Warnf("AppSet has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", maxUpdate, err)
logCtx.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
}

// ensure that percentage values greater than 0% always result in at least 1 Application being selected
@@ -1317,21 +1269,16 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress

if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
maxUpdateAllowed = false
statusLogCtx.Infof("Application is not allowed to update yet, %v/%v Applications already updating in step %v", updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap))
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap), applicationSet.Name)
}
}

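`maxUpdate` may be an integer or a percentage; `intstr.GetScaledValueFromIntOrPercent` resolves it against the number of applications in the step, and the controller bumps a non-zero percentage that rounds down to zero back up to one app. A hedged sketch of that arithmetic; the step size of 10 and the sample values are made up:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	total := 10 // applications in the current step (example value)

	for _, maxUpdate := range []intstr.IntOrString{
		intstr.FromInt32(3),
		intstr.FromString("30%"),
		intstr.FromString("1%"),
	} {
		val, err := intstr.GetScaledValueFromIntOrPercent(&maxUpdate, total, false)
		if err != nil {
			fmt.Println("invalid maxUpdate:", err)
			continue
		}
		// Mirror the controller's rule: a non-zero percentage never selects zero apps.
		if maxUpdate.Type == intstr.String && val == 0 && maxUpdate.StrVal != "0%" {
			val = 1
		}
		fmt.Printf("maxUpdate=%v -> allow %d app(s) updating\n", maxUpdate.String(), val)
	}
}
```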
if appStatus.Status == argov1alpha1.ProgressiveSyncWaiting && appsToSync[appStatus.Application] && maxUpdateAllowed {
if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
logCtx.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
appStatus.LastTransitionTime = &now
appStatus.Status = argov1alpha1.ProgressiveSyncPending
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing"

statusLogCtx.WithFields(log.Fields{
"new_status.status": appStatus.Status,
"new_status.message": appStatus.Message,
"new_status.step": appStatus.Step,
"new_status.targetRevisions": strings.Join(appStatus.TargetRevisions, ","),
}).Info("Progressive sync application changed status")
appStatus.Status = "Pending"
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
appStatus.Step = strconv.Itoa(getAppStep(appStatus.Application, appStepMap))

updateCountMap[appStepMap[appStatus.Application]]++
}
@@ -1356,9 +1303,9 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditio
completedWaves := map[string]bool{}
for _, appStatus := range applicationSet.Status.ApplicationStatus {
if v, ok := completedWaves[appStatus.Step]; !ok {
completedWaves[appStatus.Step] = appStatus.Status == argov1alpha1.ProgressiveSyncHealthy
completedWaves[appStatus.Step] = appStatus.Status == "Healthy"
} else {
completedWaves[appStatus.Step] = v && appStatus.Status == argov1alpha1.ProgressiveSyncHealthy
completedWaves[appStatus.Step] = v && appStatus.Status == "Healthy"
}
}

@@ -1580,31 +1527,30 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
return nil
}

func (r *ApplicationSetReconciler) syncDesiredApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appsToSync map[string]bool, desiredApplications []argov1alpha1.Application) []argov1alpha1.Application {
func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) []argov1alpha1.Application {
rolloutApps := []argov1alpha1.Application{}
for i := range desiredApplications {
for i := range validApps {
pruneEnabled := false

// ensure that Applications generated with RollingSync do not have an automated sync policy, since the AppSet controller will handle triggering the sync operation instead
if desiredApplications[i].Spec.SyncPolicy != nil && desiredApplications[i].Spec.SyncPolicy.IsAutomatedSyncEnabled() {
pruneEnabled = desiredApplications[i].Spec.SyncPolicy.Automated.Prune
desiredApplications[i].Spec.SyncPolicy.Automated.Enabled = ptr.To(false)
if validApps[i].Spec.SyncPolicy != nil && validApps[i].Spec.SyncPolicy.IsAutomatedSyncEnabled() {
pruneEnabled = validApps[i].Spec.SyncPolicy.Automated.Prune
validApps[i].Spec.SyncPolicy.Automated = nil
}

appSetStatusPending := false
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, desiredApplications[i].Name)
if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == argov1alpha1.ProgressiveSyncPending {
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, validApps[i].Name)
if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == "Pending" {
// only trigger a sync for Applications that are in Pending status, since this is governed by maxUpdate
appSetStatusPending = true
}

// check appsToSync to determine which Applications are ready to be updated and which should be skipped
if appsToSync[desiredApplications[i].Name] && appSetStatusPending {
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", desiredApplications[i].Name, pruneEnabled)
desiredApplications[i] = syncApplication(desiredApplications[i], pruneEnabled)
// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
validApps[i] = syncApplication(validApps[i], pruneEnabled)
}

rolloutApps = append(rolloutApps, desiredApplications[i])
rolloutApps = append(rolloutApps, validApps[i])
}
return rolloutApps
}

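In both versions of this function, an application only gets a sync operation attached when the wave map allows it and its ApplicationSet status is `Pending`, the budget handed out by `maxUpdate`; one variant additionally requires the app to be OutOfSync. A rough, illustrative sketch of that selection rule with invented app names:

```go
package main

import "fmt"

type appState struct {
	Allowed bool   // from the per-wave sync map
	Status  string // ApplicationSet-tracked status for this app
}

// pickAppsToSync returns the apps that should receive a sync operation this
// reconciliation: gated by the wave map and by the Pending budget from maxUpdate.
func pickAppsToSync(apps map[string]appState) []string {
	var out []string
	for name, st := range apps {
		if st.Allowed && st.Status == "Pending" {
			out = append(out, name)
		}
	}
	return out
}

func main() {
	apps := map[string]appState{
		"dev":     {Allowed: true, Status: "Pending"},
		"staging": {Allowed: true, Status: "Waiting"}, // allowed but not yet promoted by maxUpdate
		"prod":    {Allowed: false, Status: "Waiting"},
	}
	fmt.Println(pickAppsToSync(apps)) // [dev]
}
```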
File diff suppressed because it is too large
@@ -19,7 +19,6 @@ import (
appsetmetrics "github.com/argoproj/argo-cd/v3/applicationset/metrics"
"github.com/argoproj/argo-cd/v3/applicationset/services/mocks"
argov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/util/settings"
)

func TestRequeueAfter(t *testing.T) {
@@ -58,17 +57,12 @@ func TestRequeueAfter(t *testing.T) {
}
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
scmConfig := generators.NewSCMConfig("", []string{""}, true, true, nil, true)
clusterInformer, err := settings.NewClusterInformer(appClientset, "argocd")
require.NoError(t, err)

defer startAndSyncInformer(t, clusterInformer)()

terminalGenerators := map[string]generators.Generator{
"List": generators.NewListGenerator(),
"Clusters": generators.NewClusterGenerator(k8sClient, "argocd"),
"Clusters": generators.NewClusterGenerator(ctx, k8sClient, appClientset, "argocd"),
"Git": generators.NewGitGenerator(mockServer, "namespace"),
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), scmConfig),
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd", clusterInformer),
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
"PullRequest": generators.NewPullRequestGenerator(k8sClient, scmConfig),
}

@@ -86,28 +86,28 @@ func TestGenerateApplications(t *testing.T) {
}

t.Run(cc.name, func(t *testing.T) {
generatorMock := &genmock.Generator{}
generatorMock := genmock.Generator{}
generator := v1alpha1.ApplicationSetGenerator{
List: &v1alpha1.ListGenerator{},
}

generatorMock.EXPECT().GenerateParams(&generator, mock.AnythingOfType("*v1alpha1.ApplicationSet"), mock.Anything).
generatorMock.On("GenerateParams", &generator, mock.AnythingOfType("*v1alpha1.ApplicationSet"), mock.Anything).
Return(cc.params, cc.generateParamsError)

generatorMock.EXPECT().GetTemplate(&generator).
generatorMock.On("GetTemplate", &generator).
Return(&v1alpha1.ApplicationSetTemplate{})

rendererMock := &rendmock.Renderer{}
rendererMock := rendmock.Renderer{}

var expectedApps []v1alpha1.Application

if cc.generateParamsError == nil {
for _, p := range cc.params {
if cc.rendererError != nil {
rendererMock.EXPECT().RenderTemplateParams(GetTempApplication(cc.template), mock.AnythingOfType("*v1alpha1.ApplicationSetSyncPolicy"), p, false, []string(nil)).
rendererMock.On("RenderTemplateParams", GetTempApplication(cc.template), mock.AnythingOfType("*v1alpha1.ApplicationSetSyncPolicy"), p, false, []string(nil)).
Return(nil, cc.rendererError)
} else {
rendererMock.EXPECT().RenderTemplateParams(GetTempApplication(cc.template), mock.AnythingOfType("*v1alpha1.ApplicationSetSyncPolicy"), p, false, []string(nil)).
rendererMock.On("RenderTemplateParams", GetTempApplication(cc.template), mock.AnythingOfType("*v1alpha1.ApplicationSetSyncPolicy"), p, false, []string(nil)).
Return(&app, nil)
expectedApps = append(expectedApps, app)
}
@@ -115,9 +115,9 @@ func TestGenerateApplications(t *testing.T) {
}

generators := map[string]generators.Generator{
"List": generatorMock,
"List": &generatorMock,
}
renderer := rendererMock
renderer := &rendererMock

got, reason, err := GenerateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -200,26 +200,26 @@ func TestMergeTemplateApplications(t *testing.T) {
cc := c

t.Run(cc.name, func(t *testing.T) {
generatorMock := &genmock.Generator{}
generatorMock := genmock.Generator{}
generator := v1alpha1.ApplicationSetGenerator{
List: &v1alpha1.ListGenerator{},
}

generatorMock.EXPECT().GenerateParams(&generator, mock.AnythingOfType("*v1alpha1.ApplicationSet"), mock.Anything).
generatorMock.On("GenerateParams", &generator, mock.AnythingOfType("*v1alpha1.ApplicationSet"), mock.Anything).
Return(cc.params, nil)

generatorMock.EXPECT().GetTemplate(&generator).
generatorMock.On("GetTemplate", &generator).
Return(&cc.overrideTemplate)

rendererMock := &rendmock.Renderer{}
rendererMock := rendmock.Renderer{}

rendererMock.EXPECT().RenderTemplateParams(GetTempApplication(cc.expectedMerged), mock.AnythingOfType("*v1alpha1.ApplicationSetSyncPolicy"), cc.params[0], false, []string(nil)).
rendererMock.On("RenderTemplateParams", GetTempApplication(cc.expectedMerged), mock.AnythingOfType("*v1alpha1.ApplicationSetSyncPolicy"), cc.params[0], false, []string(nil)).
Return(&cc.expectedApps[0], nil)

generators := map[string]generators.Generator{
"List": generatorMock,
"List": &generatorMock,
}
renderer := rendererMock
renderer := &rendererMock

got, _, _ := GenerateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -312,19 +312,19 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
},
} {
t.Run(cases.name, func(t *testing.T) {
generatorMock := &genmock.Generator{}
generatorMock := genmock.Generator{}
generator := v1alpha1.ApplicationSetGenerator{
PullRequest: &v1alpha1.PullRequestGenerator{},
}

generatorMock.EXPECT().GenerateParams(&generator, mock.AnythingOfType("*v1alpha1.ApplicationSet"), mock.Anything).
generatorMock.On("GenerateParams", &generator, mock.AnythingOfType("*v1alpha1.ApplicationSet"), mock.Anything).
Return(cases.params, nil)

generatorMock.EXPECT().GetTemplate(&generator).
Return(&cases.template)
generatorMock.On("GetTemplate", &generator).
Return(&cases.template, nil)

generators := map[string]generators.Generator{
"PullRequest": generatorMock,
"PullRequest": &generatorMock,
}
renderer := &utils.Render{}

@@ -9,6 +9,7 @@ import (

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/argoproj/argo-cd/v3/applicationset/utils"
@@ -21,15 +22,19 @@ var _ Generator = (*ClusterGenerator)(nil)
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
type ClusterGenerator struct {
client.Client
ctx context.Context
clientset kubernetes.Interface
// namespace is the Argo CD namespace
namespace string
}

var render = &utils.Render{}

func NewClusterGenerator(c client.Client, namespace string) Generator {
func NewClusterGenerator(ctx context.Context, c client.Client, clientset kubernetes.Interface, namespace string) Generator {
g := &ClusterGenerator{
Client: c,
ctx: ctx,
clientset: clientset,
namespace: namespace,
}
return g
@@ -59,7 +64,16 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
// - Since local clusters do not have secrets, they do not have labels to match against
ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0

// Get cluster secrets using the cached controller-runtime client
// ListCluster will include the local cluster in the list of clusters
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
if err != nil {
return nil, fmt.Errorf("error listing clusters: %w", err)
}

if clustersFromArgoCD == nil {
return nil, nil
}

clusterSecrets, err := g.getSecretsByClusterName(logCtx, appSetGenerator)
if err != nil {
return nil, fmt.Errorf("error getting cluster secrets: %w", err)
@@ -68,14 +82,32 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
paramHolder := &paramHolder{isFlatMode: appSetGenerator.Clusters.FlatList}
logCtx.Debugf("Using flat mode = %t for cluster generator", paramHolder.isFlatMode)

// Convert map values to slice to check for an in-cluster secret
secretsList := make([]corev1.Secret, 0, len(clusterSecrets))
for _, secret := range clusterSecrets {
secretsList = append(secretsList, secret)
secretsFound := []corev1.Secret{}
for _, cluster := range clustersFromArgoCD {
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
// handled by the next step.
if secretForCluster, exists := clusterSecrets[cluster.Name]; exists {
secretsFound = append(secretsFound, secretForCluster)
} else if !ignoreLocalClusters {
// If there is no secret for the cluster, it's the local cluster, so handle it here.
params := map[string]any{}
params["name"] = cluster.Name
params["nameNormalized"] = cluster.Name
params["server"] = cluster.Server
params["project"] = ""

err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
if err != nil {
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
}

paramHolder.append(params)
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
}
}

// For each matching cluster secret (non-local clusters only)
for _, cluster := range clusterSecrets {
for _, cluster := range secretsFound {
params := g.getClusterParameters(cluster, appSet)

err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
@@ -87,23 +119,6 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
logCtx.WithField("cluster", cluster.Name).Debug("matched cluster secret")
}

// Add the in-cluster last if it doesn't have a secret, and we're not ignoring in-cluster
if !ignoreLocalClusters && !utils.SecretsContainInClusterCredentials(secretsList) {
params := map[string]any{}
params["name"] = argoappsetv1alpha1.KubernetesInClusterName
params["nameNormalized"] = argoappsetv1alpha1.KubernetesInClusterName
params["server"] = argoappsetv1alpha1.KubernetesInternalAPIServerAddr
params["project"] = ""

err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
if err != nil {
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
}

paramHolder.append(params)
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
}

return paramHolder.consolidate(), nil
}

@@ -171,7 +186,7 @@ func (g *ClusterGenerator) getSecretsByClusterName(log *log.Entry, appSetGenerat
return nil, fmt.Errorf("error converting label selector: %w", err)
}

if err := g.List(context.Background(), clusterSecretList, client.InNamespace(g.namespace), client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
if err := g.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
return nil, err
}
log.Debugf("clusters matching labels: %d", len(clusterSecretList.Items))

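For the local (in-cluster) destination the cluster generator emits a fixed parameter set: `name`, `nameNormalized`, `server`, and an empty `project`, before any `values.*` entries are templated in. A small illustrative dump of that map using the default in-cluster values:

```go
package main

import "fmt"

func main() {
	// Parameters exposed for the in-cluster destination; a cluster backed by a
	// secret would instead carry the secret's name, server and labels.
	params := map[string]any{
		"name":           "in-cluster",
		"nameNormalized": "in-cluster",
		"server":         "https://kubernetes.default.svc",
		"project":        "",
	}
	for k, v := range params {
		fmt.Printf("%s=%v\n", k, v)
	}
}
```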
@@ -7,9 +7,12 @@ import (

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

kubefake "k8s.io/client-go/kubernetes/fake"

"github.com/argoproj/argo-cd/v3/applicationset/utils"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"

@@ -296,15 +299,23 @@ func TestGenerateParams(t *testing.T) {
},
}

// convert []client.Object to []runtime.Object, for use by kubefake package
runtimeClusters := []runtime.Object{}
for _, clientCluster := range clusters {
runtimeClusters = append(runtimeClusters, clientCluster)
}

for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)

fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
cl := &possiblyErroringFakeCtrlRuntimeClient{
fakeClient,
testCase.clientError,
}

clusterGenerator := NewClusterGenerator(cl, "namespace")
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -325,25 +336,12 @@ func TestGenerateParams(t *testing.T) {
require.EqualError(t, err, testCase.expectedError.Error())
} else {
require.NoError(t, err)
assertEqualParamsFlat(t, testCase.expected, got, testCase.isFlatMode)
assert.ElementsMatch(t, testCase.expected, got)
}
})
}
}

func assertEqualParamsFlat(t *testing.T, expected, got []map[string]any, isFlatMode bool) {
t.Helper()
if isFlatMode && len(expected) == 1 && len(got) == 1 {
expectedClusters, ok1 := expected[0]["clusters"].([]map[string]any)
gotClusters, ok2 := got[0]["clusters"].([]map[string]any)
if ok1 && ok2 {
assert.ElementsMatch(t, expectedClusters, gotClusters)
return
}
}
assert.ElementsMatch(t, expected, got)
}

func TestGenerateParamsGoTemplate(t *testing.T) {
clusters := []client.Object{
&corev1.Secret{
@@ -839,15 +837,23 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
},
}

// convert []client.Object to []runtime.Object, for use by kubefake package
runtimeClusters := []runtime.Object{}
for _, clientCluster := range clusters {
runtimeClusters = append(runtimeClusters, clientCluster)
}

for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)

fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
cl := &possiblyErroringFakeCtrlRuntimeClient{
fakeClient,
testCase.clientError,
}

clusterGenerator := NewClusterGenerator(cl, "namespace")
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -870,7 +876,7 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
require.EqualError(t, err, testCase.expectedError.Error())
} else {
require.NoError(t, err)
assertEqualParamsFlat(t, testCase.expected, got, testCase.isFlatMode)
assert.ElementsMatch(t, testCase.expected, got)
}
})
}

@@ -19,27 +19,24 @@ import (

"github.com/argoproj/argo-cd/v3/applicationset/utils"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/util/settings"
)

var _ Generator = (*DuckTypeGenerator)(nil)

// DuckTypeGenerator generates Applications for some or all clusters registered with ArgoCD.
type DuckTypeGenerator struct {
ctx context.Context
dynClient dynamic.Interface
clientset kubernetes.Interface
namespace string // namespace is the Argo CD namespace
clusterInformer *settings.ClusterInformer
ctx context.Context
dynClient dynamic.Interface
clientset kubernetes.Interface
namespace string // namespace is the Argo CD namespace
}

func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string, clusterInformer *settings.ClusterInformer) Generator {
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string) Generator {
g := &DuckTypeGenerator{
ctx: ctx,
dynClient: dynClient,
clientset: clientset,
namespace: namespace,
clusterInformer: clusterInformer,
ctx: ctx,
dynClient: dynClient,
clientset: clientset,
namespace: namespace,
}
return g
}
@@ -68,7 +65,8 @@ func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.A
return nil, ErrEmptyAppSetGenerator
}

clustersFromArgoCD, err := utils.ListClusters(g.clusterInformer)
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
if err != nil {
return nil, fmt.Errorf("error listing clusters: %w", err)
}

@@ -11,13 +11,11 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic/fake"
dynfake "k8s.io/client-go/dynamic/fake"
kubefake "k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/controller-runtime/pkg/client"

argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/test"
"github.com/argoproj/argo-cd/v3/util/settings"
)

const (
@@ -292,14 +290,9 @@ func TestGenerateParamsForDuckType(t *testing.T) {
Resource: "ducks",
}: "DuckList"}

fakeDynClient := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)

clusterInformer, err := settings.NewClusterInformer(appClientset, "namespace")
require.NoError(t, err)

defer test.StartInformer(clusterInformer)()

duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace", clusterInformer)
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace")

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -593,14 +586,9 @@ func TestGenerateParamsForDuckTypeGoTemplate(t *testing.T) {
Resource: "ducks",
}: "DuckList"}

fakeDynClient := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)

clusterInformer, err := settings.NewClusterInformer(appClientset, "namespace")
require.NoError(t, err)

defer test.StartInformer(clusterInformer)()

duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace", clusterInformer)
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace")

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{

@@ -1,6 +1,7 @@
package generators

import (
"context"
"testing"

log "github.com/sirupsen/logrus"
@@ -15,6 +16,8 @@ import (

"github.com/stretchr/testify/mock"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
kubefake "k8s.io/client-go/kubernetes/fake"
crtclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
@@ -332,14 +335,20 @@ func getMockClusterGenerator() Generator {
Type: corev1.SecretType("Opaque"),
},
}
runtimeClusters := []runtime.Object{}
for _, clientCluster := range clusters {
runtimeClusters = append(runtimeClusters, clientCluster)
}
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)

fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
return NewClusterGenerator(fakeClient, "namespace")
return NewClusterGenerator(context.Background(), fakeClient, appClientset, "namespace")
}

func getMockGitGenerator() Generator {
argoCDServiceMock := &mocks.Repos{}
argoCDServiceMock.EXPECT().GetDirectories(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
gitGenerator := NewGitGenerator(argoCDServiceMock, "namespace")
argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
gitGenerator := NewGitGenerator(&argoCDServiceMock, "namespace")
return gitGenerator
}

@@ -542,7 +551,7 @@ func TestInterpolateGeneratorError(t *testing.T) {
},
useGoTemplate: true,
goTemplateOptions: []string{},
}, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: "failed to replace parameters in generator: failed to execute go template {{ index .rmap (default .override .test) }}: template: base:1:3: executing \"base\" at <index .rmap (default .override .test)>: error calling index: index of untyped nil"},
}, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: "failed to replace parameters in generator: failed to execute go template {{ index .rmap (default .override .test) }}: template: :1:3: executing \"\" at <index .rmap (default .override .test)>: error calling index: index of untyped nil"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

@@ -3,7 +3,6 @@ package generators
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -169,7 +168,9 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
maps.Copy(fileContentMap, retrievedFiles)
|
||||
for absPath, content := range retrievedFiles {
|
||||
fileContentMap[absPath] = content
|
||||
}
|
||||
}
|
||||
|
||||
// Now remove files matching any exclude pattern
|
||||
@@ -241,7 +242,9 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
params := map[string]any{}
|
||||
|
||||
if useGoTemplate {
|
||||
maps.Copy(params, objectFound)
|
||||
for k, v := range objectFound {
|
||||
params[k] = v
|
||||
}
|
||||
|
||||
paramPath := map[string]any{}
|
||||
|
||||
@@ -313,7 +316,7 @@ func (g *GitGenerator) filterApps(directories []argoprojiov1alpha1.GitDirectoryG
|
||||
appExclude = true
|
||||
}
|
||||
}
|
||||
// Whenever there is a path with exclude: true it won't be included, even if it is included in a different path pattern
|
||||
// Whenever there is a path with exclude: true it wont be included, even if it is included in a different path pattern
|
||||
if appInclude && !appExclude {
|
||||
res = append(res, appPath)
|
||||
}
|
||||
|
||||
@@ -320,11 +320,11 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := mocks.NewRepos(t)
|
||||
argoCDServiceMock := mocks.Repos{}
|
||||
|
||||
argoCDServiceMock.EXPECT().GetDirectories(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
gitGenerator := NewGitGenerator(argoCDServiceMock, "")
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
|
||||
applicationSetInfo := v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
@@ -357,6 +357,8 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
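The two mock styles alternating throughout these test hunks are equivalent testify idioms: a plain `mocks.Repos{}` value programmed with string-based `.On("Method", ...)` calls, versus the mockery-generated constructor `mocks.NewRepos(t)` whose `.EXPECT().Method(...)` builder is type-checked and asserts its expectations automatically. A minimal, self-contained sketch of the underlying mechanism, using a hypothetical `Greeter` interface rather than the argo-cd `services.Repos`:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// greeterMock is the hand-rolled style seen on one side of the diff:
// embed mock.Mock and forward calls through Called().
type greeterMock struct{ mock.Mock }

func (g *greeterMock) Greet(name string) (string, error) {
	args := g.Called(name)
	return args.String(0), args.Error(1)
}

func TestGreeterMockStyles(t *testing.T) {
	// String-based expectation, as in argoCDServiceMock.On("GetDirectories", ...).
	m := &greeterMock{}
	m.On("Greet", mock.Anything).Return("hello", nil)

	out, err := m.Greet("world")
	if err != nil || out != "hello" {
		t.Fatalf("unexpected result: %q, %v", out, err)
	}
	m.AssertExpectations(t)

	// The other side of the diff uses mockery-generated mocks
	// (mocks.NewRepos(t) plus .EXPECT().GetDirectories(...)), which wrap this
	// same mechanism in a type-checked builder and auto-assert via t.Cleanup.
}
```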
@@ -621,11 +623,11 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.NewRepos(t)
argoCDServiceMock := mocks.Repos{}

argoCDServiceMock.EXPECT().GetDirectories(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)

gitGenerator := NewGitGenerator(argoCDServiceMock, "")
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
applicationSetInfo := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -658,6 +660,8 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
})
}
}
@@ -996,11 +1000,11 @@ cluster:
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.NewRepos(t)
argoCDServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)

gitGenerator := NewGitGenerator(argoCDServiceMock, "")
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
applicationSetInfo := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -1032,6 +1036,8 @@ cluster:
require.NoError(t, err)
assert.ElementsMatch(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
})
}
}
@@ -1325,7 +1331,7 @@ env: testing
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.NewRepos(t)
argoCDServiceMock := mocks.Repos{}

// IMPORTANT: we try to get the files from the repo server that matches the patterns
// If we find those files also satisfy the exclude pattern, we remove them from map
@@ -1333,16 +1339,18 @@ env: testing
// With the below mock setup, we make sure that if the GetFiles() function gets called
// for a include or exclude pattern, it should always return the includeFiles or excludeFiles.
for _, pattern := range testCaseCopy.excludePattern {
argoCDServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
argoCDServiceMock.
On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
Return(testCaseCopy.excludeFiles, testCaseCopy.repoPathsError)
}

for _, pattern := range testCaseCopy.includePattern {
argoCDServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
argoCDServiceMock.
On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
Return(testCaseCopy.includeFiles, testCaseCopy.repoPathsError)
}

gitGenerator := NewGitGenerator(argoCDServiceMock, "")
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
applicationSetInfo := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -1374,6 +1382,8 @@ env: testing
require.NoError(t, err)
assert.ElementsMatch(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
})
}
}
@@ -1662,7 +1672,7 @@ env: testing
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.NewRepos(t)
argoCDServiceMock := mocks.Repos{}

// IMPORTANT: we try to get the files from the repo server that matches the patterns
// If we find those files also satisfy the exclude pattern, we remove them from map
@@ -1670,16 +1680,18 @@ env: testing
// With the below mock setup, we make sure that if the GetFiles() function gets called
// for a include or exclude pattern, it should always return the includeFiles or excludeFiles.
for _, pattern := range testCaseCopy.excludePattern {
argoCDServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
argoCDServiceMock.
On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
Return(testCaseCopy.excludeFiles, testCaseCopy.repoPathsError)
}

for _, pattern := range testCaseCopy.includePattern {
argoCDServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
argoCDServiceMock.
On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
Return(testCaseCopy.includeFiles, testCaseCopy.repoPathsError)
}

gitGenerator := NewGitGenerator(argoCDServiceMock, "")
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
applicationSetInfo := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -1711,6 +1723,8 @@ env: testing
require.NoError(t, err)
assert.ElementsMatch(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
})
}
}
@@ -1894,23 +1908,25 @@ func TestGitGeneratorParamsFromFilesWithExcludeOptionGoTemplate(t *testing.T) {
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.NewRepos(t)
argoCDServiceMock := mocks.Repos{}
// IMPORTANT: we try to get the files from the repo server that matches the patterns
// If we find those files also satisfy the exclude pattern, we remove them from map
// This is generally done by the g.repos.GetFiles() function.
// With the below mock setup, we make sure that if the GetFiles() function gets called
// for a include or exclude pattern, it should always return the includeFiles or excludeFiles.
for _, pattern := range testCaseCopy.excludePattern {
argoCDServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
argoCDServiceMock.
On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
Return(testCaseCopy.excludeFiles, testCaseCopy.repoPathsError)
}

for _, pattern := range testCaseCopy.includePattern {
argoCDServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
argoCDServiceMock.
On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, pattern, mock.Anything, mock.Anything).
Return(testCaseCopy.includeFiles, testCaseCopy.repoPathsError)
}

gitGenerator := NewGitGenerator(argoCDServiceMock, "")
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
applicationSetInfo := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -1942,6 +1958,8 @@ func TestGitGeneratorParamsFromFilesWithExcludeOptionGoTemplate(t *testing.T) {
require.NoError(t, err)
assert.ElementsMatch(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
})
}
}
@@ -2261,11 +2279,11 @@ cluster:
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.NewRepos(t)
argoCDServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)

gitGenerator := NewGitGenerator(argoCDServiceMock, "")
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
applicationSetInfo := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -2297,6 +2315,8 @@ cluster:
require.NoError(t, err)
assert.ElementsMatch(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
})
}
}
@@ -2462,7 +2482,7 @@ func TestGitGenerator_GenerateParams(t *testing.T) {
},
}
for _, testCase := range cases {
argoCDServiceMock := mocks.NewRepos(t)
argoCDServiceMock := mocks.Repos{}

if testCase.callGetDirectories {
var project any
@@ -2472,9 +2492,9 @@ func TestGitGenerator_GenerateParams(t *testing.T) {
project = mock.Anything
}

argoCDServiceMock.EXPECT().GetDirectories(mock.Anything, mock.Anything, mock.Anything, project, mock.Anything, mock.Anything).Return(testCase.repoApps, testCase.repoPathsError)
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, project, mock.Anything, mock.Anything).Return(testCase.repoApps, testCase.repoPathsError)
}
gitGenerator := NewGitGenerator(argoCDServiceMock, "argocd")
gitGenerator := NewGitGenerator(&argoCDServiceMock, "argocd")

scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
@@ -2490,5 +2510,7 @@ func TestGitGenerator_GenerateParams(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, testCase.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
}
}

@@ -8,15 +8,16 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kubefake "k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

"github.com/argoproj/argo-cd/v3/applicationset/services/mocks"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

generatorsMock "github.com/argoproj/argo-cd/v3/applicationset/generators/mocks"
servicesMocks "github.com/argoproj/argo-cd/v3/applicationset/services/mocks"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
)

@@ -135,7 +136,7 @@ func TestMatrixGenerate(t *testing.T) {
testCaseCopy := testCase // Since tests may run in parallel

t.Run(testCaseCopy.name, func(t *testing.T) {
genMock := &generatorsMock.Generator{}
genMock := &generatorMock{}
appSet := &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -148,7 +149,7 @@ func TestMatrixGenerate(t *testing.T) {
Git: g.Git,
List: g.List,
}
genMock.EXPECT().GenerateParams(mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet, mock.Anything).Return([]map[string]any{
genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet, mock.Anything).Return([]map[string]any{
{
"path": "app1",
"path.basename": "app1",
@@ -161,7 +162,7 @@ func TestMatrixGenerate(t *testing.T) {
},
}, nil)

genMock.EXPECT().GetTemplate(&gitGeneratorSpec).
genMock.On("GetTemplate", &gitGeneratorSpec).
Return(&v1alpha1.ApplicationSetTemplate{})
}

@@ -342,7 +343,7 @@ func TestMatrixGenerateGoTemplate(t *testing.T) {
testCaseCopy := testCase // Since tests may run in parallel

t.Run(testCaseCopy.name, func(t *testing.T) {
genMock := &generatorsMock.Generator{}
genMock := &generatorMock{}
appSet := &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -357,7 +358,7 @@ func TestMatrixGenerateGoTemplate(t *testing.T) {
Git: g.Git,
List: g.List,
}
genMock.EXPECT().GenerateParams(mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet, mock.Anything).Return([]map[string]any{
genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet, mock.Anything).Return([]map[string]any{
{
"path": map[string]string{
"path": "app1",
@@ -374,7 +375,7 @@ func TestMatrixGenerateGoTemplate(t *testing.T) {
},
}, nil)

genMock.EXPECT().GetTemplate(&gitGeneratorSpec).
genMock.On("GetTemplate", &gitGeneratorSpec).
Return(&v1alpha1.ApplicationSetTemplate{})
}

@@ -506,7 +507,7 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
testCaseCopy := testCase // Since tests may run in parallel

t.Run(testCaseCopy.name, func(t *testing.T) {
mock := &generatorsMock.Generator{}
mock := &generatorMock{}

for _, g := range testCaseCopy.baseGenerators {
gitGeneratorSpec := v1alpha1.ApplicationSetGenerator{
@@ -516,7 +517,7 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
SCMProvider: g.SCMProvider,
ClusterDecisionResource: g.ClusterDecisionResource,
}
mock.EXPECT().GetRequeueAfter(&gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter)
mock.On("GetRequeueAfter", &gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter, nil)
}

matrixGenerator := NewMatrixGenerator(
@@ -623,27 +624,33 @@ func TestInterpolatedMatrixGenerate(t *testing.T) {
Type: corev1.SecretType("Opaque"),
},
}
// convert []client.Object to []runtime.Object, for use by kubefake package
runtimeClusters := []runtime.Object{}
for _, clientCluster := range clusters {
runtimeClusters = append(runtimeClusters, clientCluster)
}

for _, testCase := range testCases {
testCaseCopy := testCase // Since tests may run in parallel

t.Run(testCaseCopy.name, func(t *testing.T) {
genMock := &generatorsMock.Generator{}
genMock := &generatorMock{}
appSet := &v1alpha1.ApplicationSet{}

appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
cl := &possiblyErroringFakeCtrlRuntimeClient{
fakeClient,
testCase.clientError,
}
clusterGenerator := NewClusterGenerator(cl, "namespace")
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")

for _, g := range testCaseCopy.baseGenerators {
gitGeneratorSpec := v1alpha1.ApplicationSetGenerator{
Git: g.Git,
Clusters: g.Clusters,
}
genMock.EXPECT().GenerateParams(mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet, mock.Anything).Return([]map[string]any{
genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]any{
{
"path": "examples/git-generator-files-discovery/cluster-config/dev/config.json",
"path.basename": "dev",
@@ -655,7 +662,7 @@ func TestInterpolatedMatrixGenerate(t *testing.T) {
"path.basenameNormalized": "prod",
},
}, nil)
genMock.EXPECT().GetTemplate(&gitGeneratorSpec).
genMock.On("GetTemplate", &gitGeneratorSpec).
Return(&v1alpha1.ApplicationSetTemplate{})
}
matrixGenerator := NewMatrixGenerator(
@@ -796,31 +803,37 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
Type: corev1.SecretType("Opaque"),
},
}
// convert []client.Object to []runtime.Object, for use by kubefake package
runtimeClusters := []runtime.Object{}
for _, clientCluster := range clusters {
runtimeClusters = append(runtimeClusters, clientCluster)
}

for _, testCase := range testCases {
testCaseCopy := testCase // Since tests may run in parallel

t.Run(testCaseCopy.name, func(t *testing.T) {
genMock := &generatorsMock.Generator{}
genMock := &generatorMock{}
appSet := &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
},
}

appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
cl := &possiblyErroringFakeCtrlRuntimeClient{
fakeClient,
testCase.clientError,
}
clusterGenerator := NewClusterGenerator(cl, "namespace")
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")

for _, g := range testCaseCopy.baseGenerators {
gitGeneratorSpec := v1alpha1.ApplicationSetGenerator{
Git: g.Git,
Clusters: g.Clusters,
}
genMock.EXPECT().GenerateParams(mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet, mock.Anything).Return([]map[string]any{
genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]any{
{
"path": map[string]string{
"path": "examples/git-generator-files-discovery/cluster-config/dev/config.json",
@@ -836,7 +849,7 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
},
},
}, nil)
genMock.EXPECT().GetTemplate(&gitGeneratorSpec).
genMock.On("GetTemplate", &gitGeneratorSpec).
Return(&v1alpha1.ApplicationSetTemplate{})
}
matrixGenerator := NewMatrixGenerator(
@@ -956,7 +969,7 @@ func TestMatrixGenerateListElementsYaml(t *testing.T) {
testCaseCopy := testCase // Since tests may run in parallel

t.Run(testCaseCopy.name, func(t *testing.T) {
genMock := &generatorsMock.Generator{}
genMock := &generatorMock{}
appSet := &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -971,7 +984,7 @@ func TestMatrixGenerateListElementsYaml(t *testing.T) {
Git: g.Git,
List: g.List,
}
genMock.EXPECT().GenerateParams(mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet, mock.Anything).Return([]map[string]any{{
genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]any{{
"foo": map[string]any{
"bar": []any{
map[string]any{
@@ -996,7 +1009,7 @@ func TestMatrixGenerateListElementsYaml(t *testing.T) {
},
},
}}, nil)
genMock.EXPECT().GetTemplate(&gitGeneratorSpec).
genMock.On("GetTemplate", &gitGeneratorSpec).
Return(&v1alpha1.ApplicationSetTemplate{})
}

@@ -1024,6 +1037,28 @@ func TestMatrixGenerateListElementsYaml(t *testing.T) {
}
}

type generatorMock struct {
mock.Mock
}

func (g *generatorMock) GetTemplate(appSetGenerator *v1alpha1.ApplicationSetGenerator) *v1alpha1.ApplicationSetTemplate {
args := g.Called(appSetGenerator)

return args.Get(0).(*v1alpha1.ApplicationSetTemplate)
}

func (g *generatorMock) GenerateParams(appSetGenerator *v1alpha1.ApplicationSetGenerator, appSet *v1alpha1.ApplicationSet, _ client.Client) ([]map[string]any, error) {
args := g.Called(appSetGenerator, appSet)

return args.Get(0).([]map[string]any), args.Error(1)
}

func (g *generatorMock) GetRequeueAfter(appSetGenerator *v1alpha1.ApplicationSetGenerator) time.Duration {
args := g.Called(appSetGenerator)

return args.Get(0).(time.Duration)
}

func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
// Given a matrix generator over a list generator and a git files generator, the nested git files generator should
// be treated as a files generator, and it should produce parameters.
@@ -1037,11 +1072,11 @@ func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
// Now instead of checking for nil, we check whether the field is a non-empty slice. This test prevents a regression
// of that bug.

listGeneratorMock := &generatorsMock.Generator{}
listGeneratorMock.EXPECT().GenerateParams(mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), mock.AnythingOfType("*v1alpha1.ApplicationSet"), mock.Anything).Return([]map[string]any{
listGeneratorMock := &generatorMock{}
listGeneratorMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), mock.AnythingOfType("*v1alpha1.ApplicationSet"), mock.Anything).Return([]map[string]any{
{"some": "value"},
}, nil)
listGeneratorMock.EXPECT().GetTemplate(mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator")).Return(&v1alpha1.ApplicationSetTemplate{})
listGeneratorMock.On("GetTemplate", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator")).Return(&v1alpha1.ApplicationSetTemplate{})

gitGeneratorSpec := &v1alpha1.GitGenerator{
RepoURL: "https://git.example.com",
@@ -1050,10 +1085,10 @@ func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
},
}

repoServiceMock := &servicesMocks.Repos{}
repoServiceMock.EXPECT().GetFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
repoServiceMock := &mocks.Repos{}
repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
"some/path.json": []byte("test: content"),
}, nil).Maybe()
}, nil)
gitGenerator := NewGitGenerator(repoServiceMock, "")

matrixGenerator := NewMatrixGenerator(map[string]Generator{

@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"maps"
"strconv"
"strings"
"time"
@@ -116,7 +115,9 @@ func (g *PluginGenerator) generateParams(appSetGenerator *argoprojiov1alpha1.App
params := map[string]any{}

if useGoTemplate {
maps.Copy(params, objectFound)
for k, v := range objectFound {
params[k] = v
}
} else {
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
if err != nil {

@@ -96,12 +96,18 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
var shortSHALength int
var shortSHALength7 int
for _, pull := range pulls {
shortSHALength = min(len(pull.HeadSHA), 8)
shortSHALength = 8
if len(pull.HeadSHA) < 8 {
shortSHALength = len(pull.HeadSHA)
}

shortSHALength7 = min(len(pull.HeadSHA), 7)
shortSHALength7 = 7
if len(pull.HeadSHA) < 7 {
shortSHALength7 = len(pull.HeadSHA)
}

paramMap := map[string]any{
"number": strconv.FormatInt(pull.Number, 10),
"number": strconv.Itoa(pull.Number),
"title": pull.Title,
"branch": pull.Branch,
"branch_slug": slug.Make(pull.Branch),
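One side of this hunk clamps the short-SHA length with explicit if statements, the other with the `min` builtin that Go 1.21 added; the `strconv.FormatInt` versus `strconv.Itoa` pair likewise tracks whether the pull request number is declared as `int64` or `int`. A small self-contained sketch of the clamping and formatting only, not of the generator itself:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	headSHA := "1a8dd249c04a"

	// Go 1.21+: min is a builtin, so the clamp is a single expression.
	shortLen := min(len(headSHA), 8)

	// Pre-1.21 style, as on the other side of the diff.
	shortLen7 := 7
	if len(headSHA) < 7 {
		shortLen7 = len(headSHA)
	}

	// Formatting the PR number depends on its declared type.
	var asInt64 int64 = 15442
	asInt := 15442
	fmt.Println(headSHA[:shortLen], headSHA[:shortLen7])
	fmt.Println(strconv.FormatInt(asInt64, 10), strconv.Itoa(asInt))
}
```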
@@ -237,9 +243,9 @@ func (g *PullRequestGenerator) github(ctx context.Context, cfg *argoprojiov1alph
}

if g.enableGitHubAPIMetrics {
return pullrequest.NewGithubAppService(ctx, *auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels, httpClient)
return pullrequest.NewGithubAppService(*auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels, httpClient)
}
return pullrequest.NewGithubAppService(ctx, *auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels)
return pullrequest.NewGithubAppService(*auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels)
}

// always default to token, even if not set (public access)

@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"net/http"
"slices"
"strings"
"time"

@@ -106,8 +105,10 @@ func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, g
return nil
}

if slices.Contains(allowedScmProviders, url) {
return nil
for _, allowedScmProvider := range allowedScmProviders {
if url == allowedScmProvider {
return nil
}
}

log.WithFields(log.Fields{
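The allow-list check in `ScmProviderAllowed` is the same on both sides; only the idiom differs: a manual range loop versus `slices.Contains` from the standard library (Go 1.21). A minimal equivalent over a plain string slice:

```go
package main

import (
	"fmt"
	"slices"
)

func allowed(allowedScmProviders []string, url string) bool {
	// Equivalent manual loop:
	//   for _, p := range allowedScmProviders { if url == p { return true } }
	return slices.Contains(allowedScmProviders, url)
}

func main() {
	providers := []string{"https://github.com", "https://gitlab.example.com"}
	fmt.Println(allowed(providers, "https://github.com"))   // true
	fmt.Println(allowed(providers, "https://evil.example")) // false
}
```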
@@ -243,9 +244,15 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
var shortSHALength int
|
||||
var shortSHALength7 int
|
||||
for _, repo := range repos {
|
||||
shortSHALength = min(len(repo.SHA), 8)
|
||||
shortSHALength = 8
|
||||
if len(repo.SHA) < 8 {
|
||||
shortSHALength = len(repo.SHA)
|
||||
}
|
||||
|
||||
shortSHALength7 = min(len(repo.SHA), 7)
|
||||
shortSHALength7 = 7
|
||||
if len(repo.SHA) < 7 {
|
||||
shortSHALength7 = len(repo.SHA)
|
||||
}
|
||||
|
||||
params := map[string]any{
|
||||
"organization": repo.Organization,
|
||||
@@ -289,9 +296,9 @@ func (g *SCMProviderGenerator) githubProvider(ctx context.Context, github *argop
|
||||
}
|
||||
|
||||
if g.enableGitHubAPIMetrics {
|
||||
return scm_provider.NewGithubAppProviderFor(ctx, *auth, github.Organization, github.API, github.AllBranches, httpClient)
|
||||
return scm_provider.NewGithubAppProviderFor(*auth, github.Organization, github.API, github.AllBranches, httpClient)
|
||||
}
|
||||
return scm_provider.NewGithubAppProviderFor(ctx, *auth, github.Organization, github.API, github.AllBranches)
|
||||
return scm_provider.NewGithubAppProviderFor(*auth, github.Organization, github.API, github.AllBranches)
|
||||
}
|
||||
|
||||
token, err := utils.GetSecretRef(ctx, g.client, github.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
|
||||
|
||||
@@ -8,16 +8,15 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig, clusterInformer *settings.ClusterInformer) map[string]Generator {
|
||||
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
|
||||
terminalGenerators := map[string]Generator{
|
||||
"List": NewListGenerator(),
|
||||
"Clusters": NewClusterGenerator(c, controllerNamespace),
|
||||
"Clusters": NewClusterGenerator(ctx, c, k8sClient, controllerNamespace),
|
||||
"Git": NewGitGenerator(argoCDService, controllerNamespace),
|
||||
"SCMProvider": NewSCMProviderGenerator(c, scmConfig),
|
||||
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace, clusterInformer),
|
||||
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace),
|
||||
"PullRequest": NewPullRequestGenerator(c, scmConfig),
|
||||
"Plugin": NewPluginGenerator(c, controllerNamespace),
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"maps"
|
||||
)
|
||||
|
||||
func appendTemplatedValues(values map[string]string, params map[string]any, useGoTemplate bool, goTemplateOptions []string) error {
|
||||
@@ -27,7 +26,9 @@ func appendTemplatedValues(values map[string]string, params map[string]any, useG
|
||||
}
|
||||
}
|
||||
|
||||
maps.Copy(params, tmp)
|
||||
for key, value := range tmp {
|
||||
params[key] = value
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
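The same substitution recurs in the git, plugin, and utils hunks: copying one map into another with a `for k, v := range` loop versus `maps.Copy(dst, src)` from the standard `maps` package (Go 1.21). Both overwrite existing keys in the destination. A minimal sketch:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	params := map[string]any{"existing": 1}
	tmp := map[string]any{"values.env": "prod", "existing": 2}

	// Equivalent to: for k, v := range tmp { params[k] = v }
	maps.Copy(params, tmp)

	fmt.Println(params) // map[existing:2 values.env:prod]
}
```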
@@ -151,9 +151,9 @@ spec:
func newFakeAppsets(fakeAppsetYAML string) []argoappv1.ApplicationSet {
var results []argoappv1.ApplicationSet

appsetRawYamls := strings.SplitSeq(fakeAppsetYAML, "---")
appsetRawYamls := strings.Split(fakeAppsetYAML, "---")

for appsetRawYaml := range appsetRawYamls {
for _, appsetRawYaml := range appsetRawYamls {
var appset argoappv1.ApplicationSet
err := yaml.Unmarshal([]byte(appsetRawYaml), &appset)
if err != nil {
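This hunk swaps `strings.Split`, which allocates a slice and is ranged with an index and value, for `strings.SplitSeq`, added in Go 1.24 as an iterator that yields each piece directly to a single loop variable. A small sketch of both forms, assuming Go 1.24 for the iterator variant:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	manifests := "kind: A\n---\nkind: B"

	// Slice-based form: the loop yields (index, element).
	for _, doc := range strings.Split(manifests, "---") {
		fmt.Println("split:", strings.TrimSpace(doc))
	}

	// Iterator form (Go 1.24+): the loop yields each element directly.
	for doc := range strings.SplitSeq(manifests, "---") {
		fmt.Println("splitseq:", strings.TrimSpace(doc))
	}
}
```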
@@ -174,7 +174,7 @@ func TestApplicationsetCollector(t *testing.T) {
appsetCollector := newAppsetCollector(utils.NewAppsetLister(client), collectedLabels, filter)

metrics.Registry.MustRegister(appsetCollector)
req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, "/metrics", http.NoBody)
req, err := http.NewRequest(http.MethodGet, "/metrics", http.NoBody)
require.NoError(t, err)
rr := httptest.NewRecorder()
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{})
@@ -216,7 +216,7 @@ func TestObserveReconcile(t *testing.T) {

appsetMetrics := NewApplicationsetMetrics(utils.NewAppsetLister(client), collectedLabels, filter)

req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, "/metrics", http.NoBody)
req, err := http.NewRequest(http.MethodGet, "/metrics", http.NoBody)
require.NoError(t, err)
rr := httptest.NewRecorder()
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{})
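The metrics tests differ only in how the scrape request is built: `http.NewRequest` (or bare `http.Get`) versus `http.NewRequestWithContext`, which ties the request's lifetime to a context — here the test's own context, so the request is cancelled when the test ends. A minimal sketch against `httptest`, assuming Go 1.24 for `t.Context()`:

```go
package example

import (
	"io"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestScrape(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = io.WriteString(w, "ok")
	}))
	defer srv.Close()

	// Context-aware variant: cancelled automatically when the test finishes.
	req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, srv.URL, http.NoBody)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()
}
```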
@@ -97,9 +97,7 @@ func TestGitHubMetrics_CollectorApproach_Success(t *testing.T) {
),
}

ctx := t.Context()

req, _ := http.NewRequestWithContext(ctx, http.MethodGet, ts.URL+URL, http.NoBody)
req, _ := http.NewRequest(http.MethodGet, ts.URL+URL, http.NoBody)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("unexpected error: %v", err)
@@ -111,11 +109,7 @@ func TestGitHubMetrics_CollectorApproach_Success(t *testing.T) {
server := httptest.NewServer(handler)
defer server.Close()

req, err = http.NewRequestWithContext(ctx, http.MethodGet, server.URL, http.NoBody)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
resp, err = http.DefaultClient.Do(req)
resp, err = http.Get(server.URL)
if err != nil {
t.Fatalf("failed to scrape metrics: %v", err)
}
@@ -157,23 +151,15 @@ func TestGitHubMetrics_CollectorApproach_NoRateLimitMetricsOnNilResponse(t *test
metrics: metrics,
},
}
ctx := t.Context()

req, err := http.NewRequestWithContext(ctx, http.MethodGet, URL, http.NoBody)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req, _ := http.NewRequest(http.MethodGet, URL, http.NoBody)
_, _ = client.Do(req)

handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
server := httptest.NewServer(handler)
defer server.Close()

req, err = http.NewRequestWithContext(ctx, http.MethodGet, server.URL, http.NoBody)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
resp, err := http.DefaultClient.Do(req)
resp, err := http.Get(server.URL)
if err != nil {
t.Fatalf("failed to scrape metrics: %v", err)
}

@@ -1,8 +1,6 @@
package github_app

import (
"context"
"errors"
"fmt"
"net/http"

@@ -10,65 +8,40 @@ import (
"github.com/google/go-github/v69/github"

"github.com/argoproj/argo-cd/v3/applicationset/services/github_app_auth"
"github.com/argoproj/argo-cd/v3/util/git"
appsetutils "github.com/argoproj/argo-cd/v3/applicationset/utils"
)

// getInstallationClient creates a new GitHub client with the specified installation ID.
// It also returns a ghinstallation.Transport, which can be used for git requests.
func getInstallationClient(g github_app_auth.Authentication, url string, httpClient ...*http.Client) (*github.Client, error) {
if g.InstallationId <= 0 {
return nil, errors.New("installation ID is required for github")
func getOptionalHTTPClientAndTransport(optionalHTTPClient ...*http.Client) (*http.Client, http.RoundTripper) {
httpClient := appsetutils.GetOptionalHTTPClient(optionalHTTPClient...)
if len(optionalHTTPClient) > 0 && optionalHTTPClient[0] != nil && optionalHTTPClient[0].Transport != nil {
// will either use the provided custom httpClient and it's transport
return httpClient, optionalHTTPClient[0].Transport
}

// Use provided HTTP client's transport or default
var transport http.RoundTripper
if len(httpClient) > 0 && httpClient[0] != nil && httpClient[0].Transport != nil {
transport = httpClient[0].Transport
} else {
transport = http.DefaultTransport
}

itr, err := ghinstallation.New(transport, g.Id, g.InstallationId, []byte(g.PrivateKey))
if err != nil {
return nil, fmt.Errorf("failed to create GitHub installation transport: %w", err)
}

if url == "" {
url = g.EnterpriseBaseURL
}

var client *github.Client
if url == "" {
client = github.NewClient(&http.Client{Transport: itr})
return client, nil
}

itr.BaseURL = url
client, err = github.NewClient(&http.Client{Transport: itr}).WithEnterpriseURLs(url, url)
if err != nil {
return nil, fmt.Errorf("failed to create GitHub enterprise client: %w", err)
}
return client, nil
// or the default httpClient and transport
return httpClient, http.DefaultTransport
}

// Client builds a github client for the given app authentication.
func Client(ctx context.Context, g github_app_auth.Authentication, url, org string, optionalHTTPClient ...*http.Client) (*github.Client, error) {
func Client(g github_app_auth.Authentication, url string, optionalHTTPClient ...*http.Client) (*github.Client, error) {
httpClient, transport := getOptionalHTTPClientAndTransport(optionalHTTPClient...)

rt, err := ghinstallation.New(transport, g.Id, g.InstallationId, []byte(g.PrivateKey))
if err != nil {
return nil, fmt.Errorf("failed to create github app install: %w", err)
}
if url == "" {
url = g.EnterpriseBaseURL
}

// If an installation ID is already provided, use it directly.
if g.InstallationId != 0 {
return getInstallationClient(g, url, optionalHTTPClient...)
var client *github.Client
httpClient.Transport = rt
if url == "" {
client = github.NewClient(httpClient)
} else {
rt.BaseURL = url
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
if err != nil {
return nil, fmt.Errorf("failed to create github enterprise client: %w", err)
}
}

// Auto-discover installation ID using shared utility
// Pass optional HTTP client for metrics tracking
installationId, err := git.DiscoverGitHubAppInstallationID(ctx, g.Id, g.PrivateKey, url, org, optionalHTTPClient...)
if err != nil {
return nil, err
}

g.InstallationId = installationId
return getInstallationClient(g, url, optionalHTTPClient...)
return client, nil
}
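Both versions of `Client` above follow the same underlying pattern from the ghinstallation and go-github libraries: build an installation transport from the app ID, installation ID, and private key, wrap it in an `http.Client`, and hand that to `github.NewClient` (or `WithEnterpriseURLs` for GitHub Enterprise). A stripped-down sketch of that pattern only — not the argo-cd helper — with placeholder credentials and assuming the commonly used `bradleyfalzon/ghinstallation/v2` import path:

```go
package example

import (
	"fmt"
	"net/http"

	"github.com/bradleyfalzon/ghinstallation/v2"
	"github.com/google/go-github/v69/github"
)

// newAppClient is a sketch: appID, installationID, and privateKey stand in for
// values that the real code reads from the configured GitHub App secret.
func newAppClient(appID, installationID int64, privateKey []byte, enterpriseURL string) (*github.Client, error) {
	itr, err := ghinstallation.New(http.DefaultTransport, appID, installationID, privateKey)
	if err != nil {
		return nil, fmt.Errorf("failed to create installation transport: %w", err)
	}

	httpClient := &http.Client{Transport: itr}
	if enterpriseURL == "" {
		return github.NewClient(httpClient), nil // api.github.com
	}

	// GitHub Enterprise: point both the transport and the client at the instance.
	itr.BaseURL = enterpriseURL
	return github.NewClient(httpClient).WithEnterpriseURLs(enterpriseURL, enterpriseURL)
}
```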
@@ -3,7 +3,6 @@ package pull_request
import (
"context"
"fmt"
"slices"
"strings"

"github.com/microsoft/azure-devops-go-api/azuredevops/v7"
@@ -108,7 +107,7 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {

if *pr.Repository.Name == a.repo {
pullRequests = append(pullRequests, &PullRequest{
Number: int64(*pr.PullRequestId),
Number: *pr.PullRequestId,
Title: *pr.Title,
Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
TargetBranch: strings.Replace(*pr.TargetRefName, "refs/heads/", "", 1),
@@ -137,7 +136,13 @@ func convertLabels(tags *[]core.WebApiTagDefinition) []string {
// containAzureDevOpsLabels returns true if gotLabels contains expectedLabels
func containAzureDevOpsLabels(expectedLabels []string, gotLabels []string) bool {
for _, expected := range expectedLabels {
found := slices.Contains(gotLabels, expected)
found := false
for _, got := range gotLabels {
if expected == got {
found = true
break
}
}
if !found {
return false
}

@@ -1,6 +1,7 @@
package pull_request

import (
"context"
"errors"
"testing"

@@ -12,7 +13,6 @@ import (
"github.com/stretchr/testify/require"

azureMock "github.com/argoproj/argo-cd/v3/applicationset/services/scm_provider/azure_devops/git/mocks"
"github.com/argoproj/argo-cd/v3/applicationset/services/scm_provider/mocks"
)

func createBoolPtr(x bool) *bool {
@@ -35,6 +35,29 @@ func createUniqueNamePtr(x string) *string {
return &x
}

type AzureClientFactoryMock struct {
mock *mock.Mock
}

func (m *AzureClientFactoryMock) GetClient(ctx context.Context) (git.Client, error) {
args := m.mock.Called(ctx)

var client git.Client
c := args.Get(0)
if c != nil {
client = c.(git.Client)
}

var err error
if len(args) > 1 {
if e, ok := args.Get(1).(error); ok {
err = e
}
}

return client, err
}

func TestListPullRequest(t *testing.T) {
teamProject := "myorg_project"
repoName := "myorg_project_repo"
@@ -68,10 +91,10 @@ func TestListPullRequest(t *testing.T) {
SearchCriteria: &git.GitPullRequestSearchCriteria{},
}

gitClientMock := &azureMock.Client{}
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, nil)
gitClientMock.EXPECT().GetPullRequestsByProject(mock.Anything, args).Return(&pullRequestMock, nil)
gitClientMock := azureMock.Client{}
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil)
gitClientMock.On("GetPullRequestsByProject", ctx, args).Return(&pullRequestMock, nil)

provider := AzureDevOpsService{
clientFactory: clientFactoryMock,
@@ -87,7 +110,7 @@ func TestListPullRequest(t *testing.T) {
assert.Equal(t, "main", list[0].TargetBranch)
assert.Equal(t, prHeadSha, list[0].HeadSHA)
assert.Equal(t, "feat(123)", list[0].Title)
assert.Equal(t, int64(prID), list[0].Number)
assert.Equal(t, prID, list[0].Number)
assert.Equal(t, uniqueName, list[0].Author)
}

@@ -222,12 +245,12 @@ func TestAzureDevOpsListReturnsRepositoryNotFoundError(t *testing.T) {

pullRequestMock := []git.GitPullRequest{}

gitClientMock := &azureMock.Client{}
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, nil)
gitClientMock := azureMock.Client{}
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil)

// Mock the GetPullRequestsByProject to return an error containing "404"
gitClientMock.EXPECT().GetPullRequestsByProject(mock.Anything, args).Return(&pullRequestMock,
gitClientMock.On("GetPullRequestsByProject", t.Context(), args).Return(&pullRequestMock,
errors.New("The following project does not exist:"))

provider := AzureDevOpsService{

@@ -81,10 +81,7 @@ func NewBitbucketCloudServiceBasicAuth(baseURL, username, password, owner, repos
return nil, fmt.Errorf("error parsing base url of %s for %s/%s: %w", baseURL, owner, repositorySlug, err)
}

bitbucketClient, err := bitbucket.NewBasicAuth(username, password)
if err != nil {
return nil, fmt.Errorf("error creating BitBucket Cloud client with basic auth: %w", err)
}
bitbucketClient := bitbucket.NewBasicAuth(username, password)
bitbucketClient.SetApiBaseURL(*url)

return &BitbucketCloudService{
@@ -100,13 +97,14 @@ func NewBitbucketCloudServiceBearerToken(baseURL, bearerToken, owner, repository
return nil, fmt.Errorf("error parsing base url of %s for %s/%s: %w", baseURL, owner, repositorySlug, err)
}

bitbucketClient, err := bitbucket.NewOAuthbearerToken(bearerToken)
if err != nil {
return nil, fmt.Errorf("error creating BitBucket Cloud client with oauth bearer token: %w", err)
}
bitbucketClient := bitbucket.NewOAuthbearerToken(bearerToken)
bitbucketClient.SetApiBaseURL(*url)

return &BitbucketCloudService{client: bitbucketClient, owner: owner, repositorySlug: repositorySlug}, nil
return &BitbucketCloudService{
client: bitbucketClient,
owner: owner,
repositorySlug: repositorySlug,
}, nil
}

func NewBitbucketCloudServiceNoAuth(baseURL, owner, repositorySlug string) (PullRequestService, error) {
@@ -156,7 +154,7 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)

for _, pull := range pulls {
pullRequests = append(pullRequests, &PullRequest{
Number: int64(pull.ID),
Number: pull.ID,
Title: pull.Title,
Branch: pull.Source.Branch.Name,
TargetBranch: pull.Destination.Branch.Name,

@@ -89,7 +89,7 @@ func TestListPullRequestBearerTokenCloud(t *testing.T) {
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
require.NoError(t, err)
assert.Len(t, pullRequests, 1)
assert.Equal(t, int64(101), pullRequests[0].Number)
assert.Equal(t, 101, pullRequests[0].Number)
assert.Equal(t, "feat(foo-bar)", pullRequests[0].Title)
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)
@@ -107,7 +107,7 @@ func TestListPullRequestNoAuthCloud(t *testing.T) {
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
require.NoError(t, err)
assert.Len(t, pullRequests, 1)
assert.Equal(t, int64(101), pullRequests[0].Number)
assert.Equal(t, 101, pullRequests[0].Number)
assert.Equal(t, "feat(foo-bar)", pullRequests[0].Title)
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)
@@ -125,7 +125,7 @@ func TestListPullRequestBasicAuthCloud(t *testing.T) {
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
require.NoError(t, err)
assert.Len(t, pullRequests, 1)
assert.Equal(t, int64(101), pullRequests[0].Number)
assert.Equal(t, 101, pullRequests[0].Number)
assert.Equal(t, "feat(foo-bar)", pullRequests[0].Title)
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)

@@ -82,7 +82,7 @@ func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) {

for _, pull := range pulls {
pullRequests = append(pullRequests, &PullRequest{
Number: int64(pull.ID),
Number: pull.ID,
Title: pull.Title,
Branch: pull.FromRef.DisplayID, // ID: refs/heads/main DisplayID: main
TargetBranch: pull.ToRef.DisplayID,

@@ -68,7 +68,7 @@ func TestListPullRequestNoAuth(t *testing.T) {
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
require.NoError(t, err)
assert.Len(t, pullRequests, 1)
assert.Equal(t, int64(101), pullRequests[0].Number)
assert.Equal(t, 101, pullRequests[0].Number)
assert.Equal(t, "feat(ABC) : 123", pullRequests[0].Title)
assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch)
assert.Equal(t, "master", pullRequests[0].TargetBranch)
@@ -211,7 +211,7 @@ func TestListPullRequestBasicAuth(t *testing.T) {
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
require.NoError(t, err)
assert.Len(t, pullRequests, 1)
assert.Equal(t, int64(101), pullRequests[0].Number)
assert.Equal(t, 101, pullRequests[0].Number)
assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch)
assert.Equal(t, "cb3cf2e4d1517c83e720d2585b9402dbef71f992", pullRequests[0].HeadSHA)
}
@@ -228,7 +228,7 @@ func TestListPullRequestBearerAuth(t *testing.T) {
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
require.NoError(t, err)
assert.Len(t, pullRequests, 1)
assert.Equal(t, int64(101), pullRequests[0].Number)
assert.Equal(t, 101, pullRequests[0].Number)
assert.Equal(t, "feat(ABC) : 123", pullRequests[0].Title)
assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch)
assert.Equal(t, "cb3cf2e4d1517c83e720d2585b9402dbef71f992", pullRequests[0].HeadSHA)
@@ -268,6 +268,7 @@ func TestListPullRequestTLS(t *testing.T) {
}

for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defaultHandler(t)(w, r)

@@ -68,7 +68,7 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
continue
}
list = append(list, &PullRequest{
Number: int64(pr.Index),
Number: int(pr.Index),
Title: pr.Title,
Branch: pr.Head.Ref,
TargetBranch: pr.Base.Ref,
@@ -83,7 +83,7 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
// containLabels returns true if gotLabels contains expectedLabels
func giteaContainLabels(expectedLabels []string, gotLabels []*gitea.Label) bool {
gotLabelNamesMap := make(map[string]bool)
for i := range gotLabels {
for i := 0; i < len(gotLabels); i++ {
gotLabelNamesMap[gotLabels[i].Name] = true
}
for _, expected := range expectedLabels {

@@ -303,7 +303,7 @@ func TestGiteaList(t *testing.T) {
prs, err := host.List(t.Context())
require.NoError(t, err)
assert.Len(t, prs, 1)
assert.Equal(t, int64(1), prs[0].Number)
assert.Equal(t, 1, prs[0].Number)
assert.Equal(t, "add an empty file", prs[0].Title)
assert.Equal(t, "test", prs[0].Branch)
assert.Equal(t, "main", prs[0].TargetBranch)

@@ -76,7 +76,7 @@ func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) {
continue
}
pullRequests = append(pullRequests, &PullRequest{
Number: int64(*pull.Number),
Number: *pull.Number,
Title: *pull.Title,
Branch: *pull.Head.Ref,
TargetBranch: *pull.Base.Ref,

@@ -1,7 +1,6 @@
package pull_request

import (
"context"
"net/http"

"github.com/argoproj/argo-cd/v3/applicationset/services/github_app_auth"
@@ -9,9 +8,9 @@ import (
appsetutils "github.com/argoproj/argo-cd/v3/applicationset/utils"
)

func NewGithubAppService(ctx context.Context, g github_app_auth.Authentication, url, owner, repo string, labels []string, optionalHTTPClient ...*http.Client) (PullRequestService, error) {
func NewGithubAppService(g github_app_auth.Authentication, url, owner, repo string, labels []string, optionalHTTPClient ...*http.Client) (PullRequestService, error) {
httpClient := appsetutils.GetOptionalHTTPClient(optionalHTTPClient...)
client, err := github_app.Client(ctx, g, url, owner, httpClient)
client, err := github_app.Client(g, url, httpClient)
if err != nil {
return nil, err
}

@@ -61,15 +61,11 @@ func (g *GitLabService) List(ctx context.Context) ([]*PullRequest, error) {
var labelsList gitlab.LabelOptions = g.labels
labels = &labelsList
}

snippetsListOptions := gitlab.ExploreSnippetsOptions{
opts := &gitlab.ListProjectMergeRequestsOptions{
ListOptions: gitlab.ListOptions{
PerPage: 100,
},
}
opts := &gitlab.ListProjectMergeRequestsOptions{
ListOptions: snippetsListOptions.ListOptions,
Labels: labels,
Labels: labels,
}

if g.pullRequestState != "" {

@@ -78,7 +78,7 @@ func TestList(t *testing.T) {
prs, err := svc.List(t.Context())
require.NoError(t, err)
assert.Len(t, prs, 1)
assert.Equal(t, int64(15442), prs[0].Number)
assert.Equal(t, 15442, prs[0].Number)
assert.Equal(t, "Draft: Use structured logging for DB load balancer", prs[0].Title)
assert.Equal(t, "use-structured-logging-for-db-load-balancer", prs[0].Branch)
assert.Equal(t, "master", prs[0].TargetBranch)
@@ -158,6 +158,7 @@ func TestListWithStateTLS(t *testing.T) {
}

for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
writeMRListResponse(t, w)

@@ -7,8 +7,7 @@ import (

type PullRequest struct {
// Number is a number that will be the ID of the pull request.
// Gitlab uses int64 for the pull request number.
Number int64
Number int
// Title of the pull request.
Title string
// Branch is the name of the branch from which the pull request originated.

@@ -12,7 +12,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"

"github.com/argoproj/argo-cd/v3/applicationset/services/scm_provider/mocks"
"github.com/argoproj/argo-cd/v3/applicationset/services/scm_provider/aws_codecommit/mocks"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
)

@@ -177,8 +177,9 @@ func TestAWSCodeCommitListRepos(t *testing.T) {
if repo.getRepositoryNilMetadata {
repoMetadata = nil
}
codeCommitClient.EXPECT().GetRepositoryWithContext(mock.Anything, &codecommit.GetRepositoryInput{RepositoryName: aws.String(repo.name)}).
Return(&codecommit.GetRepositoryOutput{RepositoryMetadata: repoMetadata}, repo.getRepositoryError).Maybe()
codeCommitClient.
On("GetRepositoryWithContext", ctx, &codecommit.GetRepositoryInput{RepositoryName: aws.String(repo.name)}).
Return(&codecommit.GetRepositoryOutput{RepositoryMetadata: repoMetadata}, repo.getRepositoryError)
codecommitRepoNameIdPairs = append(codecommitRepoNameIdPairs, &codecommit.RepositoryNameIdPair{
RepositoryId: aws.String(repo.id),
RepositoryName: aws.String(repo.name),
@@ -192,18 +193,20 @@ func TestAWSCodeCommitListRepos(t *testing.T) {
}

if testCase.expectListAtCodeCommit {
codeCommitClient.EXPECT().ListRepositoriesWithContext(mock.Anything, &codecommit.ListRepositoriesInput{}).
codeCommitClient.
On("ListRepositoriesWithContext", ctx, &codecommit.ListRepositoriesInput{}).
Return(&codecommit.ListRepositoriesOutput{
Repositories: codecommitRepoNameIdPairs,
}, testCase.listRepositoryError).Maybe()
}, testCase.listRepositoryError)
} else {
taggingClient.EXPECT().GetResourcesWithContext(mock.Anything, mock.MatchedBy(equalIgnoringTagFilterOrder(&resourcegroupstaggingapi.GetResourcesInput{
TagFilters: testCase.expectTagFilters,
ResourceTypeFilters: aws.StringSlice([]string{resourceTypeCodeCommitRepository}),
}))).
taggingClient.
On("GetResourcesWithContext", ctx, mock.MatchedBy(equalIgnoringTagFilterOrder(&resourcegroupstaggingapi.GetResourcesInput{
TagFilters: testCase.expectTagFilters,
ResourceTypeFilters: aws.StringSlice([]string{resourceTypeCodeCommitRepository}),
}))).
Return(&resourcegroupstaggingapi.GetResourcesOutput{
ResourceTagMappingList: resourceTaggings,
}, testCase.listRepositoryError).Maybe()
}, testCase.listRepositoryError)
}

provider := &AWSCodeCommitProvider{
@@ -347,12 +350,13 @@ func TestAWSCodeCommitRepoHasPath(t *testing.T) {
taggingClient := mocks.NewAWSTaggingClient(t)
ctx := t.Context()
if testCase.expectedGetFolderPath != "" {
codeCommitClient.EXPECT().GetFolderWithContext(mock.Anything, &codecommit.GetFolderInput{
CommitSpecifier: aws.String(branch),
FolderPath: aws.String(testCase.expectedGetFolderPath),
RepositoryName: aws.String(repoName),
}).
Return(testCase.getFolderOutput, testCase.getFolderError).Maybe()
codeCommitClient.
On("GetFolderWithContext", ctx, &codecommit.GetFolderInput{
CommitSpecifier: aws.String(branch),
FolderPath: aws.String(testCase.expectedGetFolderPath),
RepositoryName: aws.String(repoName),
}).
Return(testCase.getFolderOutput, testCase.getFolderError)
}
provider := &AWSCodeCommitProvider{
codeCommitClient: codeCommitClient,
@@ -419,16 +423,18 @@ func TestAWSCodeCommitGetBranches(t *testing.T) {
taggingClient := mocks.NewAWSTaggingClient(t)
ctx := t.Context()
if testCase.allBranches {
codeCommitClient.EXPECT().ListBranchesWithContext(mock.Anything, &codecommit.ListBranchesInput{
RepositoryName: aws.String(name),
}).
Return(&codecommit.ListBranchesOutput{Branches: aws.StringSlice(testCase.branches)}, testCase.apiError).Maybe()
codeCommitClient.
On("ListBranchesWithContext", ctx, &codecommit.ListBranchesInput{
RepositoryName: aws.String(name),
}).
Return(&codecommit.ListBranchesOutput{Branches: aws.StringSlice(testCase.branches)}, testCase.apiError)
} else {
codeCommitClient.EXPECT().GetRepositoryWithContext(mock.Anything, &codecommit.GetRepositoryInput{RepositoryName: aws.String(name)}).
codeCommitClient.
On("GetRepositoryWithContext", ctx, &codecommit.GetRepositoryInput{RepositoryName: aws.String(name)}).
Return(&codecommit.GetRepositoryOutput{RepositoryMetadata: &codecommit.RepositoryMetadata{
AccountId: aws.String(organization),
DefaultBranch: aws.String(defaultBranch),
}}, testCase.apiError).Maybe()
}}, testCase.apiError)
}
provider := &AWSCodeCommitProvider{
codeCommitClient: codeCommitClient,

@@ -1,6 +1,7 @@
package scm_provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
@@ -15,7 +16,6 @@ import (
|
||||
azureGit "github.com/microsoft/azure-devops-go-api/azuredevops/v7/git"
|
||||
|
||||
azureMock "github.com/argoproj/argo-cd/v3/applicationset/services/scm_provider/azure_devops/git/mocks"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services/scm_provider/mocks"
|
||||
)
|
||||
|
||||
func s(input string) *string {
|
||||
@@ -78,13 +78,13 @@ func TestAzureDevopsRepoHasPath(t *testing.T) {
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
gitClientMock := &azureMock.Client{}
|
||||
gitClientMock := azureMock.Client{}
|
||||
|
||||
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
|
||||
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, testCase.clientError)
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, testCase.clientError)
|
||||
|
||||
repoId := &uuid
|
||||
gitClientMock.EXPECT().GetItem(mock.Anything, azureGit.GetItemArgs{Project: &teamProject, Path: &path, VersionDescriptor: &azureGit.GitVersionDescriptor{Version: &branchName}, RepositoryId: repoId}).Return(nil, testCase.azureDevopsError)
|
||||
gitClientMock.On("GetItem", ctx, azureGit.GetItemArgs{Project: &teamProject, Path: &path, VersionDescriptor: &azureGit.GitVersionDescriptor{Version: &branchName}, RepositoryId: repoId}).Return(nil, testCase.azureDevopsError)
|
||||
|
||||
provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock}
|
||||
|
||||
@@ -143,12 +143,12 @@ func TestGetDefaultBranchOnDisabledRepo(t *testing.T) {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
uuid := uuid.New().String()
|
||||
|
||||
gitClientMock := azureMock.NewClient(t)
|
||||
gitClientMock := azureMock.Client{}
|
||||
|
||||
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
|
||||
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, nil)
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil)
|
||||
|
||||
gitClientMock.EXPECT().GetBranch(mock.Anything, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &defaultBranch}).Return(nil, testCase.azureDevOpsError)
|
||||
gitClientMock.On("GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &defaultBranch}).Return(nil, testCase.azureDevOpsError)
|
||||
|
||||
repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: defaultBranch}
|
||||
|
||||
@@ -162,6 +162,8 @@ func TestGetDefaultBranchOnDisabledRepo(t *testing.T) {
|
||||
}
|
||||
|
||||
assert.Empty(t, branches)
|
||||
|
||||
gitClientMock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -200,12 +202,12 @@ func TestGetAllBranchesOnDisabledRepo(t *testing.T) {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
uuid := uuid.New().String()
|
||||
|
||||
gitClientMock := azureMock.NewClient(t)
|
||||
gitClientMock := azureMock.Client{}
|
||||
|
||||
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
|
||||
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, nil)
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil)
|
||||
|
||||
gitClientMock.EXPECT().GetBranches(mock.Anything, azureGit.GetBranchesArgs{RepositoryId: &repoName, Project: &teamProject}).Return(nil, testCase.azureDevOpsError)
|
||||
gitClientMock.On("GetBranches", ctx, azureGit.GetBranchesArgs{RepositoryId: &repoName, Project: &teamProject}).Return(nil, testCase.azureDevOpsError)
|
||||
|
||||
repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: defaultBranch}
|
||||
|
||||
@@ -219,6 +221,8 @@ func TestGetAllBranchesOnDisabledRepo(t *testing.T) {
|
||||
}
|
||||
|
||||
assert.Empty(t, branches)
|
||||
|
||||
gitClientMock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -237,12 +241,12 @@ func TestAzureDevOpsGetDefaultBranchStripsRefsName(t *testing.T) {
|
||||
branchReturn := &azureGit.GitBranchStats{Name: &strippedBranchName, Commit: &azureGit.GitCommitRef{CommitId: s("abc123233223")}}
|
||||
repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: defaultBranch}
|
||||
|
||||
gitClientMock := &azureMock.Client{}
|
||||
gitClientMock := azureMock.Client{}
|
||||
|
||||
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
|
||||
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, nil)
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil)
|
||||
|
||||
gitClientMock.EXPECT().GetBranch(mock.Anything, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &strippedBranchName}).Return(branchReturn, nil).Maybe()
|
||||
gitClientMock.On("GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &strippedBranchName}).Return(branchReturn, nil)
|
||||
|
||||
provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock, allBranches: false}
|
||||
branches, err := provider.GetBranches(ctx, repo)
|
||||
@@ -291,12 +295,12 @@ func TestAzureDevOpsGetBranchesDefultBranchOnly(t *testing.T) {
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
gitClientMock := &azureMock.Client{}
|
||||
gitClientMock := azureMock.Client{}
|
||||
|
||||
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
|
||||
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, testCase.clientError)
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, testCase.clientError)
|
||||
|
||||
gitClientMock.EXPECT().GetBranch(mock.Anything, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &defaultBranch}).Return(testCase.expectedBranch, testCase.getBranchesAPIError)
|
||||
gitClientMock.On("GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &defaultBranch}).Return(testCase.expectedBranch, testCase.getBranchesAPIError)
|
||||
|
||||
repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: defaultBranch}
|
||||
|
||||
@@ -375,12 +379,12 @@ func TestAzureDevopsGetBranches(t *testing.T) {
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
gitClientMock := &azureMock.Client{}
|
||||
gitClientMock := azureMock.Client{}
|
||||
|
||||
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
|
||||
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, testCase.clientError)
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, testCase.clientError)
|
||||
|
||||
gitClientMock.EXPECT().GetBranches(mock.Anything, azureGit.GetBranchesArgs{RepositoryId: &repoName, Project: &teamProject}).Return(testCase.expectedBranches, testCase.getBranchesAPIError)
|
||||
gitClientMock.On("GetBranches", ctx, azureGit.GetBranchesArgs{RepositoryId: &repoName, Project: &teamProject}).Return(testCase.expectedBranches, testCase.getBranchesAPIError)
|
||||
|
||||
repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid}
|
||||
|
||||
@@ -423,6 +427,7 @@ func TestGetAzureDevopsRepositories(t *testing.T) {
|
||||
teamProject := "myorg_project"
|
||||
|
||||
uuid := uuid.New()
|
||||
ctx := t.Context()
|
||||
|
||||
repoId := &uuid
|
||||
|
||||
@@ -472,15 +477,15 @@ func TestGetAzureDevopsRepositories(t *testing.T) {
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
gitClientMock := azureMock.NewClient(t)
|
||||
gitClientMock.EXPECT().GetRepositories(mock.Anything, azureGit.GetRepositoriesArgs{Project: s(teamProject)}).Return(&testCase.repositories, testCase.getRepositoriesError)
|
||||
gitClientMock := azureMock.Client{}
|
||||
gitClientMock.On("GetRepositories", ctx, azureGit.GetRepositoriesArgs{Project: s(teamProject)}).Return(&testCase.repositories, testCase.getRepositoriesError)
|
||||
|
||||
clientFactoryMock := &mocks.AzureDevOpsClientFactory{}
|
||||
clientFactoryMock.EXPECT().GetClient(mock.Anything).Return(gitClientMock, nil)
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock)
|
||||
|
||||
provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock}
|
||||
|
||||
repositories, err := provider.ListRepos(t.Context(), "https")
|
||||
repositories, err := provider.ListRepos(ctx, "https")
|
||||
|
||||
if testCase.getRepositoriesError != nil {
|
||||
require.Error(t, err, "Expected an error from test case %v", testCase.name)
|
||||
@@ -492,6 +497,31 @@ func TestGetAzureDevopsRepositories(t *testing.T) {
|
||||
assert.NotEmpty(t, repositories)
|
||||
assert.Len(t, repositories, testCase.expectedNumberOfRepos)
|
||||
}
|
||||
|
||||
gitClientMock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type AzureClientFactoryMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (m *AzureClientFactoryMock) GetClient(ctx context.Context) (azureGit.Client, error) {
|
||||
args := m.mock.Called(ctx)
|
||||
|
||||
var client azureGit.Client
|
||||
c := args.Get(0)
|
||||
if c != nil {
|
||||
client = c.(azureGit.Client)
|
||||
}
|
||||
|
||||
var err error
|
||||
if len(args) > 1 {
|
||||
if e, ok := args.Get(1).(error); ok {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
|
||||
return client, err
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ func (c *ExtendedClient) GetContents(repo *Repository, path string) (bool, error
urlStr += fmt.Sprintf("/repositories/%s/%s/src/%s/%s?format=meta", c.owner, repo.Repository, repo.SHA, path)
body := strings.NewReader("")

req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, urlStr, body)
req, err := http.NewRequest(http.MethodGet, urlStr, body)
if err != nil {
return false, err
}
@@ -53,12 +53,8 @@ func (c *ExtendedClient) GetContents(repo *Repository, path string) (bool, error
var _ SCMProviderService = &BitBucketCloudProvider{}

func NewBitBucketCloudProvider(owner string, user string, password string, allBranches bool) (*BitBucketCloudProvider, error) {
bitbucketClient, err := bitbucket.NewBasicAuth(user, password)
if err != nil {
return nil, fmt.Errorf("error creating BitBucket Cloud client with basic auth: %w", err)
}
client := &ExtendedClient{
bitbucketClient,
bitbucket.NewBasicAuth(user, password),
user,
password,
owner,

@@ -445,6 +445,7 @@ func TestListReposTLS(t *testing.T) {
}

for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defaultHandler(t)(w, r)

@@ -1,7 +1,6 @@
package scm_provider

import (
"context"
"net/http"

"github.com/argoproj/argo-cd/v3/applicationset/services/github_app_auth"
@@ -9,9 +8,9 @@ import (
appsetutils "github.com/argoproj/argo-cd/v3/applicationset/utils"
)

func NewGithubAppProviderFor(ctx context.Context, g github_app_auth.Authentication, organization string, url string, allBranches bool, optionalHTTPClient ...*http.Client) (*GithubProvider, error) {
func NewGithubAppProviderFor(g github_app_auth.Authentication, organization string, url string, allBranches bool, optionalHTTPClient ...*http.Client) (*GithubProvider, error) {
httpClient := appsetutils.GetOptionalHTTPClient(optionalHTTPClient...)
client, err := github_app.Client(ctx, g, url, organization, httpClient)
client, err := github_app.Client(g, url, httpClient)
if err != nil {
return nil, err
}

@@ -76,13 +76,8 @@ func (g *GitlabProvider) GetBranches(ctx context.Context, repo *Repository) ([]*
}

func (g *GitlabProvider) ListRepos(_ context.Context, cloneProtocol string) ([]*Repository, error) {
snippetsListOptions := gitlab.ExploreSnippetsOptions{
ListOptions: gitlab.ListOptions{
PerPage: 100,
},
}
opt := &gitlab.ListGroupProjectsOptions{
ListOptions: snippetsListOptions.ListOptions,
ListOptions: gitlab.ListOptions{PerPage: 100},
IncludeSubGroups: &g.includeSubgroups,
WithShared: &g.includeSharedProjects,
Topic: &g.topic,
@@ -178,13 +173,8 @@ func (g *GitlabProvider) listBranches(_ context.Context, repo *Repository) ([]gi
return branches, nil
}
// Otherwise, scrape the ListBranches API.
snippetsListOptions := gitlab.ExploreSnippetsOptions{
ListOptions: gitlab.ListOptions{
PerPage: 100,
},
}
opt := &gitlab.ListBranchesOptions{
ListOptions: snippetsListOptions.ListOptions,
ListOptions: gitlab.ListOptions{PerPage: 100},
}
for {
gitlabBranches, resp, err := g.client.Branches.ListBranches(repo.RepositoryId, opt)

@@ -1301,6 +1301,7 @@ func TestGetBranchesTLS(t *testing.T) {
}

for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
gitlabMockHandler(t)(w, r)

@@ -1,101 +0,0 @@
|
||||
// Code generated by mockery; DO NOT EDIT.
|
||||
// github.com/vektra/mockery
|
||||
// template: testify
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/microsoft/azure-devops-go-api/azuredevops/v7/git"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// NewAzureDevOpsClientFactory creates a new instance of AzureDevOpsClientFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewAzureDevOpsClientFactory(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *AzureDevOpsClientFactory {
|
||||
mock := &AzureDevOpsClientFactory{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
// AzureDevOpsClientFactory is an autogenerated mock type for the AzureDevOpsClientFactory type
|
||||
type AzureDevOpsClientFactory struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type AzureDevOpsClientFactory_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *AzureDevOpsClientFactory) EXPECT() *AzureDevOpsClientFactory_Expecter {
|
||||
return &AzureDevOpsClientFactory_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// GetClient provides a mock function for the type AzureDevOpsClientFactory
|
||||
func (_mock *AzureDevOpsClientFactory) GetClient(ctx context.Context) (git.Client, error) {
|
||||
ret := _mock.Called(ctx)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetClient")
|
||||
}
|
||||
|
||||
var r0 git.Client
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context) (git.Client, error)); ok {
|
||||
return returnFunc(ctx)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context) git.Client); ok {
|
||||
r0 = returnFunc(ctx)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(git.Client)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
r1 = returnFunc(ctx)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// AzureDevOpsClientFactory_GetClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClient'
|
||||
type AzureDevOpsClientFactory_GetClient_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetClient is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
func (_e *AzureDevOpsClientFactory_Expecter) GetClient(ctx interface{}) *AzureDevOpsClientFactory_GetClient_Call {
|
||||
return &AzureDevOpsClientFactory_GetClient_Call{Call: _e.mock.On("GetClient", ctx)}
|
||||
}
|
||||
|
||||
func (_c *AzureDevOpsClientFactory_GetClient_Call) Run(run func(ctx context.Context)) *AzureDevOpsClientFactory_GetClient_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *AzureDevOpsClientFactory_GetClient_Call) Return(client git.Client, err error) *AzureDevOpsClientFactory_GetClient_Call {
|
||||
_c.Call.Return(client, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *AzureDevOpsClientFactory_GetClient_Call) RunAndReturn(run func(ctx context.Context) (git.Client, error)) *AzureDevOpsClientFactory_GetClient_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"regexp"
"slices"
"strings"

argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
@@ -59,7 +58,13 @@ func matchFilter(ctx context.Context, provider SCMProviderService, repo *Reposit
}

if filter.LabelMatch != nil {
found := slices.ContainsFunc(repo.Labels, filter.LabelMatch.MatchString)
found := false
for _, label := range repo.Labels {
if filter.LabelMatch.MatchString(label) {
found = true
break
}
}
if !found {
return false, nil
}

@@ -1,6 +1,7 @@
package services

import (
"context"
"crypto/tls"
"net/http"
"testing"
@@ -11,7 +12,7 @@ import (
)

func TestSetupBitbucketClient(t *testing.T) {
ctx := t.Context()
ctx := context.Background()
cfg := &bitbucketv1.Configuration{}

// Act

@@ -1,12 +1,15 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
"github.com/argoproj/argo-cd/v3/util/db"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
// ClusterSpecifier contains only the name and server URL of a cluster. We use this struct to avoid partially-populating
|
||||
@@ -16,44 +19,42 @@ type ClusterSpecifier struct {
|
||||
Server string
|
||||
}
|
||||
|
||||
// SecretsContainInClusterCredentials checks if any of the provided secrets represent the in-cluster configuration.
|
||||
func SecretsContainInClusterCredentials(secrets []corev1.Secret) bool {
|
||||
for _, secret := range secrets {
|
||||
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ListClusters returns a list of cluster specifiers using the ClusterInformer.
|
||||
func ListClusters(clusterInformer *settings.ClusterInformer) ([]ClusterSpecifier, error) {
|
||||
clusters, err := clusterInformer.ListClusters()
|
||||
func ListClusters(ctx context.Context, clientset kubernetes.Interface, namespace string) ([]ClusterSpecifier, error) {
|
||||
clusterSecretsList, err := clientset.CoreV1().Secrets(namespace).List(ctx,
|
||||
metav1.ListOptions{LabelSelector: common.LabelKeySecretType + "=" + common.LabelValueSecretTypeCluster})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
// len of clusters +1 for the in cluster secret
|
||||
clusterList := make([]ClusterSpecifier, 0, len(clusters)+1)
|
||||
hasInCluster := false
|
||||
|
||||
for _, cluster := range clusters {
|
||||
clusterList = append(clusterList, ClusterSpecifier{
|
||||
if clusterSecretsList == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
clusterSecrets := clusterSecretsList.Items
|
||||
|
||||
clusterList := make([]ClusterSpecifier, len(clusterSecrets))
|
||||
|
||||
hasInClusterCredentials := false
|
||||
for i, clusterSecret := range clusterSecrets {
|
||||
cluster, err := db.SecretToCluster(&clusterSecret)
|
||||
if err != nil || cluster == nil {
|
||||
return nil, fmt.Errorf("unable to convert cluster secret to cluster object '%s': %w", clusterSecret.Name, err)
|
||||
}
|
||||
clusterList[i] = ClusterSpecifier{
|
||||
Name: cluster.Name,
|
||||
Server: cluster.Server,
|
||||
})
|
||||
}
|
||||
if cluster.Server == appv1.KubernetesInternalAPIServerAddr {
|
||||
hasInCluster = true
|
||||
hasInClusterCredentials = true
|
||||
}
|
||||
}
|
||||
|
||||
if !hasInCluster {
|
||||
if !hasInClusterCredentials {
|
||||
// There was no secret for the in-cluster config, so we add it here. We don't fully-populate the Cluster struct,
|
||||
// since only the name and server fields are used by the generator.
|
||||
clusterList = append(clusterList, ClusterSpecifier{
|
||||
Name: appv1.KubernetesInClusterName,
|
||||
Name: "in-cluster",
|
||||
Server: appv1.KubernetesInternalAPIServerAddr,
|
||||
})
|
||||
}
|
||||
|
||||
return clusterList, nil
|
||||
}
|
||||
|
||||
@@ -216,6 +216,7 @@ spec:
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
foundApp := v1alpha1.Application{TypeMeta: appMeta}
|
||||
|
||||
@@ -2,7 +2,6 @@ package utils

import (
"fmt"
"slices"
"sort"
"strconv"
"strings"
@@ -208,7 +207,12 @@ type Requirement struct {
}

func (r *Requirement) hasValue(value string) bool {
return slices.Contains(r.strValues, value)
for i := range r.strValues {
if r.strValues[i] == value {
return true
}
}
return false
}

func (r *Requirement) Matches(ls labels.Labels) bool {

@@ -30,10 +30,6 @@ import (
|
||||
|
||||
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
|
||||
|
||||
// baseTemplate is a pre-initialized template with all sprig functions loaded.
|
||||
// Cloning this is much faster than calling Funcs() on a new template each time.
|
||||
var baseTemplate *template.Template
|
||||
|
||||
func init() {
|
||||
// Avoid allowing the user to learn things about the environment.
|
||||
delete(sprigFuncMap, "env")
|
||||
@@ -44,10 +40,6 @@ func init() {
|
||||
sprigFuncMap["toYaml"] = toYAML
|
||||
sprigFuncMap["fromYaml"] = fromYAML
|
||||
sprigFuncMap["fromYamlArray"] = fromYAMLArray
|
||||
|
||||
// Initialize the base template with sprig functions once at startup.
|
||||
// This must be done after modifying sprigFuncMap above.
|
||||
baseTemplate = template.New("base").Funcs(sprigFuncMap)
|
||||
}
|
||||
|
||||
type Renderer interface {
|
||||
@@ -317,21 +309,16 @@ var isTemplatedRegex = regexp.MustCompile(".*{{.*}}.*")
|
||||
// remaining in the substituted template.
|
||||
func (r *Render) Replace(tmpl string, replaceMap map[string]any, useGoTemplate bool, goTemplateOptions []string) (string, error) {
|
||||
if useGoTemplate {
|
||||
// Clone the base template which has sprig funcs pre-loaded
|
||||
cloned, err := baseTemplate.Clone()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to clone base template: %w", err)
|
||||
}
|
||||
for _, option := range goTemplateOptions {
|
||||
cloned = cloned.Option(option)
|
||||
}
|
||||
parsed, err := cloned.Parse(tmpl)
|
||||
template, err := template.New("").Funcs(sprigFuncMap).Parse(tmpl)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template %s: %w", tmpl, err)
|
||||
}
|
||||
for _, option := range goTemplateOptions {
|
||||
template = template.Option(option)
|
||||
}
|
||||
|
||||
var replacedTmplBuffer bytes.Buffer
|
||||
if err = parsed.Execute(&replacedTmplBuffer, replaceMap); err != nil {
|
||||
if err = template.Execute(&replacedTmplBuffer, replaceMap); err != nil {
|
||||
return "", fmt.Errorf("failed to execute go template %s: %w", tmpl, err)
|
||||
}
|
||||
|
||||
@@ -388,7 +375,8 @@ func invalidGenerators(applicationSetInfo *argoappsv1.ApplicationSet) (bool, map
|
||||
for index, generator := range applicationSetInfo.Spec.Generators {
|
||||
v := reflect.Indirect(reflect.ValueOf(generator))
|
||||
found := false
|
||||
for _, field := range v.Fields() {
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if !field.CanInterface() {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -514,7 +514,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
params: map[string]any{
|
||||
"data": `a data string`,
|
||||
},
|
||||
errorMessage: `failed to parse template {{functiondoesnotexist}}: template: base:1: function "functiondoesnotexist" not defined`,
|
||||
errorMessage: `failed to parse template {{functiondoesnotexist}}: template: :1: function "functiondoesnotexist" not defined`,
|
||||
},
|
||||
{
|
||||
name: "Test template error",
|
||||
@@ -523,7 +523,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
params: map[string]any{
|
||||
"data": `a data string`,
|
||||
},
|
||||
errorMessage: `failed to execute go template {{.data.test}}: template: base:1:7: executing "base" at <.data.test>: can't evaluate field test in type interface {}`,
|
||||
errorMessage: `failed to execute go template {{.data.test}}: template: :1:7: executing "" at <.data.test>: can't evaluate field test in type interface {}`,
|
||||
},
|
||||
{
|
||||
name: "lookup missing value with missingkey=default",
|
||||
@@ -543,7 +543,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
"unused": "this is not used",
|
||||
},
|
||||
templateOptions: []string{"missingkey=error"},
|
||||
errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: base:1:6: executing "base" at <.doesnotexist>: map has no entry for key "doesnotexist"`,
|
||||
errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: :1:6: executing "" at <.doesnotexist>: map has no entry for key "doesnotexist"`,
|
||||
},
|
||||
{
|
||||
name: "toYaml",
|
||||
@@ -563,7 +563,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
name: "toYaml Error",
|
||||
fieldVal: `{{ toYaml . | indent 2 }}`,
|
||||
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
|
||||
errorMessage: "failed to execute go template {{ toYaml . | indent 2 }}: template: base:1:3: executing \"base\" at <toYaml .>: error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)",
|
||||
errorMessage: "failed to execute go template {{ toYaml . | indent 2 }}: template: :1:3: executing \"\" at <toYaml .>: error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)",
|
||||
params: map[string]any{
|
||||
"foo": func(_ *string) {
|
||||
},
|
||||
@@ -581,7 +581,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
name: "fromYaml error",
|
||||
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
|
||||
expectedVal: "world",
|
||||
errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: base:1:8: executing \"base\" at <fromYaml .value>: error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}",
|
||||
errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: :1:8: executing \"\" at <fromYaml .value>: error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}",
|
||||
params: map[string]any{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
@@ -598,7 +598,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
name: "fromYamlArray error",
|
||||
fieldVal: `{{ fromYamlArray .value | last }}`,
|
||||
expectedVal: "bonjour tout le monde",
|
||||
errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: base:1:3: executing \"base\" at <fromYamlArray .value>: error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}",
|
||||
errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: :1:3: executing \"\" at <fromYamlArray .value>: error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}",
|
||||
params: map[string]any{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
|
||||
@@ -107,8 +107,10 @@ func NewWebhookHandler(webhookParallelism int, argocdSettingsMgr *argosettings.S

func (h *WebhookHandler) startWorkerPool(webhookParallelism int) {
compLog := log.WithField("component", "applicationset-webhook")
for range webhookParallelism {
h.Go(func() {
for i := 0; i < webhookParallelism; i++ {
h.Add(1)
go func() {
defer h.Done()
for {
payload, ok := <-h.queue
if !ok {
@@ -116,7 +118,7 @@ func (h *WebhookHandler) startWorkerPool(webhookParallelism int) {
}
guard.RecoverAndLog(func() { h.HandleEvent(payload) }, compLog, panicMsgAppSet)
}
})
}()
}
}

@@ -609,7 +609,7 @@ func fakeAppWithMatrixAndNestedGitGenerator(name, namespace, repo string) *v1alp
|
||||
},
|
||||
{
|
||||
Matrix: &apiextensionsv1.JSON{
|
||||
Raw: fmt.Appendf(nil, `{
|
||||
Raw: []byte(fmt.Sprintf(`{
|
||||
"Generators": [
|
||||
{
|
||||
"List": {
|
||||
@@ -626,7 +626,7 @@ func fakeAppWithMatrixAndNestedGitGenerator(name, namespace, repo string) *v1alp
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, repo),
|
||||
}`, repo)),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -707,7 +707,7 @@ func fakeAppWithMergeAndNestedGitGenerator(name, namespace, repo string) *v1alph
|
||||
},
|
||||
{
|
||||
Merge: &apiextensionsv1.JSON{
|
||||
Raw: fmt.Appendf(nil, `{
|
||||
Raw: []byte(fmt.Sprintf(`{
|
||||
"MergeKeys": ["server"],
|
||||
"Generators": [
|
||||
{
|
||||
@@ -719,7 +719,7 @@ func fakeAppWithMergeAndNestedGitGenerator(name, namespace, repo string) *v1alph
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, repo),
|
||||
}`, repo)),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
95
assets/swagger.json
generated
@@ -2265,44 +2265,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/{name}/events": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationSetService"
|
||||
],
|
||||
"summary": "ListResourceEvents returns a list of event resources",
|
||||
"operationId": "ApplicationSetService_ListResourceEvents",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "the applicationsets's name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "The application set namespace. Default empty is argocd control plane namespace.",
|
||||
"name": "appsetNamespace",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1EventList"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/{name}/resource-tree": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -5657,9 +5619,6 @@
|
||||
"statusBadgeRootUrl": {
|
||||
"type": "string"
|
||||
},
|
||||
"syncWithReplaceAllowed": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"trackingMethod": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -6867,14 +6826,14 @@
|
||||
"type": "array",
|
||||
"title": "ClusterResourceBlacklist contains list of blacklisted cluster level resources",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ClusterResourceRestrictionItem"
|
||||
"$ref": "#/definitions/v1GroupKind"
|
||||
}
|
||||
},
|
||||
"clusterResourceWhitelist": {
|
||||
"type": "array",
|
||||
"title": "ClusterResourceWhitelist contains list of whitelisted cluster level resources",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ClusterResourceRestrictionItem"
|
||||
"$ref": "#/definitions/v1GroupKind"
|
||||
}
|
||||
},
|
||||
"description": {
|
||||
@@ -7088,7 +7047,7 @@
|
||||
},
|
||||
"v1alpha1ApplicationSet": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSet is a set of Application resources.\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status",
|
||||
"title": "ApplicationSet is a set of Application resources\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status",
|
||||
"properties": {
|
||||
"metadata": {
|
||||
"$ref": "#/definitions/v1ObjectMeta"
|
||||
@@ -7118,7 +7077,7 @@
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"title": "Status contains the AppSet's perceived status of the managed Application resource"
|
||||
"title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)"
|
||||
},
|
||||
"step": {
|
||||
"type": "string",
|
||||
@@ -7299,7 +7258,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"applyNestedSelectors": {
|
||||
"description": "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators.\n\nDeprecated: This field is ignored, and the behavior is always enabled. The field will be removed in a future\nversion of the ApplicationSet CRD.",
|
||||
"description": "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators\nDeprecated: This field is ignored, and the behavior is always enabled. The field will be removed in a future\nversion of the ApplicationSet CRD.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"generators": {
|
||||
@@ -7357,9 +7316,6 @@
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetCondition"
|
||||
}
|
||||
},
|
||||
"health": {
|
||||
"$ref": "#/definitions/v1alpha1HealthStatus"
|
||||
},
|
||||
"resources": {
|
||||
"description": "Resources is a list of Applications resources managed by this application set.",
|
||||
"type": "array",
|
||||
@@ -8205,22 +8161,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ClusterResourceRestrictionItem": {
|
||||
"type": "object",
|
||||
"title": "ClusterResourceRestrictionItem is a cluster resource that is restricted by the project's whitelist or blacklist",
|
||||
"properties": {
|
||||
"group": {
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name is the name of the restricted resource. Glob patterns using Go's filepath.Match syntax are supported.\nUnlike the group and kind fields, if no name is specified, all resources of the specified group/kind are matched.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1Command": {
|
||||
"type": "object",
|
||||
"title": "Command holds binary path and arguments list",
|
||||
@@ -8314,7 +8254,7 @@
|
||||
}
|
||||
},
|
||||
"v1alpha1ConfigMapKeyRef": {
|
||||
"description": "ConfigMapKeyRef struct for a reference to a configmap key.",
|
||||
"description": "Utility struct for a reference to a configmap key.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"configMapName": {
|
||||
@@ -8346,22 +8286,10 @@
|
||||
"description": "DrySource specifies a location for dry \"don't repeat yourself\" manifest source information.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"directory": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourceDirectory"
|
||||
},
|
||||
"helm": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourceHelm"
|
||||
},
|
||||
"kustomize": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourceKustomize"
|
||||
},
|
||||
"path": {
|
||||
"type": "string",
|
||||
"title": "Path is a directory path within the Git repository where the manifests are located"
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourcePlugin"
|
||||
},
|
||||
"repoURL": {
|
||||
"type": "string",
|
||||
"title": "RepoURL is the URL to the git repository that contains the application manifests"
|
||||
@@ -9408,7 +9336,7 @@
|
||||
}
|
||||
},
|
||||
"v1alpha1PullRequestGeneratorGithub": {
|
||||
"description": "PullRequestGeneratorGithub defines connection info specific to GitHub.",
|
||||
"description": "PullRequestGenerator defines connection info specific to GitHub.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"api": {
|
||||
@@ -9506,7 +9434,7 @@
|
||||
"title": "TLSClientCertKey specifies the TLS client cert key for authenticating at the repo server"
|
||||
},
|
||||
"type": {
|
||||
"description": "Type specifies the type of the repoCreds. Can be either \"git\", \"helm\" or \"oci\". \"git\" is assumed if empty or absent.",
|
||||
"description": "Type specifies the type of the repoCreds. Can be either \"git\" or \"helm. \"git\" is assumed if empty or absent.",
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
@@ -9549,11 +9477,6 @@
|
||||
"connectionState": {
|
||||
"$ref": "#/definitions/v1alpha1ConnectionState"
|
||||
},
|
||||
"depth": {
|
||||
"description": "Depth specifies the depth for shallow clones. A value of 0 or omitting the field indicates a full clone.",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"enableLfs": {
|
||||
"description": "EnableLFS specifies whether git-lfs support should be enabled for this repo. Only valid for Git repositories.",
|
||||
"type": "boolean"
|
||||
@@ -10417,7 +10340,7 @@
|
||||
}
|
||||
},
|
||||
"v1alpha1SecretRef": {
|
||||
"description": "SecretRef struct for a reference to a secret key.",
|
||||
"description": "Utility struct for a reference to a secret key.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"key": {
|
||||
|
||||
@@ -41,6 +41,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// CLIName is the name of the CLI
|
||||
cliName = common.ApplicationController
|
||||
// Default time in seconds for application resync period
|
||||
defaultAppResyncPeriod = 120
|
||||
// Default time in seconds for application resync period jitter
|
||||
@@ -97,7 +99,7 @@ func NewCommand() *cobra.Command {
|
||||
hydratorEnabled bool
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: common.CommandApplicationController,
|
||||
Use: cliName,
|
||||
Short: "Run ArgoCD Application Controller",
|
||||
Long: "ArgoCD application controller is a Kubernetes controller that continuously monitors running applications and compares the current, live state against the desired target state (as specified in the repo). This command runs Application Controller in the foreground. It can be configured by following options.",
|
||||
DisableAutoGenTag: true,
|
||||
@@ -200,6 +202,7 @@ func NewCommand() *cobra.Command {
|
||||
time.Duration(appResyncJitter)*time.Second,
|
||||
time.Duration(selfHealTimeoutSeconds)*time.Second,
|
||||
selfHealBackoff,
|
||||
time.Duration(selfHealBackoffCooldownSeconds)*time.Second,
|
||||
time.Duration(syncTimeout)*time.Second,
|
||||
time.Duration(repoErrorGracePeriod)*time.Second,
|
||||
metricsPort,
|
||||
@@ -272,7 +275,6 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&selfHealBackoffFactor, "self-heal-backoff-factor", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_FACTOR", 3, 0, math.MaxInt32), "Specifies factor of exponential timeout between application self heal attempts")
|
||||
command.Flags().IntVar(&selfHealBackoffCapSeconds, "self-heal-backoff-cap-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_CAP_SECONDS", 300, 0, math.MaxInt32), "Specifies max timeout of exponential backoff between application self heal attempts")
|
||||
command.Flags().IntVar(&selfHealBackoffCooldownSeconds, "self-heal-backoff-cooldown-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_COOLDOWN_SECONDS", 330, 0, math.MaxInt32), "Specifies period of time the app needs to stay synced before the self heal backoff can reset")
|
||||
errors.CheckError(command.Flags().MarkDeprecated("self-heal-backoff-cooldown-seconds", "This flag is deprecated and has no effect."))
|
||||
command.Flags().IntVar(&syncTimeout, "sync-timeout", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SYNC_TIMEOUT", 0, 0, math.MaxInt32), "Specifies the timeout after which a sync would be terminated. 0 means no timeout (default 0).")
|
||||
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
|
||||
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
|
||||
|
||||
@@ -32,7 +32,6 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -49,6 +48,10 @@ import (
|
||||
|
||||
var gitSubmoduleEnabled = env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true)
|
||||
|
||||
const (
|
||||
cliName = common.ApplicationSetController
|
||||
)
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
@@ -83,7 +86,7 @@ func NewCommand() *cobra.Command {
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = appv1alpha1.AddToScheme(scheme)
|
||||
command := cobra.Command{
|
||||
Use: common.CommandApplicationSetController,
|
||||
Use: cliName,
|
||||
Short: "Starts Argo CD ApplicationSet controller",
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(c *cobra.Command, _ []string) error {
|
||||
@@ -102,12 +105,7 @@ func NewCommand() *cobra.Command {
|
||||
)
|
||||
|
||||
cli.SetLogFormat(cmdutil.LogFormat)
|
||||
|
||||
if debugLog {
|
||||
cli.SetLogLevel("debug")
|
||||
} else {
|
||||
cli.SetLogLevel(cmdutil.LogLevel)
|
||||
}
|
||||
cli.SetLogLevel(cmdutil.LogLevel)
|
||||
|
||||
ctrl.SetLogger(logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()))
|
||||
|
||||
@@ -190,18 +188,6 @@ func NewCommand() *cobra.Command {
|
||||
argoSettingsMgr := argosettings.NewSettingsManager(ctx, k8sClient, namespace)
|
||||
argoCDDB := db.NewDB(namespace, argoSettingsMgr, k8sClient)
|
||||
|
||||
clusterInformer, err := argosettings.NewClusterInformer(k8sClient, namespace)
|
||||
if err != nil {
|
||||
log.Error(err, "unable to create cluster informer")
|
||||
os.Exit(1)
|
||||
}
|
||||
go clusterInformer.Run(ctx.Done())
|
||||
|
||||
if !cache.WaitForCacheSync(ctx.Done(), clusterInformer.HasSynced) {
|
||||
log.Error("Timed out waiting for cluster cache to sync")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
scmConfig := generators.NewSCMConfig(scmRootCAPath, allowedScmProviders, enableScmProviders, enableGitHubAPIMetrics, github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)), tokenRefStrictMode)
|
||||
|
||||
tlsConfig := apiclient.TLSConfiguration{
|
||||
@@ -221,7 +207,7 @@ func NewCommand() *cobra.Command {
|
||||
repoClientset := apiclient.NewRepoServerClientset(argocdRepoServer, repoServerTimeoutSeconds, tlsConfig)
|
||||
argoCDService := services.NewArgoCDService(argoCDDB, gitSubmoduleEnabled, repoClientset, enableNewGitFileGlobbing)
|
||||
|
||||
topLevelGenerators := generators.GetGenerators(ctx, mgr.GetClient(), k8sClient, namespace, argoCDService, dynamicClient, scmConfig, clusterInformer)
|
||||
topLevelGenerators := generators.GetGenerators(ctx, mgr.GetClient(), k8sClient, namespace, argoCDService, dynamicClient, scmConfig)
|
||||
|
||||
// start a webhook server that listens to incoming webhook payloads
|
||||
webhookHandler, err := webhook.NewWebhookHandler(webhookParallelism, argoSettingsMgr, mgr.GetClient(), topLevelGenerators)
|
||||
@@ -257,7 +243,6 @@ func NewCommand() *cobra.Command {
|
||||
GlobalPreservedLabels: globalPreservedLabels,
|
||||
Metrics: &metrics,
|
||||
MaxResourcesStatusCount: maxResourcesStatusCount,
|
||||
ClusterInformer: clusterInformer,
|
||||
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
|
||||
os.Exit(1)
|
||||
@@ -302,7 +287,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
|
||||
command.Flags().StringSliceVar(&metricsAplicationsetLabels, "metrics-applicationset-labels", []string{}, "List of Application labels that will be added to the argocd_applicationset_labels metric")
|
||||
command.Flags().BoolVar(&enableGitHubAPIMetrics, "enable-github-api-metrics", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_GITHUB_API_METRICS", false), "Enable GitHub API metrics for generators that use the GitHub API")
|
||||
command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 5000, 0, math.MaxInt), "Max number of resources stored in appset status.")
|
||||
command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 0, 0, math.MaxInt), "Max number of resources stored in appset status.")
|
||||
|
||||
return &command
|
||||
}
|
||||
|
||||
@@ -18,6 +18,11 @@ import (
|
||||
traceutil "github.com/argoproj/argo-cd/v3/util/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
// CLIName is the name of the CLI
|
||||
cliName = "argocd-cmp-server"
|
||||
)
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
configFilePath string
|
||||
@@ -27,7 +32,7 @@ func NewCommand() *cobra.Command {
|
||||
otlpAttrs []string
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: common.CommandCMPServer,
|
||||
Use: cliName,
|
||||
Short: "Run ArgoCD ConfigManagementPlugin Server",
|
||||
Long: "ArgoCD ConfigManagementPlugin Server is an internal service which runs as sidecar container in reposerver deployment. The following configuration options are available:",
|
||||
DisableAutoGenTag: true,
|
||||
|
||||
@@ -35,10 +35,10 @@ func NewCommand() *cobra.Command {
|
||||
metricsHost string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: common.CommandCommitServer,
|
||||
Use: "argocd-commit-server",
|
||||
Short: "Run Argo CD Commit Server",
|
||||
Long: "Argo CD Commit Server is an internal service which commits and pushes hydrated manifests to git. This command runs Commit Server in the foreground.",
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
RunE: func(_ *cobra.Command, _ []string) error {
|
||||
vers := common.GetVersion()
|
||||
vers.LogStartupInfo(
|
||||
"Argo CD Commit Server",
|
||||
@@ -59,10 +59,8 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
server := commitserver.NewServer(askPassServer, metricsServer)
|
||||
grpc := server.CreateGRPC()
|
||||
ctx := cmd.Context()
|
||||
|
||||
lc := &net.ListenConfig{}
|
||||
listener, err := lc.Listen(ctx, "tcp", fmt.Sprintf("%s:%d", listenHost, listenPort))
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", listenHost, listenPort))
|
||||
errors.CheckError(err)
|
||||
|
||||
healthz.ServeHealthCheck(http.DefaultServeMux, func(r *http.Request) error {
|
||||
@@ -91,11 +89,13 @@ func NewCommand() *cobra.Command {
|
||||
sigCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Go(func() {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s := <-sigCh
|
||||
log.Printf("got signal %v, attempting graceful shutdown", s)
|
||||
grpc.GracefulStop()
|
||||
})
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
log.Println("starting grpc server")
|
||||
err = grpc.Serve(listener)
|
||||
|
||||
@@ -25,9 +25,13 @@ import (
"github.com/argoproj/argo-cd/v3/util/tls"
)

const (
cliName = "argocd-dex"
)

func NewCommand() *cobra.Command {
command := &cobra.Command{
Use: common.CommandDex,
Use: cliName,
Short: "argocd-dex tools used by Argo CD",
Long: "argocd-dex has internal utility tools used by Argo CD",
DisableAutoGenTag: true,
@@ -111,7 +115,7 @@ func NewRunDexCommand() *cobra.Command {
|
||||
err = os.WriteFile("/tmp/dex.yaml", dexCfgBytes, 0o644)
|
||||
errors.CheckError(err)
|
||||
log.Debug(redactor(string(dexCfgBytes)))
|
||||
cmd = exec.CommandContext(ctx, "dex", "serve", "/tmp/dex.yaml")
|
||||
cmd = exec.Command("dex", "serve", "/tmp/dex.yaml")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
err = cmd.Start()
|
||||
|
||||
@@ -9,16 +9,20 @@ import (
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/util/askpass"
|
||||
"github.com/argoproj/argo-cd/v3/util/errors"
|
||||
grpc_util "github.com/argoproj/argo-cd/v3/util/grpc"
|
||||
utilio "github.com/argoproj/argo-cd/v3/util/io"
|
||||
)
|
||||
|
||||
const (
|
||||
// cliName is the name of the CLI
|
||||
cliName = "argocd-git-ask-pass"
|
||||
)
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
command := cobra.Command{
|
||||
Use: common.CommandGitAskPass,
|
||||
Use: cliName,
|
||||
Short: "Argo CD git credential helper",
|
||||
DisableAutoGenTag: true,
|
||||
Run: func(c *cobra.Command, _ []string) {
|
||||
|
||||
@@ -2,13 +2,15 @@ package commands

import (
"github.com/spf13/cobra"
)

"github.com/argoproj/argo-cd/v3/common"
const (
cliName = "argocd-k8s-auth"
)

func NewCommand() *cobra.Command {
command := &cobra.Command{
Use: common.CommandK8sAuth,
Use: cliName,
Short: "argocd-k8s-auth a set of commands to generate k8s auth token",
DisableAutoGenTag: true,
Run: func(c *cobra.Command, args []string) {

@@ -6,14 +6,12 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
|
||||
"github.com/aws/aws-sdk-go-v2/service/sts"
|
||||
smithyhttp "github.com/aws/smithy-go/transport/http"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
@@ -60,13 +58,13 @@ func newAWSCommand() *cobra.Command {
|
||||
return command
|
||||
}
|
||||
|
||||
type getSignedRequestFunc func(ctx context.Context, clusterName, roleARN string, profile string) (string, error)
|
||||
type getSignedRequestFunc func(clusterName, roleARN string, profile string) (string, error)
|
||||
|
||||
func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, profile string, fn getSignedRequestFunc) (string, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
for {
|
||||
signed, err := fn(ctx, clusterName, roleARN, profile)
|
||||
signed, err := fn(clusterName, roleARN, profile)
|
||||
if err == nil {
|
||||
return signed, nil
|
||||
}
|
||||
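getSignedRequestWithRetry above bounds all attempts with a single context deadline and waits between failed calls. A minimal sketch of that retry shape with a generic callback; the helper name and body below are illustrative, not copied from the file:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWithTimeout keeps calling fn until it succeeds, the overall timeout
// expires, or the parent context is cancelled, sleeping interval between tries.
func retryWithTimeout(ctx context.Context, timeout, interval time.Duration, fn func(context.Context) (string, error)) (string, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	for {
		out, err := fn(ctx)
		if err == nil {
			return out, nil
		}
		select {
		case <-ctx.Done():
			// Surface the last attempt's error alongside the give-up reason.
			return "", fmt.Errorf("giving up after %s: %w", timeout, err)
		case <-time.After(interval):
			// fall through and retry
		}
	}
}

func main() {
	calls := 0
	out, err := retryWithTimeout(context.Background(), time.Second, 50*time.Millisecond, func(context.Context) (string, error) {
		calls++
		if calls < 3 {
			return "", errors.New("transient failure")
		}
		return "signed-url", nil
	})
	fmt.Println(out, err, calls)
}
```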
@@ -78,53 +76,25 @@ func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Durat
}
}

func getSignedRequest(ctx context.Context, clusterName, roleARN string, profile string) (string, error) {
cfg, err := loadAWSConfig(ctx, profile)
func getSignedRequest(clusterName, roleARN string, profile string) (string, error) {
sess, err := session.NewSessionWithOptions(session.Options{
Profile: profile,
})
if err != nil {
return "", err
return "", fmt.Errorf("error creating new AWS session: %w", err)
}
return getSignedRequestWithConfig(ctx, clusterName, roleARN, cfg)
}

func loadAWSConfig(ctx context.Context, profile string) (aws.Config, error) {
var opts []func(*config.LoadOptions) error
if profile != "" {
opts = append(opts, config.WithSharedConfigProfile(profile))
}
cfg, err := config.LoadDefaultConfig(ctx, opts...)
if err != nil {
return aws.Config{}, fmt.Errorf("error loading AWS configuration: %w", err)
}
return cfg, nil
}

// getSignedRequestWithConfig presigns GetCallerIdentity using the given config. Used by getSignedRequest and by tests
// that inject a config with static credentials to exercise the roleARN path without real AWS credentials.
func getSignedRequestWithConfig(ctx context.Context, clusterName, roleARN string, cfg aws.Config) (string, error) {
// Use PresignOptions.ClientOptions + SetHeaderValue (same as aws-iam-authenticator) so the
// canonical request matches what EKS sends when validating. Build middleware can produce
// a different canonical form and thus an invalid signature for EKS.
// See kubernetes-sigs/aws-iam-authenticator pkg/token/token.go GetWithSTS().
client := sts.NewFromConfig(cfg)
stsAPI := sts.New(sess)
if roleARN != "" {
appCreds := stscreds.NewAssumeRoleProvider(client, roleARN)
cfg.Credentials = aws.NewCredentialsCache(appCreds)
client = sts.NewFromConfig(cfg)
creds := stscreds.NewCredentials(sess, roleARN)
stsAPI = sts.New(sess, &aws.Config{Credentials: creds})
}

presignClient := sts.NewPresignClient(client)
presigned, err := presignClient.PresignGetCallerIdentity(ctx, &sts.GetCallerIdentityInput{},
func(presignOptions *sts.PresignOptions) {
presignOptions.ClientOptions = append(presignOptions.ClientOptions, func(stsOptions *sts.Options) {
stsOptions.APIOptions = append(stsOptions.APIOptions,
smithyhttp.SetHeaderValue(clusterIDHeader, clusterName),
smithyhttp.SetHeaderValue("X-Amz-Expires", strconv.Itoa(requestPresignParam)))
})
})
request, _ := stsAPI.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
request.HTTPRequest.Header.Add(clusterIDHeader, clusterName)
signed, err := request.Presign(requestPresignParam)
if err != nil {
return "", fmt.Errorf("error presigning AWS request: %w", err)
}
return presigned.URL, nil
return signed, nil
}

func formatJSON(token string, expiration time.Time) string {
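Both variants of getSignedRequest ultimately produce a presigned GetCallerIdentity URL. For context, a hedged sketch of how such a URL is conventionally wrapped into an EKS bearer token and an ExecCredential document (the "k8s-aws-v1" scheme used by aws-iam-authenticator); this is an assumption for illustration, not a copy of formatJSON above:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"time"
)

// execCredentialJSON is a hypothetical helper: it base64url-encodes the
// presigned STS URL behind the k8s-aws-v1 prefix and emits an ExecCredential.
func execCredentialJSON(presignedURL string, expiration time.Time) (string, error) {
	// Token = fixed prefix + raw (unpadded) base64url of the presigned URL.
	token := "k8s-aws-v1." + base64.RawURLEncoding.EncodeToString([]byte(presignedURL))

	cred := map[string]any{
		"apiVersion": "client.authentication.k8s.io/v1beta1",
		"kind":       "ExecCredential",
		"status": map[string]string{
			"token":               token,
			"expirationTimestamp": expiration.UTC().Format(time.RFC3339),
		},
	}
	out, err := json.Marshal(cred)
	if err != nil {
		return "", err
	}
	return string(out), nil
}

func main() {
	s, _ := execCredentialJSON("https://sts.us-east-1.amazonaws.com/?Action=GetCallerIdentity", time.Now().Add(14*time.Minute))
	fmt.Println(s)
}
```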
@@ -1,60 +1,14 @@
package commands

import (
"context"
"errors"
"testing"
"time"

"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestGetSignedRequest(t *testing.T) {
t.Parallel()

t.Run("returns error when context is cancelled", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
cancel()

url, err := getSignedRequest(ctx, "my-cluster", "", "")

require.ErrorIs(t, err, context.Canceled)
assert.Empty(t, url)
})

t.Run("returns error for non-existent profile", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
profile := "argocd-k8s-auth-test-nonexistent-profile-12345"

url, err := getSignedRequest(ctx, "my-cluster", "", profile)

require.Error(t, err)
assert.Empty(t, url)
assert.Contains(t, err.Error(), "configuration", "error should mention configuration load failed")
})

t.Run("returns error when roleARN is provided and assume role fails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
cfg, err := config.LoadDefaultConfig(ctx,
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("test", "test", "")),
config.WithRegion("us-east-1"),
)
require.NoError(t, err)

url, err := getSignedRequestWithConfig(ctx, "my-cluster", "arn:aws:iam::123456789012:role/NonExistentRole", cfg)

require.Error(t, err)
assert.Empty(t, url)
assert.Contains(t, err.Error(), "presigning", "error should mention presigning failed when assume role is used")
})
}

func TestGetSignedRequestWithRetry(t *testing.T) {
t.Parallel()

@@ -118,7 +72,7 @@ type signedRequestMock struct {
returnFunc func(m *signedRequestMock) (string, error)
}

func (m *signedRequestMock) getSignedRequestMock(_ context.Context, _, _ string, _ string) (string, error) {
func (m *signedRequestMock) getSignedRequestMock(_, _ string, _ string) (string, error) {
m.getSignedRequestCalls++
return m.returnFunc(m)
}

@@ -52,7 +52,7 @@ func NewCommand() *cobra.Command {
selfServiceNotificationEnabled bool
)
command := cobra.Command{
Use: common.CommandNotifications,
Use: "controller",
Short: "Starts Argo CD Notifications controller",
RunE: func(_ *cobra.Command, _ []string) error {
ctx, cancel := context.WithCancel(context.Background())
@@ -150,11 +150,13 @@ func NewCommand() *cobra.Command {
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
wg := sync.WaitGroup{}
wg.Go(func() {
wg.Add(1)
go func() {
defer wg.Done()
s := <-sigCh
log.Printf("got signal %v, attempting graceful shutdown", s)
cancel()
})
}()

go ctrl.Run(ctx, processorsCount)
<-ctx.Done()
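The hunk above switches between sync.WaitGroup.Go and the classic Add/Done pattern for the shutdown goroutine. A minimal sketch of the two equivalent forms; WaitGroup.Go needs Go 1.25+, and the fake signal send exists only so the sketch terminates on its own:

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)

	wg := sync.WaitGroup{}

	// Go 1.25+: WaitGroup.Go wraps the Add(1)/Done bookkeeping around f itself.
	wg.Go(func() {
		s := <-sigCh
		log.Printf("got signal %v, attempting graceful shutdown", s)
		cancel()
	})

	// Pre-1.25 equivalent of the same goroutine:
	//   wg.Add(1)
	//   go func() {
	//       defer wg.Done()
	//       s := <-sigCh
	//       log.Printf("got signal %v, attempting graceful shutdown", s)
	//       cancel()
	//   }()

	// Fake a SIGTERM so this sketch shuts itself down.
	sigCh <- syscall.SIGTERM
	wg.Wait()
	<-ctx.Done()
}
```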
@@ -34,18 +34,21 @@ import (
"github.com/argoproj/argo-cd/v3/util/gpg"
"github.com/argoproj/argo-cd/v3/util/healthz"
utilio "github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/argo-cd/v3/util/profile"
"github.com/argoproj/argo-cd/v3/util/tls"
traceutil "github.com/argoproj/argo-cd/v3/util/trace"
)

const (
// CLIName is the name of the CLI
cliName = "argocd-repo-server"
)

var (
gnuPGSourcePath = env.StringFromEnv(common.EnvGPGDataPath, "/app/config/gpg/source")
pauseGenerationAfterFailedGenerationAttempts = env.ParseNumFromEnv(common.EnvPauseGenerationAfterFailedAttempts, 3, 0, math.MaxInt32)
pauseGenerationOnFailureForMinutes = env.ParseNumFromEnv(common.EnvPauseGenerationMinutes, 60, 0, math.MaxInt32)
pauseGenerationOnFailureForRequests = env.ParseNumFromEnv(common.EnvPauseGenerationRequests, 0, 0, math.MaxInt32)
gitSubmoduleEnabled = env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true)
helmUserAgent = env.StringFromEnv(common.EnvHelmUserAgent, "")
)

func NewCommand() *cobra.Command {
@@ -80,7 +83,7 @@ func NewCommand() *cobra.Command {
enableBuiltinGitConfig bool
)
command := cobra.Command{
Use: common.CommandRepoServer,
Use: cliName,
Short: "Run ArgoCD Repository Server",
Long: "ArgoCD Repository Server is an internal service which maintains a local cache of the Git repository holding the application manifests, and is responsible for generating and returning the Kubernetes manifests. This command runs Repository Server in the foreground. It can be configured by following options.",
DisableAutoGenTag: true,
@@ -154,7 +157,6 @@ func NewCommand() *cobra.Command {
CMPUseManifestGeneratePaths: cmpUseManifestGeneratePaths,
OCIMediaTypes: ociMediaTypes,
EnableBuiltinGitConfig: enableBuiltinGitConfig,
HelmUserAgent: helmUserAgent,
}, askPassServer)
errors.CheckError(err)

@@ -169,12 +171,10 @@ func NewCommand() *cobra.Command {
}

grpc := server.CreateGRPC()
lc := &net.ListenConfig{}
listener, err := lc.Listen(ctx, "tcp", fmt.Sprintf("%s:%d", listenHost, listenPort))
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", listenHost, listenPort))
errors.CheckError(err)

mux := http.NewServeMux()
healthz.ServeHealthCheck(mux, func(r *http.Request) error {
healthz.ServeHealthCheck(http.DefaultServeMux, func(r *http.Request) error {
if val, ok := r.URL.Query()["full"]; ok && len(val) > 0 && val[0] == "true" {
// connect to itself to make sure repo server is able to serve connection
// used by liveness probe to auto restart repo server
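The listener hunk above switches between net.ListenConfig.Listen, which accepts a context, and plain net.Listen, which does not. A small sketch of both calls; addresses are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"net"
)

func main() {
	ctx := context.Background()

	// Context-aware: listener setup participates in the command's cancellation.
	lc := &net.ListenConfig{}
	l1, err := lc.Listen(ctx, "tcp", "localhost:0")
	if err != nil {
		panic(err)
	}
	defer l1.Close()

	// No context: the call cannot be cancelled once started.
	l2, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}
	defer l2.Close()

	fmt.Println(l1.Addr(), l2.Addr())
}
```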
@@ -196,9 +196,8 @@ func NewCommand() *cobra.Command {
}
return nil
})
mux.Handle("/metrics", metricsServer.GetHandler())
profile.RegisterProfiler(mux)
go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), mux)) }()
http.Handle("/metrics", metricsServer.GetHandler())
go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), nil)) }()
go func() { errors.CheckError(askPassServer.Run()) }()

if gpg.IsGPGEnabled() {
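This hunk moves the /metrics and profiler handlers between a dedicated http.ServeMux and the package-level http.DefaultServeMux (ListenAndServe with nil). A minimal sketch of the two registration styles, with placeholder handlers and ports:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	metrics := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "# metrics placeholder")
	})

	// Dedicated mux: only what is registered here is reachable on this port,
	// so other packages cannot accidentally expose handlers on it.
	mux := http.NewServeMux()
	mux.Handle("/metrics", metrics)
	go func() { _ = http.ListenAndServe("localhost:9093", mux) }()

	// DefaultServeMux: http.Handle registers globally; passing nil to
	// ListenAndServe serves whatever has been registered anywhere.
	http.Handle("/metrics-default", metrics)
	_ = http.ListenAndServe("localhost:9094", nil) // blocks, serving DefaultServeMux
}
```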
@@ -223,11 +222,13 @@ func NewCommand() *cobra.Command {
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
wg := sync.WaitGroup{}
wg.Go(func() {
wg.Add(1)
go func() {
s := <-sigCh
log.Printf("got signal %v, attempting graceful shutdown", s)
grpc.GracefulStop()
})
wg.Done()
}()

log.Println("starting grpc server")
err = grpc.Serve(listener)

@@ -101,7 +101,7 @@ func NewCommand() *cobra.Command {
enableK8sEvent []string
)
command := &cobra.Command{
Use: common.CommandServer,
Use: cliName,
Short: "Run the ArgoCD API server",
Long: "The API server is a gRPC/REST server which exposes the API consumed by the Web UI, CLI, and CI/CD systems. This command runs API server in the foreground. It can be configured by following options.",
DisableAutoGenTag: true,
@@ -307,7 +307,7 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json", env.StringFromEnvOpts{AllowEmpty: true}), "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
command.AddCommand(cli.NewVersionCmd(common.CommandServer))
command.AddCommand(cli.NewVersionCmd(cliName))
command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
command.Flags().IntVar(&listenPort, "port", common.DefaultPortAPIServer, "Listen on given port")
command.Flags().StringVar(&metricsHost, env.StringFromEnv("ARGOCD_SERVER_METRICS_LISTEN_ADDRESS", "metrics-address"), common.DefaultAddressAPIServerMetrics, "Listen for metrics on given address")

cmd/argocd-server/commands/common.go (new file, 6 lines)
@@ -0,0 +1,6 @@
package commands

const (
// cliName is the name of the CLI
cliName = "argocd-server"
)
@@ -183,9 +183,9 @@ func getAdditionalNamespaces(ctx context.Context, configMapsClient dynamic.Resou
namespacesListFromString := func(namespaces string) []string {
listOfNamespaces := []string{}

ss := strings.SplitSeq(namespaces, ",")
ss := strings.Split(namespaces, ",")

for namespace := range ss {
for _, namespace := range ss {
if namespace != "" {
listOfNamespaces = append(listOfNamespaces, strings.TrimSpace(namespace))
}
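The namespace-parsing hunk above toggles between strings.SplitSeq (a Go 1.24+ iterator) and strings.Split (a slice), which is why the range clause changes shape. A small sketch of both loops over the same input:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	namespaces := "argocd, team-a,,team-b "

	// strings.SplitSeq yields substrings lazily, so range produces one value.
	var fromSeq []string
	for ns := range strings.SplitSeq(namespaces, ",") { // Go 1.24+
		if ns != "" {
			fromSeq = append(fromSeq, strings.TrimSpace(ns))
		}
	}

	// strings.Split returns a []string, so range uses the index/value form.
	var fromSlice []string
	for _, ns := range strings.Split(namespaces, ",") {
		if ns != "" {
			fromSlice = append(fromSlice, strings.TrimSpace(ns))
		}
	}

	fmt.Println(fromSeq, fromSlice) // both: [argocd team-a team-b]
}
```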
@@ -10,8 +10,8 @@ import (
"sort"
"time"

"github.com/argoproj/argo-cd/gitops-engine/pkg/health"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Some files were not shown because too many files have changed in this diff.