Compare commits

..

1 Commit

Author SHA1 Message Date
dependabot[bot]
8126886e25 chore(deps): bump node from 20 to 24
Bumps node from 20 to 24.

---
updated-dependencies:
- dependency-name: node
  dependency-version: '24'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-18 03:23:56 +00:00
836 changed files with 24942 additions and 39916 deletions

View File

@@ -2,7 +2,7 @@
name: Bug report
about: Create a report to help us improve
title: ''
labels: ['bug', 'triage/pending']
labels: 'bug'
assignees: ''
---
@@ -10,9 +10,9 @@ assignees: ''
Checklist:
- [ ] I've searched in the docs and FAQ for my answer: https://bit.ly/argocd-faq.
- [ ] I've included steps to reproduce the bug.
- [ ] I've pasted the output of `argocd version`.
* [ ] I've searched in the docs and FAQ for my answer: https://bit.ly/argocd-faq.
* [ ] I've included steps to reproduce the bug.
* [ ] I've pasted the output of `argocd version`.
**Describe the bug**

View File

@@ -2,10 +2,9 @@
name: Enhancement proposal
about: Propose an enhancement for this project
title: ''
labels: ['enhancement', 'triage/pending']
labels: 'enhancement'
assignees: ''
---
# Summary
What change you think needs making.
@@ -16,4 +15,4 @@ Please give examples of your use case, e.g. when would you use this.
# Proposal
How do you think this should be implemented?
How do you think this should be implemented?

View File

@@ -2,17 +2,17 @@
name: New Dev Tool Request
about: This is a request for adding a new tool for setting up a dev environment.
title: ''
labels: ['component:dev-env', 'triage/pending']
labels: ''
assignees: ''
---
Checklist:
- [ ] I am willing to maintain this tool, or have another Argo CD maintainer who is.
- [ ] I have another Argo CD maintainer who is willing to help maintain this tool (there needs to be at least two maintainers willing to maintain this tool)
- [ ] I have a lead sponsor who is a core Argo CD maintainer
- [ ] There is a PR which adds said tool - this is so that the maintainers can assess the impact of having this in the tree
- [ ] I have given a motivation why this should be added
* [ ] I am willing to maintain this tool, or have another Argo CD maintainer who is.
* [ ] I have another Argo CD maintainer who is willing to help maintain this tool (there needs to be at least two maintainers willing to maintain this tool)
* [ ] I have a lead sponsor who is a core Argo CD maintainer
* [ ] There is a PR which adds said tool - this is so that the maintainers can assess the impact of having this in the tree
* [ ] I have given a motivation why this should be added
### The proposer
@@ -24,7 +24,7 @@ Checklist:
### Motivation
<!-- Why this tool would be useful to have in the tree. -->
<!-- Why this tool would be useful to have in the tree. -->
### Link to PR (Optional)

View File

@@ -1,11 +1,10 @@
---
name: Security log
about: Propose adding security-related logs or tagging existing logs with security fields
title: 'seclog: [Event Description]'
labels: ['security', 'triage/pending']
assignees: ''
title: "seclog: [Event Description]"
labels: security-log
assignees: notfromstatefarm
---
# Event to be logged
Specify the event that needs to be logged or existing logs that need to be tagged.
@@ -17,3 +16,4 @@ What security level should these events be logged under? Refer to https://argo-c
# Common Weakness Enumeration
Is there an associated [CWE](https://cwe.mitre.org/) that could be tagged as well?

3
.github/cherry-pick-bot.yml vendored Normal file
View File

@@ -0,0 +1,3 @@
enabled: true
preservePullRequestTitle: true

View File

@@ -1,15 +0,0 @@
module.exports = {
platform: 'github',
gitAuthor: 'renovate[bot] <renovate[bot]@users.noreply.github.com>',
autodiscover: false,
allowPostUpgradeCommandTemplating: true,
allowedPostUpgradeCommands: ["make mockgen"],
extends: [
"github>argoproj/argo-cd//renovate-presets/commons.json5",
"github>argoproj/argo-cd//renovate-presets/custom-managers/shell.json5",
"github>argoproj/argo-cd//renovate-presets/custom-managers/yaml.json5",
"github>argoproj/argo-cd//renovate-presets/fix/disable-all-updates.json5",
"github>argoproj/argo-cd//renovate-presets/devtool.json5",
"github>argoproj/argo-cd//renovate-presets/docs.json5"
]
}

View File

@@ -8,7 +8,7 @@ Checklist:
* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.
* [ ] The title of the PR states what changed and the related issues number (used for the release note).
* [ ] The title of the PR conforms to the [Title of the PR](https://argo-cd.readthedocs.io/en/latest/developer-guide/submit-your-pr/#title-of-the-pr)
* [ ] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/toolchain-guide/#title-of-the-pr)
* [ ] I've included "Closes [ISSUE #]" or "Fixes [ISSUE #]" in the description to automatically close the associated issue.
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
* [ ] Does this PR require documentation updates?

View File

@@ -37,7 +37,7 @@ jobs:
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Add ~/go/bin to PATH

View File

@@ -1,114 +0,0 @@
name: Cherry Pick Single
on:
workflow_call:
inputs:
merge_commit_sha:
required: true
type: string
description: "The merge commit SHA to cherry-pick"
version_number:
required: true
type: string
description: "The version number (from cherry-pick/ label)"
pr_number:
required: true
type: string
description: "The original PR number"
pr_title:
required: true
type: string
description: "The original PR title"
secrets:
CHERRYPICK_APP_ID:
required: true
CHERRYPICK_APP_PRIVATE_KEY:
required: true
jobs:
cherry-pick:
name: Cherry Pick to ${{ inputs.version_number }}
runs-on: ubuntu-latest
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
with:
app-id: ${{ secrets.CHERRYPICK_APP_ID }}
private-key: ${{ secrets.CHERRYPICK_APP_PRIVATE_KEY }}
- name: Checkout repository
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
with:
fetch-depth: 0
token: ${{ steps.generate-token.outputs.token }}
- name: Configure Git
run: |
git config --global user.name "github-actions[bot]"
git config --global user.email "github-actions[bot]@users.noreply.github.com"
- name: Cherry pick commit
id: cherry-pick
run: |
set -e
MERGE_COMMIT="${{ inputs.merge_commit_sha }}"
TARGET_BRANCH="release-${{ inputs.version_number }}"
echo "🍒 Cherry-picking commit $MERGE_COMMIT to branch $TARGET_BRANCH"
# Check if target branch exists
if ! git show-ref --verify --quiet "refs/remotes/origin/$TARGET_BRANCH"; then
echo "❌ Target branch '$TARGET_BRANCH' does not exist"
exit 1
fi
# Create new branch for cherry-pick
CHERRY_PICK_BRANCH="cherry-pick-${{ inputs.pr_number }}-to-${TARGET_BRANCH}"
git checkout -b "$CHERRY_PICK_BRANCH" "origin/$TARGET_BRANCH"
# Perform cherry-pick
if git cherry-pick -m 1 "$MERGE_COMMIT"; then
echo "✅ Cherry-pick successful"
# Extract Signed-off-by from the cherry-pick commit
SIGNOFF=$(git log -1 --pretty=format:"%B" | grep -E '^Signed-off-by:' || echo "")
# Push the new branch
git push origin "$CHERRY_PICK_BRANCH"
# Save data for PR creation
echo "branch_name=$CHERRY_PICK_BRANCH" >> "$GITHUB_OUTPUT"
echo "signoff=$SIGNOFF" >> "$GITHUB_OUTPUT"
echo "target_branch=$TARGET_BRANCH" >> "$GITHUB_OUTPUT"
else
echo "❌ Cherry-pick failed due to conflicts"
git cherry-pick --abort
exit 1
fi
- name: Create Pull Request
run: |
# Create cherry-pick PR
gh pr create \
--title "${{ inputs.pr_title }} (cherry-pick #${{ inputs.pr_number }} for ${{ inputs.version_number }})" \
--body "Cherry-picked ${{ inputs.pr_title }} (#${{ inputs.pr_number }})
${{ steps.cherry-pick.outputs.signoff }}" \
--base "${{ steps.cherry-pick.outputs.target_branch }}" \
--head "${{ steps.cherry-pick.outputs.branch_name }}"
# Comment on original PR
gh pr comment ${{ inputs.pr_number }} \
--body "🍒 Cherry-pick PR created for ${{ inputs.version_number }}: #$(gh pr list --head ${{ steps.cherry-pick.outputs.branch_name }} --json number --jq '.[0].number')"
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
- name: Comment on failure
if: failure()
run: |
gh pr comment ${{ inputs.pr_number }} \
--body "❌ Cherry-pick failed for ${{ inputs.version_number }}. Please check the workflow logs for details."
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}

View File

@@ -1,53 +0,0 @@
name: Cherry Pick
on:
pull_request_target:
branches:
- master
types: ["labeled", "closed"]
jobs:
find-labels:
name: Find Cherry Pick Labels
if: |
github.event.pull_request.merged == true && (
(github.event.action == 'labeled' && startsWith(github.event.label.name, 'cherry-pick/')) ||
(github.event.action == 'closed' && contains(toJSON(github.event.pull_request.labels.*.name), 'cherry-pick/'))
)
runs-on: ubuntu-latest
outputs:
labels: ${{ steps.extract-labels.outputs.labels }}
steps:
- name: Extract cherry-pick labels
id: extract-labels
run: |
if [[ "${{ github.event.action }}" == "labeled" ]]; then
# Label was just added - use it directly
LABEL_NAME="${{ github.event.label.name }}"
VERSION="${LABEL_NAME#cherry-pick/}"
CHERRY_PICK_DATA='[{"label":"'$LABEL_NAME'","version":"'$VERSION'"}]'
else
# PR was closed - find all cherry-pick labels
CHERRY_PICK_DATA=$(echo '${{ toJSON(github.event.pull_request.labels) }}' | jq -c '[.[] | select(.name | startswith("cherry-pick/")) | {label: .name, version: (.name | sub("cherry-pick/"; ""))}]')
fi
echo "labels=$CHERRY_PICK_DATA" >> "$GITHUB_OUTPUT"
echo "Found cherry-pick data: $CHERRY_PICK_DATA"
cherry-pick:
name: Cherry Pick
needs: find-labels
if: needs.find-labels.outputs.labels != '[]'
strategy:
matrix:
include: ${{ fromJSON(needs.find-labels.outputs.labels) }}
fail-fast: false
uses: ./.github/workflows/cherry-pick-single.yml
with:
merge_commit_sha: ${{ github.event.pull_request.merge_commit_sha }}
version_number: ${{ matrix.version }}
pr_number: ${{ github.event.pull_request.number }}
pr_title: ${{ github.event.pull_request.title }}
secrets:
CHERRYPICK_APP_ID: ${{ vars.CHERRYPICK_APP_ID }}
CHERRYPICK_APP_PRIVATE_KEY: ${{ secrets.CHERRYPICK_APP_PRIVATE_KEY }}

View File

@@ -14,7 +14,7 @@ on:
env:
# Golang version to use across CI steps
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.25.0'
GOLANG_VERSION: '1.24.4'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -32,7 +32,7 @@ jobs:
docs: ${{ steps.filter.outputs.docs_any_changed }}
steps:
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
- uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
id: filter
with:
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
@@ -57,7 +57,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -78,11 +78,11 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -105,14 +105,14 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with:
# renovate: datasource=go packageName=github.com/golangci/golangci-lint versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
version: v2.4.0
version: v2.1.6
args: --verbose
test-go:
@@ -133,7 +133,7 @@ jobs:
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -153,7 +153,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -197,7 +197,7 @@ jobs:
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -217,7 +217,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -253,7 +253,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
@@ -305,13 +305,13 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup NodeJS
uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
# renovate: datasource=node-version packageName=node versioning=node
node-version: '22.9.0'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -339,7 +339,7 @@ jobs:
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- run: |
sudo apt-get install shellcheck
shellcheck -e SC2059 -e SC2154 -e SC2034 -e SC2016 -e SC1091 $(find . -type f -name '*.sh' | grep -v './ui/node_modules') | tee sc.log
shellcheck -e SC2086 -e SC2046 -e SC2068 -e SC2206 -e SC2048 -e SC2059 -e SC2154 -e SC2034 -e SC2016 -e SC2128 -e SC1091 -e SC2207 $(find . -type f -name '*.sh') | tee sc.log
test ! -s sc.log
analyze:
@@ -360,7 +360,7 @@ jobs:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -368,12 +368,12 @@ jobs:
run: |
rm -rf ui/node_modules/argo-ui/node_modules
- name: Get e2e code coverage
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: e2e-code-coverage
path: e2e-code-coverage
- name: Get unit test code coverage
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: test-results
path: test-results
@@ -385,7 +385,7 @@ jobs:
run: |
go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller,e2e-code-coverage/commit-server -o test-results/full-coverage.out
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
files: test-results/full-coverage.out
fail_ci_if_error: true
@@ -402,12 +402,12 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
uses: SonarSource/sonarqube-scan-action@1a6d90ebcb0e6a6b1d87e37ba693fe453195ae25 # v5.3.1
uses: SonarSource/sonarqube-scan-action@2500896589ef8f7247069a56136f8dc177c27ccf # v5.2.0
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: oracle-vm-16cpu-64gb-x86-64
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
@@ -426,7 +426,7 @@ jobs:
- build-go
- changes
env:
GOPATH: /home/ubuntu/go
GOPATH: /home/runner/go
ARGOCD_FAKE_IN_CLUSTER: 'true'
ARGOCD_SSH_DATA_PATH: '/tmp/argo-e2e/app/config/ssh'
ARGOCD_TLS_DATA_PATH: '/tmp/argo-e2e/app/config/tls'
@@ -449,7 +449,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
@@ -462,19 +462,19 @@ jobs:
set -x
curl -sfL https://get.k3s.io | sh -
sudo chmod -R a+rw /etc/rancher/k3s
sudo mkdir -p $HOME/.kube && sudo chown -R ubuntu $HOME/.kube
sudo mkdir -p $HOME/.kube && sudo chown -R runner $HOME/.kube
sudo k3s kubectl config view --raw > $HOME/.kube/config
sudo chown ubuntu $HOME/.kube/config
sudo chown runner $HOME/.kube/config
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Add ~/go/bin to PATH
run: |
echo "/home/ubuntu/go/bin" >> $GITHUB_PATH
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
@@ -496,11 +496,11 @@ jobs:
run: |
docker pull ghcr.io/dexidp/dex:v2.43.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:8.2.2-alpine
docker pull redis:7.2.7-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
chown ubuntu dist
chown runner dist
- name: Run E2E server and wait for it being available
timeout-minutes: 30
run: |

View File

@@ -33,7 +33,7 @@ jobs:
# Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version-file: go.mod

View File

@@ -67,16 +67,16 @@ jobs:
if: ${{ github.ref_type != 'tag'}}
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ inputs.go-version }}
cache: false
- name: Install cosign
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- name: Setup tags for container image as a CSV type
run: |
@@ -103,7 +103,7 @@ jobs:
echo 'EOF' >> $GITHUB_ENV
- name: Login to Quay.io
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.quay_username }}
@@ -111,7 +111,7 @@ jobs:
if: ${{ inputs.quay_image_name && inputs.push }}
- name: Login to GitHub Container Registry
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ secrets.ghcr_username }}
@@ -119,7 +119,7 @@ jobs:
if: ${{ inputs.ghcr_image_name && inputs.push }}
- name: Login to dockerhub Container Registry
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
username: ${{ secrets.docker_username }}
password: ${{ secrets.docker_password }}

View File

@@ -53,7 +53,7 @@ jobs:
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.24.4
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false
@@ -70,7 +70,7 @@ jobs:
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.24.4
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:

View File

@@ -11,7 +11,7 @@ permissions: {}
env:
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.25.0' # Note: go-version must also be set in job argocd-image.with.go-version
GOLANG_VERSION: '1.24.4' # Note: go-version must also be set in job argocd-image.with.go-version
jobs:
argocd-image:
@@ -25,49 +25,13 @@ jobs:
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.24.4
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
push: true
secrets:
quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }}
quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}
setup-variables:
name: Setup Release Variables
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
outputs:
is_pre_release: ${{ steps.var.outputs.is_pre_release }}
is_latest_release: ${{ steps.var.outputs.is_latest_release }}
steps:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup variables
id: var
run: |
set -xue
# Fetch all tag information
git fetch --prune --tags --force
LATEST_RELEASE_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | grep -v '-' | tail -n1)
PRE_RELEASE=false
# Check if latest tag is a pre-release
if echo ${{ github.ref_name }} | grep -E -- '-rc[0-9]+$';then
PRE_RELEASE=true
fi
IS_LATEST=false
# Ensure latest release tag matches github.ref_name
if [[ $LATEST_RELEASE_TAG == ${{ github.ref_name }} ]];then
IS_LATEST=true
fi
echo "is_pre_release=$PRE_RELEASE" >> $GITHUB_OUTPUT
echo "is_latest_release=$IS_LATEST" >> $GITHUB_OUTPUT
argocd-image-provenance:
needs: [argocd-image]
permissions:
@@ -86,17 +50,15 @@ jobs:
goreleaser:
needs:
- setup-variables
- argocd-image
- argocd-image-provenance
permissions:
contents: write # used for uploading assets
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
env:
GORELEASER_MAKE_LATEST: ${{ needs.setup-variables.outputs.is_latest_release }}
outputs:
hashes: ${{ steps.hash.outputs.hashes }}
steps:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
@@ -108,7 +70,7 @@ jobs:
run: git fetch --force --tags
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
cache: false
@@ -134,7 +96,7 @@ jobs:
tool-cache: false
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
id: run-goreleaser
with:
version: latest
@@ -180,7 +142,7 @@ jobs:
permissions:
contents: write # Needed for release uploads
outputs:
hashes: ${{ steps.sbom-hash.outputs.hashes }}
hashes: ${{ steps.sbom-hash.outputs.hashes}}
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
steps:
@@ -191,7 +153,7 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
cache: false
@@ -236,7 +198,7 @@ jobs:
echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
- name: Upload SBOM
uses: softprops/action-gh-release@6cbd405e2c4e67a21c47fa9e383d020e4e28b836 # v2.3.3
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -259,7 +221,6 @@ jobs:
post-release:
needs:
- setup-variables
- argocd-image
- goreleaser
- generate-sbom
@@ -268,8 +229,6 @@ jobs:
pull-requests: write # Needed to create PR for VERSION update.
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
env:
TAG_STABLE: ${{ needs.setup-variables.outputs.is_latest_release }}
steps:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
@@ -283,6 +242,27 @@ jobs:
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
- name: Check if tag is the latest version and not a pre-release
run: |
set -xue
# Fetch all tag information
git fetch --prune --tags --force
LATEST_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n1)
PRE_RELEASE=false
# Check if latest tag is a pre-release
if echo $LATEST_TAG | grep -E -- '-rc[0-9]+$';then
PRE_RELEASE=true
fi
# Ensure latest tag matches github.ref_name & not a pre-release
if [[ $LATEST_TAG == ${{ github.ref_name }} ]] && [[ $PRE_RELEASE != 'true' ]];then
echo "TAG_STABLE=true" >> $GITHUB_ENV
else
echo "TAG_STABLE=false" >> $GITHUB_ENV
fi
- name: Update stable tag to latest version
run: |
git tag -f stable ${{ github.ref_name }}

View File

@@ -1,31 +0,0 @@
name: Renovate
on:
schedule:
- cron: '0 * * * *'
workflow_dispatch: {}
permissions:
contents: read
jobs:
renovate:
runs-on: ubuntu-latest
steps:
- name: Get token
id: get_token
uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
with:
app-id: ${{ vars.RENOVATE_APP_ID }}
private-key: ${{ secrets.RENOVATE_APP_PRIVATE_KEY }}
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
- name: Self-hosted Renovate
uses: renovatebot/github-action@f8af9272cd94a4637c29f60dea8731afd3134473 #43.0.12
with:
configurationFile: .github/configs/renovate-config.js
token: '${{ steps.get_token.outputs.token }}'
env:
LOG_LEVEL: 'debug'
RENOVATE_REPOSITORIES: '${{ github.repository }}'

View File

@@ -58,6 +58,7 @@ linters:
- commentedOutCode
- deferInLoop
- exitAfterDefer
- exposedSyncMutex
- hugeParam
- importShadow
- paramTypeCombine # Leave disabled, there are too many failures to be worth fixing.

View File

@@ -21,7 +21,7 @@ builds:
- -X github.com/argoproj/argo-cd/v3/common.gitCommit={{ .FullCommit }}
- -X github.com/argoproj/argo-cd/v3/common.gitTreeState={{ .Env.GIT_TREE_STATE }}
- -X github.com/argoproj/argo-cd/v3/common.kubectlVersion={{ .Env.KUBECTL_VERSION }}
- -extldflags="-static"
- '{{ if or (eq .Runtime.Goos "linux") (eq .Runtime.Goos "windows") }}-extldflags="-static"{{ end }}'
goos:
- linux
- windows
@@ -42,6 +42,15 @@ builds:
goarch: ppc64le
- goos: windows
goarch: arm64
overrides:
- goos: darwin
goarch: amd64
env:
- CGO_ENABLED=1
- goos: darwin
goarch: arm64
env:
- CGO_ENABLED=1
archives:
- id: argocd-archive
@@ -49,14 +58,13 @@ archives:
- argocd-cli
name_template: |-
{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}
formats: [binary]
formats: [ binary ]
checksum:
name_template: 'cli_checksums.txt'
algorithm: sha256
release:
make_latest: '{{ .Env.GORELEASER_MAKE_LATEST }}'
prerelease: auto
draft: false
header: |

View File

@@ -24,6 +24,7 @@ packages:
Renderer: {}
github.com/argoproj/argo-cd/v3/commitserver/apiclient:
interfaces:
Clientset: {}
CommitServiceClient: {}
github.com/argoproj/argo-cd/v3/commitserver/commit:
interfaces:
@@ -31,10 +32,6 @@ packages:
github.com/argoproj/argo-cd/v3/controller/cache:
interfaces:
LiveStateCache: {}
github.com/argoproj/argo-cd/v3/controller/hydrator:
interfaces:
Dependencies: {}
RepoGetter: {}
github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster:
interfaces:
ClusterServiceServer: {}
@@ -69,9 +66,6 @@ packages:
github.com/argoproj/argo-cd/v3/util/helm:
interfaces:
Client: {}
github.com/argoproj/argo-cd/v3/util/oci:
interfaces:
Client: {}
github.com/argoproj/argo-cd/v3/util/io:
interfaces:
TempPaths: {}

View File

@@ -12,8 +12,3 @@
/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
/.goreleaser.yaml @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
/sonar-project.properties @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
# CLI
/cmd/argocd/** @argoproj/argocd-approvers @argoproj/argocd-approvers-cli
/cmd/main.go @argoproj/argocd-approvers @argoproj/argocd-approvers-cli
/docs/operator-manual/ @argoproj/argocd-approvers @argoproj/argocd-approvers-cli

View File

@@ -1,10 +1,10 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:10bb10bb062de665d4dc3e0ea36715270ead632cfcb74d08ca2273712a0dfb42
ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:80dd3c3b9c6cecb9f1667e9290b3bc61b78c2678c02cbdae5f0fea92cc6734ab
####################################################################################################
# Builder image
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS builder
FROM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS builder
WORKDIR /tmp
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd

View File

@@ -1,4 +1,4 @@
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6
FROM docker.io/library/golang:1.24.1@sha256:c5adecdb7b3f8c5ca3c88648a861882849cc8b02fed68ece31e25de88ad13418
ENV DEBIAN_FRONTEND=noninteractive

View File

@@ -1,4 +1,4 @@
FROM node:20
FROM node:24
WORKDIR /app/ui

View File

@@ -43,17 +43,6 @@ endif
DOCKER_SRCDIR?=$(GOPATH)/src
DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
# Allows you to control which Docker network the test-util containers attach to.
# This is particularly useful if you are running Kubernetes in Docker (e.g., k3d)
# and want the test containers to reach the Kubernetes API via an already-existing Docker network.
DOCKER_NETWORK ?= default
ifneq ($(DOCKER_NETWORK),default)
DOCKER_NETWORK_ARG := --network $(DOCKER_NETWORK)
else
DOCKER_NETWORK_ARG :=
endif
ARGOCD_PROCFILE?=Procfile
# pointing to python 3.7 to match https://github.com/argoproj/argo-cd/blob/master/.readthedocs.yml
@@ -124,11 +113,11 @@ define run-in-test-server
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
-v /tmp:/tmp${VOLUME_MOUNT} \
-w ${DOCKER_WORKDIR} \
-p ${ARGOCD_E2E_APISERVER_PORT}:8080 \
-p 4000:4000 \
-p 5000:5000 \
$(DOCKER_NETWORK_ARG)\
$(PODMAN_ARGS) \
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
bash -c "$(1)"
@@ -149,8 +138,8 @@ define run-in-test-client
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
-v /tmp:/tmp${VOLUME_MOUNT} \
-w ${DOCKER_WORKDIR} \
$(DOCKER_NETWORK_ARG)\
$(PODMAN_ARGS) \
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
bash -c "$(1)"
@@ -615,7 +604,6 @@ install-test-tools-local:
.PHONY: install-codegen-tools-local
install-codegen-tools-local:
./hack/install.sh codegen-tools
./hack/install.sh codegen-go-tools
# Installs all tools required for running codegen (Go packages)
.PHONY: install-go-tools-local

View File

@@ -3,9 +3,9 @@ header:
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
last-updated: '2023-10-27'
last-reviewed: '2023-10-27'
commit-hash: 320f46f06beaf75f9c406e3a47e2e09d36e2047a
commit-hash: 226a670fe6b3c6769ff6d18e6839298a58e4577d
project-url: https://github.com/argoproj/argo-cd
project-release: v3.2.0
project-release: v3.1.0
changelog: https://github.com/argoproj/argo-cd/releases
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
project-lifecycle:

View File

@@ -10,14 +10,6 @@ cmd_button(
text='make codegen-local',
)
cmd_button(
'make test-local',
argv=['sh', '-c', 'make test-local'],
location=location.NAV,
icon_name='science',
text='make test-local',
)
# add ui button in web ui to run make codegen-local (top nav)
cmd_button(
'make cli-local',
@@ -77,7 +69,7 @@ docker_build_with_restart(
],
platform=platform,
live_update=[
sync('.tilt-bin/argocd_linux', '/usr/local/bin/argocd'),
sync('.tilt-bin/argocd_linux_amd64', '/usr/local/bin/argocd'),
],
only=[
'.tilt-bin',
@@ -268,7 +260,6 @@ local_resource(
'make lint-local',
deps = code_deps,
allow_parallel=True,
resource_deps=['vendor']
)
local_resource(

View File

@@ -5,10 +5,8 @@ PR with your organization name if you are using Argo CD.
Currently, the following organizations are **officially** using Argo CD:
1. [100ms](https://www.100ms.ai/)
1. [127Labs](https://127labs.com/)
1. [3Rein](https://www.3rein.com/)
1. [42 School](https://42.fr/)
1. [4data](https://4data.ch/)
1. [7shifts](https://www.7shifts.com/)
1. [Adevinta](https://www.adevinta.com/)
@@ -42,7 +40,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Back Market](https://www.backmarket.com)
1. [Bajaj Finserv Health Ltd.](https://www.bajajfinservhealth.in)
1. [Baloise](https://www.baloise.com)
1. [Batumbu](https://batumbu.id)
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
1. [Beat](https://thebeat.co/en/)
1. [Beez Innovation Labs](https://www.beezlabs.com/)
@@ -74,7 +71,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Chime](https://www.chime.com)
1. [Chronicle Labs](https://chroniclelabs.org)
1. [Cisco ET&I](https://eti.cisco.com/)
1. [Close](https://www.close.com/)
1. [Cloud Posse](https://www.cloudposse.com/)
1. [Cloud Scale](https://cloudscaleinc.com/)
1. [CloudScript](https://www.cloudscript.com.br/)
@@ -164,7 +160,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Hiya](https://hiya.com)
1. [Honestbank](https://honestbank.com)
1. [Hostinger](https://www.hostinger.com)
1. [Hotjar](https://www.hotjar.com)
1. [IABAI](https://www.iab.ai)
1. [IBM](https://www.ibm.com/)
1. [Ibotta](https://home.ibotta.com)
@@ -178,7 +173,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Info Support](https://www.infosupport.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Instruqt](https://www.instruqt.com)
1. [Intel](https://www.intel.com)
1. [Intuit](https://www.intuit.com/)
1. [Jellysmack](https://www.jellysmack.com)
1. [Joblift](https://joblift.com/)
@@ -326,10 +320,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [SEEK](https://seek.com.au)
1. [SEKAI](https://www.sekai.io/)
1. [Semgrep](https://semgrep.com)
1. [Seznam.cz](https://o-seznam.cz/)
1. [Shield](https://shield.com)
1. [Shipfox](https://www.shipfox.io)
1. [Shock Media](https://www.shockmedia.nl)
1. [SI Analytics](https://si-analytics.ai)
1. [Sidewalk Entertainment](https://sidewalkplay.com/)
1. [Skit](https://skit.ai/)
@@ -342,7 +333,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Snapp](https://snapp.ir/)
1. [Snyk](https://snyk.io/)
1. [Softway Medical](https://www.softwaymedical.fr/)
1. [Sophotech](https://sopho.tech)
1. [South China Morning Post (SCMP)](https://www.scmp.com/)
1. [Speee](https://speee.jp/)
1. [Spendesk](https://spendesk.com/)

View File

@@ -1 +1 @@
3.2.0
3.1.0

View File

@@ -16,7 +16,6 @@ package controllers
import (
"context"
"errors"
"fmt"
"reflect"
"runtime/debug"
@@ -68,8 +67,6 @@ const (
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
NotifiedAnnotationKey = "notified.notifications.argoproj.io"
ReconcileRequeueOnValidationError = time.Minute * 3
ReverseDeletionOrder = "Reverse"
AllAtOnceDeletionOrder = "AllAtOnce"
)
var defaultPreservedAnnotations = []string{
@@ -77,11 +74,6 @@ var defaultPreservedAnnotations = []string{
argov1alpha1.AnnotationKeyRefresh,
}
type deleteInOrder struct {
AppName string
Step int
}
// ApplicationSetReconciler reconciles a ApplicationSet object
type ApplicationSetReconciler struct {
client.Client
@@ -100,7 +92,6 @@ type ApplicationSetReconciler struct {
GlobalPreservedAnnotations []string
GlobalPreservedLabels []string
Metrics *metrics.ApplicationsetMetrics
MaxResourcesStatusCount int
}
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
@@ -148,19 +139,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
logCtx.Debugf("ownerReferences referring %s is deleted from generated applications", appsetName)
}
if isProgressiveSyncDeletionOrderReversed(&applicationSetInfo) {
logCtx.Debugf("DeletionOrder is set as Reverse on %s", appsetName)
currentApplications, err := r.getCurrentApplications(ctx, applicationSetInfo)
if err != nil {
return ctrl.Result{}, err
}
requeueTime, err := r.performReverseDeletion(ctx, logCtx, applicationSetInfo, currentApplications)
if err != nil {
return ctrl.Result{}, err
} else if requeueTime > 0 {
return ctrl.Result{RequeueAfter: requeueTime}, err
}
}
controllerutil.RemoveFinalizer(&applicationSetInfo, argov1alpha1.ResourcesFinalizerName)
if err := r.Update(ctx, &applicationSetInfo); err != nil {
return ctrl.Result{}, err
@@ -176,7 +154,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// Log a warning if there are unrecognized generators
_ = utils.CheckInvalidGenerators(&applicationSetInfo)
// desiredApplications is the main list of all expected Applications from all generators in this appset.
generatedApplications, applicationSetReason, err := template.GenerateApplications(logCtx, applicationSetInfo, r.Generators, r.Renderer, r.Client)
desiredApplications, applicationSetReason, err := template.GenerateApplications(logCtx, applicationSetInfo, r.Generators, r.Renderer, r.Client)
if err != nil {
logCtx.Errorf("unable to generate applications: %v", err)
_ = r.setApplicationSetStatusCondition(ctx,
@@ -194,7 +172,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
parametersGenerated = true
validateErrors, err := r.validateGeneratedApplications(ctx, generatedApplications, applicationSetInfo)
validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo)
if err != nil {
// While some generators may return an error that requires user intervention,
// other generators reference external resources that may change to cause
@@ -247,41 +225,25 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
appMap[app.Name] = app
}
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, generatedApplications, appMap)
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, desiredApplications, appMap)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
}
}
} else {
// Progressive Sync is disabled, clear any existing applicationStatus to prevent stale data
if len(applicationSetInfo.Status.ApplicationStatus) > 0 {
logCtx.Infof("Progressive Sync disabled, removing %v AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
err := r.setAppSetApplicationStatus(ctx, logCtx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to clear AppSet application statuses when Progressive Sync is disabled for %v: %w", applicationSetInfo.Name, err)
}
}
}
var validApps []argov1alpha1.Application
for i := range generatedApplications {
if validateErrors[generatedApplications[i].QualifiedName()] == nil {
validApps = append(validApps, generatedApplications[i])
for i := range desiredApplications {
if validateErrors[i] == nil {
validApps = append(validApps, desiredApplications[i])
}
}
if len(validateErrors) > 0 {
errorApps := make([]string, 0, len(validateErrors))
for key := range validateErrors {
errorApps = append(errorApps, key)
}
sort.Strings(errorApps)
var message string
for _, appName := range errorApps {
message = validateErrors[appName].Error()
logCtx.WithField("application", appName).Errorf("validation error found during application validation: %s", message)
for _, v := range validateErrors {
message = v.Error()
logCtx.Errorf("validation error found during application validation: %s", message)
}
if len(validateErrors) > 1 {
// Only the last message gets added to the appset status, to keep the size reasonable.
@@ -336,12 +298,12 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, generatedApplications)
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, desiredApplications)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: err.Error(),
Reason: argov1alpha1.ApplicationSetReasonDeleteApplicationError,
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
@@ -395,169 +357,120 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}, nil
}
func (r *ApplicationSetReconciler) performReverseDeletion(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, currentApps []argov1alpha1.Application) (time.Duration, error) {
requeueTime := 10 * time.Second
stepLength := len(appset.Spec.Strategy.RollingSync.Steps)
// map applications by name using current applications
appMap := make(map[string]*argov1alpha1.Application)
for _, app := range currentApps {
appMap[app.Name] = &app
}
// Get Rolling Sync Step Maps
_, appStepMap := r.buildAppDependencyList(logCtx, appset, currentApps)
// reverse the AppStepMap to perform deletion
var reverseDeleteAppSteps []deleteInOrder
for appName, appStep := range appStepMap {
reverseDeleteAppSteps = append(reverseDeleteAppSteps, deleteInOrder{appName, stepLength - appStep - 1})
}
sort.Slice(reverseDeleteAppSteps, func(i, j int) bool {
return reverseDeleteAppSteps[i].Step < reverseDeleteAppSteps[j].Step
})
for _, step := range reverseDeleteAppSteps {
logCtx.Infof("step %v : app %v", step.Step, step.AppName)
app := appMap[step.AppName]
retrievedApp := argov1alpha1.Application{}
if err := r.Get(ctx, types.NamespacedName{Name: app.Name, Namespace: app.Namespace}, &retrievedApp); err != nil {
if apierrors.IsNotFound(err) {
logCtx.Infof("application %s successfully deleted", step.AppName)
continue
}
}
// Check if the application is already being deleted
if retrievedApp.DeletionTimestamp != nil {
logCtx.Infof("application %s has been marked for deletion, but object not removed yet", step.AppName)
if time.Since(retrievedApp.DeletionTimestamp.Time) > 2*time.Minute {
return 0, errors.New("application has not been deleted in over 2 minutes")
}
}
// The application has not been deleted yet, trigger its deletion
if err := r.Delete(ctx, &retrievedApp); err != nil {
return 0, err
}
return requeueTime, nil
}
logCtx.Infof("completed reverse deletion for ApplicationSet %v", appset.Name)
return 0, nil
}
func getParametersGeneratedCondition(parametersGenerated bool, message string) argov1alpha1.ApplicationSetCondition {
var parametersGeneratedCondition argov1alpha1.ApplicationSetCondition
var paramtersGeneratedCondition argov1alpha1.ApplicationSetCondition
if parametersGenerated {
parametersGeneratedCondition = argov1alpha1.ApplicationSetCondition{
paramtersGeneratedCondition = argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionParametersGenerated,
Message: "Successfully generated parameters for all Applications",
Reason: argov1alpha1.ApplicationSetReasonParametersGenerated,
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
}
} else {
parametersGeneratedCondition = argov1alpha1.ApplicationSetCondition{
paramtersGeneratedCondition = argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionParametersGenerated,
Message: message,
Reason: argov1alpha1.ApplicationSetReasonErrorOccurred,
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
}
}
return parametersGeneratedCondition
return paramtersGeneratedCondition
}
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, condition argov1alpha1.ApplicationSetCondition, parametersGenerated bool) error {
// Initialize the default condition types that this method evaluates
func getResourceUpToDateCondition(errorOccurred bool, message string, reason string) argov1alpha1.ApplicationSetCondition {
var resourceUpToDateCondition argov1alpha1.ApplicationSetCondition
if errorOccurred {
resourceUpToDateCondition = argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: message,
Reason: reason,
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
}
} else {
resourceUpToDateCondition = argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "ApplicationSet up to date",
Reason: argov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
}
}
return resourceUpToDateCondition
}
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, condition argov1alpha1.ApplicationSetCondition, paramtersGenerated bool) error {
// check if error occurred during reconcile process
errOccurred := condition.Type == argov1alpha1.ApplicationSetConditionErrorOccurred
var errOccurredCondition argov1alpha1.ApplicationSetCondition
if errOccurred {
errOccurredCondition = condition
} else {
errOccurredCondition = argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
Message: "Successfully generated parameters for all Applications",
Reason: argov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
}
}
paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message)
resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason)
evaluatedTypes := map[argov1alpha1.ApplicationSetConditionType]bool{
argov1alpha1.ApplicationSetConditionErrorOccurred: true,
argov1alpha1.ApplicationSetConditionParametersGenerated: true,
argov1alpha1.ApplicationSetConditionErrorOccurred: false,
argov1alpha1.ApplicationSetConditionResourcesUpToDate: false,
argov1alpha1.ApplicationSetConditionRolloutProgressing: false,
argov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
}
// Evaluate current condition
evaluatedTypes[condition.Type] = true
newConditions := []argov1alpha1.ApplicationSetCondition{condition}
newConditions := []argov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition}
if !isRollingSyncStrategy(applicationSet) {
// Progressing sync is always evaluated so conditions are removed when it is not enabled
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
evaluatedTypes[argov1alpha1.ApplicationSetConditionRolloutProgressing] = true
}
// Evaluate ParametersGenerated since it is always provided
if condition.Type != argov1alpha1.ApplicationSetConditionParametersGenerated {
newConditions = append(newConditions, getParametersGeneratedCondition(parametersGenerated, condition.Message))
}
// Evaluate dependencies between conditions.
switch condition.Type {
case argov1alpha1.ApplicationSetConditionResourcesUpToDate:
if condition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
// If the resources are up to date, we know there was no errors
evaluatedTypes[argov1alpha1.ApplicationSetConditionErrorOccurred] = true
newConditions = append(newConditions, argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
Reason: condition.Reason,
Message: condition.Message,
})
}
case argov1alpha1.ApplicationSetConditionErrorOccurred:
if condition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
// If there is an error anywhere in the reconciliation, we cannot consider the resources up to date
evaluatedTypes[argov1alpha1.ApplicationSetConditionResourcesUpToDate] = true
newConditions = append(newConditions, argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate,
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
Reason: argov1alpha1.ApplicationSetReasonErrorOccurred,
Message: condition.Message,
})
}
case argov1alpha1.ApplicationSetConditionRolloutProgressing:
if !isRollingSyncStrategy(applicationSet) {
// if the condition is a rolling sync and it is disabled, ignore it
evaluatedTypes[condition.Type] = false
if condition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing {
newConditions = append(newConditions, condition)
}
}
// Update the applicationSet conditions
previousConditions := applicationSet.Status.Conditions
applicationSet.Status.SetConditions(newConditions, evaluatedTypes)
// Try to not call get/update if nothing has changed
needToUpdateConditions := len(applicationSet.Status.Conditions) != len(previousConditions)
if !needToUpdateConditions {
for i, c := range applicationSet.Status.Conditions {
previous := previousConditions[i]
if c.Type != previous.Type || c.Reason != previous.Reason || c.Status != previous.Status || c.Message != previous.Message {
needToUpdateConditions := false
for _, condition := range newConditions {
// do nothing if appset already has same condition
for _, c := range applicationSet.Status.Conditions {
if c.Type == condition.Type && (c.Reason != condition.Reason || c.Status != condition.Status || c.Message != condition.Message) {
needToUpdateConditions = true
break
}
}
}
if !needToUpdateConditions {
return nil
}
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
updatedAppset := &argov1alpha1.ApplicationSet{}
if err := r.Get(ctx, types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}, updatedAppset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
if needToUpdateConditions || len(applicationSet.Status.Conditions) < len(newConditions) {
// fetch updated Application Set object before updating it
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
updatedAppset := &argov1alpha1.ApplicationSet{}
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %w", err)
}
return fmt.Errorf("error fetching updated application set: %w", err)
}
updatedAppset.Status.SetConditions(newConditions, evaluatedTypes)
updatedAppset.Status.SetConditions(
newConditions, evaluatedTypes,
)
// Update the newly fetched object with new set of conditions
err := r.Client.Status().Update(ctx, updatedAppset)
if err != nil {
return err
// Update the newly fetched object with new set of conditions
err := r.Client.Status().Update(ctx, updatedAppset)
if err != nil {
return err
}
updatedAppset.DeepCopyInto(applicationSet)
return nil
})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("unable to set application set condition: %w", err)
}
updatedAppset.DeepCopyInto(applicationSet)
return nil
})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("unable to set application set condition: %w", err)
}
return nil
@@ -565,33 +478,33 @@ func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.
// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the
// generated applications.
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argov1alpha1.ApplicationSet) (map[string]error, error) {
errorsByApp := map[string]error{}
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argov1alpha1.ApplicationSet) (map[int]error, error) {
errorsByIndex := map[int]error{}
namesSet := map[string]bool{}
for i := range desiredApplications {
app := &desiredApplications[i]
for i, app := range desiredApplications {
if namesSet[app.Name] {
errorsByApp[app.QualifiedName()] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
continue
}
namesSet[app.Name] = true
appProject := &argov1alpha1.AppProject{}
err := r.Get(ctx, types.NamespacedName{Name: app.Spec.Project, Namespace: r.ArgoCDNamespace}, appProject)
if err != nil {
if apierrors.IsNotFound(err) {
errorsByApp[app.QualifiedName()] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
continue
}
return nil, err
}
if _, err = argoutil.GetDestinationCluster(ctx, app.Spec.Destination, r.ArgoDB); err != nil {
errorsByApp[app.QualifiedName()] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
continue
}
}
return errorsByApp, nil
return errorsByIndex, nil
}
func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argov1alpha1.ApplicationSet) time.Duration {
@@ -819,7 +732,7 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *
return fmt.Errorf("error getting current applications: %w", err)
}
m := make(map[string]bool) // will hold the app names in appList for the deletion process
m := make(map[string]bool) // Will holds the app names in appList for the deletion process
for _, app := range desiredApplications {
m[app.Name] = true
@@ -868,7 +781,7 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
// Detect if the destination is invalid (name doesn't correspond to a matching cluster)
if destCluster, err := argoutil.GetDestinationCluster(ctx, app.Spec.Destination, r.ArgoDB); err != nil {
appLog.Warnf("The destination cluster for %s could not be found: %v", app.Name, err)
appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err)
validDestination = false
} else {
// Detect if the destination's server field does not match an existing cluster
@@ -887,7 +800,7 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
}
if !matchingCluster {
appLog.Warnf("A match for the destination cluster for %s, by server url, could not be found", app.Name)
appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found.", app.Name)
}
validDestination = matchingCluster
@@ -1098,11 +1011,6 @@ func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.Application
return isRollingSyncStrategy(appset) && len(appset.Spec.Strategy.RollingSync.Steps) > 0
}
func isProgressiveSyncDeletionOrderReversed(appset *argov1alpha1.ApplicationSet) bool {
// When progressive sync is enabled + deletionOrder is set to Reverse (case-insensitive)
return progressiveSyncsRollingSyncStrategyEnabled(appset) && strings.EqualFold(appset.Spec.Strategy.DeletionOrder, ReverseDeletionOrder)
}
func isApplicationHealthy(app argov1alpha1.Application) bool {
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
@@ -1232,10 +1140,15 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
// if we have no RollingUpdate steps, clear out the existing ApplicationStatus entries
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
updateCountMap := []int{}
totalCountMap := []int{}
length := len(applicationSet.Spec.Strategy.RollingSync.Steps)
updateCountMap := make([]int, length)
totalCountMap := make([]int, length)
for s := 0; s < length; s++ {
updateCountMap = append(updateCountMap, 0)
totalCountMap = append(totalCountMap, 0)
}
// populate updateCountMap with counts of existing Pending and Progressing Applications
for _, appStatus := range applicationSet.Status.ApplicationStatus {
@@ -1294,56 +1207,44 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
}
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditions(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet) []argov1alpha1.ApplicationSetCondition {
if !isRollingSyncStrategy(applicationSet) {
return applicationSet.Status.Conditions
}
completedWaves := map[string]bool{}
appSetProgressing := false
for _, appStatus := range applicationSet.Status.ApplicationStatus {
if v, ok := completedWaves[appStatus.Step]; !ok {
completedWaves[appStatus.Step] = appStatus.Status == "Healthy"
} else {
completedWaves[appStatus.Step] = v && appStatus.Status == "Healthy"
}
}
isProgressing := false
progressingStep := ""
for i := range applicationSet.Spec.Strategy.RollingSync.Steps {
step := strconv.Itoa(i + 1)
isCompleted, ok := completedWaves[step]
if !ok {
// Step has no applications, so it is completed
continue
}
if !isCompleted {
isProgressing = true
progressingStep = step
if appStatus.Status != "Healthy" {
appSetProgressing = true
break
}
}
if isProgressing {
appSetConditionProgressing := false
for _, appSetCondition := range applicationSet.Status.Conditions {
if appSetCondition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing && appSetCondition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
appSetConditionProgressing = true
break
}
}
if appSetProgressing && !appSetConditionProgressing {
_ = r.setApplicationSetStatusCondition(ctx,
applicationSet,
argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "ApplicationSet is performing rollout of step " + progressingStep,
Message: "ApplicationSet Rollout Rollout started",
Reason: argov1alpha1.ApplicationSetReasonApplicationSetModified,
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
}, true,
)
} else {
} else if !appSetProgressing && appSetConditionProgressing {
_ = r.setApplicationSetStatusCondition(ctx,
applicationSet,
argov1alpha1.ApplicationSetCondition{
Type: argov1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "ApplicationSet Rollout has completed",
Message: "ApplicationSet Rollout Rollout complete",
Reason: argov1alpha1.ApplicationSetReasonApplicationSetRolloutComplete,
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
}, true,
)
}
return applicationSet.Status.Conditions
}
@@ -1409,13 +1310,7 @@ func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, lo
sort.Slice(statuses, func(i, j int) bool {
return statuses[i].Name < statuses[j].Name
})
resourcesCount := int64(len(statuses))
if r.MaxResourcesStatusCount > 0 && len(statuses) > r.MaxResourcesStatusCount {
logCtx.Warnf("Truncating ApplicationSet %s resource status from %d to max allowed %d entries", appset.Name, len(statuses), r.MaxResourcesStatusCount)
statuses = statuses[:r.MaxResourcesStatusCount]
}
appset.Status.Resources = statuses
appset.Status.ResourcesCount = resourcesCount
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
@@ -1428,7 +1323,6 @@ func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, lo
}
updatedAppset.Status.Resources = appset.Status.Resources
updatedAppset.Status.ResourcesCount = resourcesCount
// Update the newly fetched object with new status resources
err := r.Client.Status().Update(ctx, updatedAppset)
@@ -1451,37 +1345,17 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
needToUpdateStatus := false
if len(applicationStatuses) != len(applicationSet.Status.ApplicationStatus) {
logCtx.WithFields(log.Fields{
"current_count": len(applicationSet.Status.ApplicationStatus),
"expected_count": len(applicationStatuses),
}).Debug("application status count changed")
needToUpdateStatus = true
} else {
for i := range applicationStatuses {
appStatus := applicationStatuses[i]
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appStatus.Application)
if idx == -1 {
logCtx.WithFields(log.Fields{"application": appStatus.Application}).Debug("application not found in current status")
needToUpdateStatus = true
break
}
currentStatus := applicationSet.Status.ApplicationStatus[idx]
statusChanged := currentStatus.Status != appStatus.Status
stepChanged := currentStatus.Step != appStatus.Step
messageChanged := currentStatus.Message != appStatus.Message
if statusChanged || stepChanged || messageChanged {
if statusChanged {
logCtx.WithFields(log.Fields{"application": appStatus.Application, "previous_status": currentStatus.Status, "new_status": appStatus.Status}).
Debug("application status changed")
}
if stepChanged {
logCtx.WithFields(log.Fields{"application": appStatus.Application, "previous_step": currentStatus.Step, "new_step": appStatus.Step}).
Debug("application step changed")
}
if messageChanged {
logCtx.WithFields(log.Fields{"application": appStatus.Application}).Debug("application message changed")
}
if currentStatus.Message != appStatus.Message || currentStatus.Status != appStatus.Status || currentStatus.Step != appStatus.Step {
needToUpdateStatus = true
break
}
@@ -1489,17 +1363,17 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
}
if needToUpdateStatus {
// sort to make sure the array is always in the same order
applicationSet.Status.ApplicationStatus = make([]argov1alpha1.ApplicationSetApplicationStatus, len(applicationStatuses))
copy(applicationSet.Status.ApplicationStatus, applicationStatuses)
sort.Slice(applicationSet.Status.ApplicationStatus, func(i, j int) bool {
return applicationSet.Status.ApplicationStatus[i].Application < applicationSet.Status.ApplicationStatus[j].Application
})
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
// rebuild ApplicationStatus from scratch, we don't need any previous status history
applicationSet.Status.ApplicationStatus = []argov1alpha1.ApplicationSetApplicationStatus{}
for i := range applicationStatuses {
applicationSet.Status.SetApplicationStatus(applicationStatuses[i])
}
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
updatedAppset := &argov1alpha1.ApplicationSet{}
if err := r.Get(ctx, types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}, updatedAppset); err != nil {
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
@@ -1563,7 +1437,7 @@ func syncApplication(application argov1alpha1.Application, prune bool) argov1alp
Info: []*argov1alpha1.Info{
{
Name: "Reason",
Value: "ApplicationSet RollingSync triggered a sync of this Application resource",
Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
},
},
Sync: &argov1alpha1.SyncOperation{},
@@ -1740,15 +1614,14 @@ func shouldRequeueForApplicationSet(appSetOld, appSetNew *argov1alpha1.Applicati
}
}
// only compare the applicationset spec, annotations, labels and finalizers, deletionTimestamp, specifically avoiding
// only compare the applicationset spec, annotations, labels and finalizers, specifically avoiding
// the status field. status is owned by the applicationset controller,
// and we do not need to requeue when it does bookkeeping
// NB: the ApplicationDestination comes from the ApplicationSpec being embedded
// in the ApplicationSetTemplate from the generators
if !cmp.Equal(appSetOld.Spec, appSetNew.Spec, cmpopts.EquateEmpty(), cmpopts.EquateComparable(argov1alpha1.ApplicationDestination{})) ||
!cmp.Equal(appSetOld.GetLabels(), appSetNew.GetLabels(), cmpopts.EquateEmpty()) ||
!cmp.Equal(appSetOld.GetFinalizers(), appSetNew.GetFinalizers(), cmpopts.EquateEmpty()) ||
!cmp.Equal(appSetOld.DeletionTimestamp, appSetNew.DeletionTimestamp, cmpopts.EquateEmpty()) {
!cmp.Equal(appSetOld.GetFinalizers(), appSetNew.GetFinalizers(), cmpopts.EquateEmpty()) {
return true
}

View File

@@ -1954,15 +1954,14 @@ func TestValidateGeneratedApplications(t *testing.T) {
for _, cc := range []struct {
name string
apps []v1alpha1.Application
validationErrors map[string]error
validationErrors map[int]error
}{
{
name: "valid app should return true",
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app",
},
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
@@ -1977,15 +1976,14 @@ func TestValidateGeneratedApplications(t *testing.T) {
},
},
},
validationErrors: map[string]error{},
validationErrors: map[int]error{},
},
{
name: "can't have both name and server defined",
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app",
},
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
@@ -2001,15 +1999,14 @@ func TestValidateGeneratedApplications(t *testing.T) {
},
},
},
validationErrors: map[string]error{"app": errors.New("application destination spec is invalid: application destination can't have both name and server defined: my-cluster my-server")},
validationErrors: map[int]error{0: errors.New("application destination spec is invalid: application destination can't have both name and server defined: my-cluster my-server")},
},
{
name: "project mismatch should return error",
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app",
},
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: v1alpha1.ApplicationSpec{
Project: "DOES-NOT-EXIST",
Source: &v1alpha1.ApplicationSource{
@@ -2024,15 +2021,14 @@ func TestValidateGeneratedApplications(t *testing.T) {
},
},
},
validationErrors: map[string]error{"app": errors.New("application references project DOES-NOT-EXIST which does not exist")},
validationErrors: map[int]error{0: errors.New("application references project DOES-NOT-EXIST which does not exist")},
},
{
name: "valid app should return true",
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app",
},
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
@@ -2047,15 +2043,14 @@ func TestValidateGeneratedApplications(t *testing.T) {
},
},
},
validationErrors: map[string]error{},
validationErrors: map[int]error{},
},
{
name: "cluster should match",
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app",
},
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
@@ -2070,7 +2065,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
},
},
},
validationErrors: map[string]error{"app": errors.New("application destination spec is invalid: there are no clusters with this name: nonexistent-cluster")},
validationErrors: map[int]error{0: errors.New("application destination spec is invalid: there are no clusters with this name: nonexistent-cluster")},
},
} {
t.Run(cc.name, func(t *testing.T) {
@@ -2203,20 +2198,13 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
someTime := &metav1.Time{Time: time.Now().Add(-5 * time.Minute)}
existingParameterGeneratedCondition := getParametersGeneratedCondition(true, "")
existingParameterGeneratedCondition.LastTransitionTime = someTime
for _, c := range []struct {
name string
appset v1alpha1.ApplicationSet
condition v1alpha1.ApplicationSetCondition
parametersGenerated bool
testfunc func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition)
testCases := []struct {
appset v1alpha1.ApplicationSet
conditions []v1alpha1.ApplicationSetCondition
testfunc func(t *testing.T, appset v1alpha1.ApplicationSet)
}{
{
name: "has parameters generated condition when false",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
@@ -2233,28 +2221,20 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
Template: v1alpha1.ApplicationSetTemplate{},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "This is a message",
Reason: "test",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
conditions: []v1alpha1.ApplicationSetCondition{
{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "All applications have been generated successfully",
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: v1alpha1.ApplicationSetConditionStatusTrue,
},
},
parametersGenerated: false,
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
t.Helper()
require.Len(t, conditions, 2)
// Conditions are ordered by type, so the order is deterministic
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[0].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[0].Status)
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[1].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[1].Status)
assert.Equal(t, "test", conditions[1].Reason)
assert.Len(t, appset.Status.Conditions, 3)
},
},
{
name: "parameters generated condition is used when specified",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
@@ -2271,268 +2251,37 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
Template: v1alpha1.ApplicationSetTemplate{},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionParametersGenerated,
Message: "This is a message",
Reason: "test",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
conditions: []v1alpha1.ApplicationSetCondition{
{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "All applications have been generated successfully",
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: v1alpha1.ApplicationSetConditionStatusTrue,
},
{
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "ApplicationSet Rollout Rollout started",
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: v1alpha1.ApplicationSetConditionStatusTrue,
},
},
parametersGenerated: true,
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
t.Helper()
require.Len(t, conditions, 1)
assert.Len(t, appset.Status.Conditions, 3)
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[0].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[0].Status)
assert.Equal(t, "test", conditions[0].Reason)
},
},
{
name: "has parameter conditions when true",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
}},
},
Template: v1alpha1.ApplicationSetTemplate{},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "This is a message",
Reason: "test",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
},
parametersGenerated: true,
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
t.Helper()
require.Len(t, conditions, 2)
isProgressingCondition := false
// Conditions are ordered by type, so the order is deterministic
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[0].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusTrue, conditions[0].Status)
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[1].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[1].Status)
assert.Equal(t, "test", conditions[1].Reason)
},
},
{
name: "resource up to date sets error condition to false",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
}},
},
Template: v1alpha1.ApplicationSetTemplate{},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "Completed",
Reason: "test",
Status: v1alpha1.ApplicationSetConditionStatusTrue,
},
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
t.Helper()
require.Len(t, conditions, 3)
assert.Equal(t, v1alpha1.ApplicationSetConditionErrorOccurred, conditions[0].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[0].Status)
assert.Equal(t, "test", conditions[0].Reason)
assert.Equal(t, "Completed", conditions[0].Message)
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[1].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[2].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusTrue, conditions[2].Status)
assert.Equal(t, "test", conditions[2].Reason)
assert.Equal(t, "Completed", conditions[2].Message)
},
},
{
name: "error condition sets resource up to date to false",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
}},
},
Template: v1alpha1.ApplicationSetTemplate{},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
Message: "Error",
Reason: "test",
Status: v1alpha1.ApplicationSetConditionStatusTrue,
},
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
t.Helper()
require.Len(t, conditions, 3)
assert.Equal(t, v1alpha1.ApplicationSetConditionErrorOccurred, conditions[0].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusTrue, conditions[0].Status)
assert.Equal(t, "test", conditions[0].Reason)
assert.Equal(t, "Error", conditions[0].Message)
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[1].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[2].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[2].Status)
assert.Equal(t, v1alpha1.ApplicationSetReasonErrorOccurred, conditions[2].Reason)
assert.Equal(t, "Error", conditions[2].Message)
},
},
{
name: "updating an unchanged condition does not mutate existing conditions",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
}},
},
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
},
Template: v1alpha1.ApplicationSetTemplate{},
},
Status: v1alpha1.ApplicationSetStatus{
Conditions: []v1alpha1.ApplicationSetCondition{
{
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
Message: "existing",
LastTransitionTime: someTime,
},
existingParameterGeneratedCondition,
{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "existing",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
LastTransitionTime: someTime,
},
{
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "existing",
LastTransitionTime: someTime,
},
},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "existing",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
},
parametersGenerated: true,
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
t.Helper()
require.Len(t, conditions, 4)
assert.Equal(t, v1alpha1.ApplicationSetConditionErrorOccurred, conditions[0].Type)
assert.Equal(t, someTime, conditions[0].LastTransitionTime)
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[1].Type)
assert.Equal(t, someTime, conditions[1].LastTransitionTime)
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[2].Type)
assert.Equal(t, someTime, conditions[2].LastTransitionTime)
assert.Equal(t, v1alpha1.ApplicationSetConditionRolloutProgressing, conditions[3].Type)
assert.Equal(t, someTime, conditions[3].LastTransitionTime)
},
},
{
name: "progressing conditions is removed when AppSet is not configured",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
}},
},
// Strategy removed
// Strategy: &v1alpha1.ApplicationSetStrategy{
// Type: "RollingSync",
// RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
// },
Template: v1alpha1.ApplicationSetTemplate{},
},
Status: v1alpha1.ApplicationSetStatus{
Conditions: []v1alpha1.ApplicationSetCondition{
{
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
Message: "existing",
LastTransitionTime: someTime,
},
existingParameterGeneratedCondition,
{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "existing",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
LastTransitionTime: someTime,
},
{
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "existing",
LastTransitionTime: someTime,
},
},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "existing",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
},
parametersGenerated: true,
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
t.Helper()
require.Len(t, conditions, 3)
for _, c := range conditions {
assert.NotEqual(t, v1alpha1.ApplicationSetConditionRolloutProgressing, c.Type)
for _, condition := range appset.Status.Conditions {
if condition.Type == v1alpha1.ApplicationSetConditionRolloutProgressing {
isProgressingCondition = true
break
}
}
assert.False(t, isProgressingCondition, "no RolloutProgressing should be set for applicationsets that don't have rolling strategy")
},
},
{
name: "progressing conditions is ignored when AppSet is not configured",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
@@ -2546,126 +2295,84 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
}},
}},
},
// Strategy removed
// Strategy: &v1alpha1.ApplicationSetStrategy{
// Type: "RollingSync",
// RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
// },
Template: v1alpha1.ApplicationSetTemplate{},
},
Status: v1alpha1.ApplicationSetStatus{
Conditions: []v1alpha1.ApplicationSetCondition{
{
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
Message: "existing",
LastTransitionTime: someTime,
},
existingParameterGeneratedCondition,
{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "existing",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
LastTransitionTime: someTime,
},
},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "do not add me",
Status: v1alpha1.ApplicationSetConditionStatusTrue,
},
parametersGenerated: true,
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
t.Helper()
require.Len(t, conditions, 3)
for _, c := range conditions {
assert.NotEqual(t, v1alpha1.ApplicationSetConditionRolloutProgressing, c.Type)
}
},
},
{
name: "progressing conditions is updated correctly when configured",
appset: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
}},
},
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
},
Template: v1alpha1.ApplicationSetTemplate{},
},
Status: v1alpha1.ApplicationSetStatus{
Conditions: []v1alpha1.ApplicationSetCondition{
{
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
Message: "existing",
LastTransitionTime: someTime,
},
existingParameterGeneratedCondition,
{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "existing",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
LastTransitionTime: someTime,
},
{
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "old value",
Status: v1alpha1.ApplicationSetConditionStatusTrue,
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
{
Key: "test",
Operator: "In",
Values: []string{"test"},
},
},
},
},
},
},
},
},
condition: v1alpha1.ApplicationSetCondition{
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "new value",
Status: v1alpha1.ApplicationSetConditionStatusFalse,
conditions: []v1alpha1.ApplicationSetCondition{
{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "All applications have been generated successfully",
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: v1alpha1.ApplicationSetConditionStatusTrue,
},
{
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
Message: "ApplicationSet Rollout Rollout started",
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: v1alpha1.ApplicationSetConditionStatusTrue,
},
},
parametersGenerated: true,
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
t.Helper()
require.Len(t, conditions, 4)
assert.Len(t, appset.Status.Conditions, 4)
assert.Equal(t, v1alpha1.ApplicationSetConditionRolloutProgressing, conditions[3].Type)
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[3].Status)
assert.Equal(t, "new value", conditions[3].Message)
isProgressingCondition := false
for _, condition := range appset.Status.Conditions {
if condition.Type == v1alpha1.ApplicationSetConditionRolloutProgressing {
isProgressingCondition = true
break
}
}
assert.True(t, isProgressingCondition, "RolloutProgressing should be set for rollout strategy appset")
},
},
} {
t.Run(c.name, func(t *testing.T) {
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&c.appset).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).WithStatusSubresource(&c.appset).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics()
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
}
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Renderer: &utils.Render{},
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: argodb,
KubeClientset: kubeclientset,
Metrics: metrics,
}
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
err = r.setApplicationSetStatusCondition(t.Context(), &c.appset, c.condition, c.parametersGenerated)
for _, testCase := range testCases {
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&testCase.appset).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).WithStatusSubresource(&testCase.appset).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics()
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Renderer: &utils.Render{},
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: argodb,
KubeClientset: kubeclientset,
Metrics: metrics,
}
for _, condition := range testCase.conditions {
err = r.setApplicationSetStatusCondition(t.Context(), &testCase.appset, condition, true)
require.NoError(t, err)
}
c.testfunc(t, c.appset.Status.Conditions)
})
testCase.testfunc(t, testCase.appset)
}
}
@@ -6410,11 +6117,10 @@ func TestUpdateResourceStatus(t *testing.T) {
require.NoError(t, err)
for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet
apps []v1alpha1.Application
expectedResources []v1alpha1.ResourceStatus
maxResourcesStatusCount int
name string
appSet v1alpha1.ApplicationSet
apps []v1alpha1.Application
expectedResources []v1alpha1.ResourceStatus
}{
{
name: "handles an empty application list",
@@ -6578,73 +6284,6 @@ func TestUpdateResourceStatus(t *testing.T) {
apps: []v1alpha1.Application{},
expectedResources: nil,
},
{
name: "truncates resources status list to",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Status: v1alpha1.ApplicationSetStatus{
Resources: []v1alpha1.ResourceStatus{
{
Name: "app1",
Status: v1alpha1.SyncStatusCodeOutOfSync,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusProgressing,
Message: "this is progressing",
},
},
{
Name: "app2",
Status: v1alpha1.SyncStatusCodeOutOfSync,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusProgressing,
Message: "this is progressing",
},
},
},
},
},
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Status: v1alpha1.ApplicationStatus{
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
},
Health: v1alpha1.AppHealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "app2",
},
Status: v1alpha1.ApplicationStatus{
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
},
Health: v1alpha1.AppHealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
},
expectedResources: []v1alpha1.ResourceStatus{
{
Name: "app1",
Status: v1alpha1.SyncStatusCodeSynced,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
maxResourcesStatusCount: 1,
},
} {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
@@ -6655,14 +6294,13 @@ func TestUpdateResourceStatus(t *testing.T) {
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: argodb,
KubeClientset: kubeclientset,
Metrics: metrics,
MaxResourcesStatusCount: cc.maxResourcesStatusCount,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: argodb,
KubeClientset: kubeclientset,
Metrics: metrics,
}
err := r.updateResourcesStatus(t.Context(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps)
@@ -7175,28 +6813,6 @@ func TestApplicationSetOwnsHandlerUpdate(t *testing.T) {
enableProgressiveSyncs: false,
want: false,
},
{
name: "deletionTimestamp present when progressive sync enabled",
appSetOld: buildAppSet(map[string]string{}),
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
},
enableProgressiveSyncs: true,
want: true,
},
{
name: "deletionTimestamp present when progressive sync disabled",
appSetOld: buildAppSet(map[string]string{}),
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
},
enableProgressiveSyncs: false,
want: true,
},
}
for _, tt := range tests {
@@ -7345,36 +6961,6 @@ func TestShouldRequeueForApplicationSet(t *testing.T) {
},
want: true,
},
{
name: "ApplicationSetWithDeletionTimestamp",
args: args{
appSetOld: &v1alpha1.ApplicationSet{
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Status: "Healthy",
},
},
},
},
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Status: "Waiting",
},
},
},
},
enableProgressiveSyncs: false,
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -7548,7 +7134,7 @@ func TestSyncApplication(t *testing.T) {
Info: []*v1alpha1.Info{
{
Name: "Reason",
Value: "ApplicationSet RollingSync triggered a sync of this Application resource",
Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
},
},
Sync: &v1alpha1.SyncOperation{
@@ -7590,7 +7176,7 @@ func TestSyncApplication(t *testing.T) {
Info: []*v1alpha1.Info{
{
Name: "Reason",
Value: "ApplicationSet RollingSync triggered a sync of this Application resource",
Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
},
},
Sync: &v1alpha1.SyncOperation{
@@ -7612,82 +7198,3 @@ func TestSyncApplication(t *testing.T) {
})
}
}
// TestReconcileProgressiveSyncDisabled verifies that a Reconcile pass clears any
// previously recorded applicationStatus entries from an ApplicationSet when the
// controller runs with progressive syncs disabled.
func TestReconcileProgressiveSyncDisabled(t *testing.T) {
	scheme := runtime.NewScheme()
	err := v1alpha1.AddToScheme(scheme)
	require.NoError(t, err)
	kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
	for _, cc := range []struct {
		name                   string
		appSet                 v1alpha1.ApplicationSet
		enableProgressiveSyncs bool
		expectedAppStatuses    []v1alpha1.ApplicationSetApplicationStatus
	}{
		{
			name: "clears applicationStatus when Progressive Sync is disabled",
			appSet: v1alpha1.ApplicationSet{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-appset",
					Namespace: "argocd",
				},
				Spec: v1alpha1.ApplicationSetSpec{
					Generators: []v1alpha1.ApplicationSetGenerator{},
					Template:   v1alpha1.ApplicationSetTemplate{},
				},
				// Pre-populate status as if a progressive sync had previously run,
				// so the reconcile pass has something to clear.
				Status: v1alpha1.ApplicationSetStatus{
					ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
						{
							Application: "test-appset-guestbook",
							Message:     "Application resource became Healthy, updating status from Progressing to Healthy.",
							Status:      "Healthy",
							Step:        "1",
						},
					},
				},
			},
			enableProgressiveSyncs: false,
			expectedAppStatuses:    nil,
		},
	} {
		t.Run(cc.name, func(t *testing.T) {
			// Fake cluster state seeded with the ApplicationSet (status subresource
			// enabled so Reconcile's status update is persisted).
			client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
			metrics := appsetmetrics.NewFakeAppsetMetrics()
			argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
			r := ApplicationSetReconciler{
				Client:                 client,
				Scheme:                 scheme,
				Renderer:               &utils.Render{},
				Recorder:               record.NewFakeRecorder(1),
				Generators:             map[string]generators.Generator{},
				ArgoDB:                 argodb,
				KubeClientset:          kubeclientset,
				Metrics:                metrics,
				EnableProgressiveSyncs: cc.enableProgressiveSyncs,
			}
			req := ctrl.Request{
				NamespacedName: types.NamespacedName{
					Namespace: cc.appSet.Namespace,
					Name:      cc.appSet.Name,
				},
			}
			// Run reconciliation
			_, err = r.Reconcile(t.Context(), req)
			require.NoError(t, err)
			// Fetch the updated ApplicationSet
			var updatedAppSet v1alpha1.ApplicationSet
			err = r.Get(t.Context(), req.NamespacedName, &updatedAppSet)
			require.NoError(t, err)
			// Verify the applicationStatus field
			assert.Equal(t, cc.expectedAppStatuses, updatedAppSet.Status.ApplicationStatus, "applicationStatus should match expected value")
		})
	}
}

View File

@@ -79,10 +79,14 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
return nil, fmt.Errorf("error getting cluster secrets: %w", err)
}
paramHolder := &paramHolder{isFlatMode: appSetGenerator.Clusters.FlatList}
logCtx.Debugf("Using flat mode = %t for cluster generator", paramHolder.isFlatMode)
res := []map[string]any{}
secretsFound := []corev1.Secret{}
isFlatMode := appSetGenerator.Clusters.FlatList
logCtx.Debugf("Using flat mode = %t for cluster generator", isFlatMode)
clustersParams := make([]map[string]any, 0)
for _, cluster := range clustersFromArgoCD {
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
// handled by the next step.
@@ -101,80 +105,72 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
}
paramHolder.append(params)
if isFlatMode {
clustersParams = append(clustersParams, params)
} else {
res = append(res, params)
}
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
}
}
// For each matching cluster secret (non-local clusters only)
for _, cluster := range secretsFound {
params := g.getClusterParameters(cluster, appSet)
params := map[string]any{}
params["name"] = string(cluster.Data["name"])
params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"]))
params["server"] = string(cluster.Data["server"])
project, ok := cluster.Data["project"]
if ok {
params["project"] = string(project)
} else {
params["project"] = ""
}
if appSet.Spec.GoTemplate {
meta := map[string]any{}
if len(cluster.Annotations) > 0 {
meta["annotations"] = cluster.Annotations
}
if len(cluster.Labels) > 0 {
meta["labels"] = cluster.Labels
}
params["metadata"] = meta
} else {
for key, value := range cluster.Annotations {
params["metadata.annotations."+key] = value
}
for key, value := range cluster.Labels {
params["metadata.labels."+key] = value
}
}
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
if err != nil {
return nil, fmt.Errorf("error appending templated values for cluster: %w", err)
}
paramHolder.append(params)
if isFlatMode {
clustersParams = append(clustersParams, params)
} else {
res = append(res, params)
}
logCtx.WithField("cluster", cluster.Name).Debug("matched cluster secret")
}
return paramHolder.consolidate(), nil
}
// paramHolder accumulates the per-cluster parameter maps produced by the
// cluster generator and shapes the final result depending on flat-list mode.
type paramHolder struct {
	// isFlatMode mirrors the generator's FlatList setting: when true, all
	// parameter maps are consolidated under a single "clusters" entry.
	isFlatMode bool
	params     []map[string]any
}

// append records one cluster's parameter map.
func (p *paramHolder) append(params map[string]any) {
	p.params = append(p.params, params)
}

// consolidate returns the accumulated parameters. In flat mode the entries are
// wrapped into a single map under the "clusters" key; otherwise they are
// returned as-is, one map per cluster.
//
// NOTE: the previous body contained interleaved diff residue (references to
// undefined `isFlatMode`, `res`, and `clustersParams` plus an unbalanced
// brace) that did not compile; this is the intended implementation.
func (p *paramHolder) consolidate() []map[string]any {
	if p.isFlatMode {
		p.params = []map[string]any{
			{"clusters": p.params},
		}
	}
	return p.params
}
// getClusterParameters builds the generator parameter map for a single cluster
// secret: name, nameNormalized, server, project, and cluster metadata
// (labels/annotations), shaped according to whether the ApplicationSet uses Go
// templating.
//
// Fix: removed a stray, unreachable `return res, nil` left behind after the
// second `return` — it referenced an undefined identifier and did not compile.
func (g *ClusterGenerator) getClusterParameters(cluster corev1.Secret, appSet *argoappsetv1alpha1.ApplicationSet) map[string]any {
	params := map[string]any{}
	params["name"] = string(cluster.Data["name"])
	params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"]))
	params["server"] = string(cluster.Data["server"])

	// "project" is optional on the cluster secret; default to the empty string.
	if project, ok := cluster.Data["project"]; ok {
		params["project"] = string(project)
	} else {
		params["project"] = ""
	}

	if appSet.Spec.GoTemplate {
		// Go-template appsets receive structured metadata...
		meta := map[string]any{}
		if len(cluster.Annotations) > 0 {
			meta["annotations"] = cluster.Annotations
		}
		if len(cluster.Labels) > 0 {
			meta["labels"] = cluster.Labels
		}
		params["metadata"] = meta
	} else {
		// ...while fasttemplate appsets receive flattened dotted keys.
		for key, value := range cluster.Annotations {
			params["metadata.annotations."+key] = value
		}
		for key, value := range cluster.Labels {
			params["metadata.labels."+key] = value
		}
	}

	return params
}
func (g *ClusterGenerator) getSecretsByClusterName(log *log.Entry, appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {

View File

@@ -29,10 +29,10 @@ type GitGenerator struct {
}
// NewGitGenerator creates a new instance of Git Generator
func NewGitGenerator(repos services.Repos, controllerNamespace string) Generator {
func NewGitGenerator(repos services.Repos, namespace string) Generator {
g := &GitGenerator{
repos: repos,
namespace: controllerNamespace,
namespace: namespace,
}
return g
@@ -78,11 +78,11 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
if !strings.Contains(appSet.Spec.Template.Spec.Project, "{{") {
project := appSet.Spec.Template.Spec.Project
appProject := &argoprojiov1alpha1.AppProject{}
controllerNamespace := g.namespace
if controllerNamespace == "" {
controllerNamespace = appSet.Namespace
namespace := g.namespace
if namespace == "" {
namespace = appSet.Namespace
}
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: controllerNamespace}, appProject); err != nil {
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: namespace}, appProject); err != nil {
return nil, fmt.Errorf("error getting project %s: %w", project, err)
}
// we need to verify the signature on the Git revision if GPG is enabled
@@ -222,18 +222,19 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, values map[string]string, useGoTemplate bool, goTemplateOptions []string, pathParamPrefix string) ([]map[string]any, error) {
objectsFound := []map[string]any{}
// First, we attempt to parse as a single object.
// This will also succeed for empty files.
singleObj := map[string]any{}
err := yaml.Unmarshal(fileContent, &singleObj)
if err == nil {
objectsFound = append(objectsFound, singleObj)
} else {
// If unable to parse as an object, try to parse as an array
err = yaml.Unmarshal(fileContent, &objectsFound)
// First, we attempt to parse as an array
err := yaml.Unmarshal(fileContent, &objectsFound)
if err != nil {
// If unable to parse as an array, attempt to parse as a single object
singleObj := make(map[string]any)
err = yaml.Unmarshal(fileContent, &singleObj)
if err != nil {
return nil, fmt.Errorf("unable to parse file: %w", err)
}
objectsFound = append(objectsFound, singleObj)
} else if len(objectsFound) == 0 {
// If file is valid but empty, add a default empty item
objectsFound = append(objectsFound, map[string]any{})
}
res := []map[string]any{}

View File

@@ -825,7 +825,7 @@ func TestGitGenerateParamsFromFiles(t *testing.T) {
},
repoPathsError: nil,
expected: []map[string]any{},
expectedError: errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []map[string]interface {}"),
expectedError: errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
},
{
name: "test JSON array",
@@ -982,16 +982,6 @@ cluster:
},
expectedError: nil,
},
{
name: "test empty YAML array",
files: []v1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
repoFileContents: map[string][]byte{
"cluster-config/production/config.yaml": []byte(`[]`),
},
repoPathsError: nil,
expected: []map[string]any{},
expectedError: nil,
},
}
for _, testCase := range cases {
@@ -2070,7 +2060,7 @@ func TestGitGenerateParamsFromFilesGoTemplate(t *testing.T) {
},
repoPathsError: nil,
expected: []map[string]any{},
expectedError: errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []map[string]interface {}"),
expectedError: errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
},
{
name: "test JSON array",

View File

@@ -11,7 +11,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gosimple/slug"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v3/applicationset/services"
pullrequest "github.com/argoproj/argo-cd/v3/applicationset/services/pull_request"
@@ -19,6 +18,8 @@ import (
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
)
var _ Generator = (*PullRequestGenerator)(nil)
const (
DefaultPullRequestRequeueAfter = 30 * time.Minute
)
@@ -48,10 +49,6 @@ func (g *PullRequestGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alph
return DefaultPullRequestRequeueAfter
}
func (g *PullRequestGenerator) GetContinueOnRepoNotFoundError(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) bool {
return appSetGenerator.PullRequest.ContinueOnRepoNotFoundError
}
func (g *PullRequestGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
return &appSetGenerator.PullRequest.Template
}
@@ -72,15 +69,10 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
}
pulls, err := pullrequest.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters)
params := make([]map[string]any, 0, len(pulls))
if err != nil {
if pullrequest.IsRepositoryNotFoundError(err) && g.GetContinueOnRepoNotFoundError(appSetGenerator) {
log.WithError(err).WithField("generator", g).
Warn("Skipping params generation for this repository since it was not found.")
return params, nil
}
return nil, fmt.Errorf("error listing repos: %w", err)
}
params := make([]map[string]any, 0, len(pulls))
// In order to follow the DNS label standard as defined in RFC 1123,
// we need to limit the 'branch' to 50 to give room to append/suffix-ing it
@@ -119,15 +111,15 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
"author": pull.Author,
}
// PR lables will only be supported for Go Template appsets, since fasttemplate will be deprecated.
if applicationSetInfo != nil && applicationSetInfo.Spec.GoTemplate {
paramMap["labels"] = pull.Labels
}
err := appendTemplatedValues(appSetGenerator.PullRequest.Values, paramMap, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
if err != nil {
return nil, fmt.Errorf("failed to append templated values: %w", err)
}
// PR lables will only be supported for Go Template appsets, since fasttemplate will be deprecated.
if applicationSetInfo != nil && applicationSetInfo.Spec.GoTemplate {
paramMap["labels"] = pull.Labels
}
params = append(params, paramMap)
}
return params, nil

View File

@@ -16,12 +16,11 @@ import (
func TestPullRequestGithubGenerateParams(t *testing.T) {
ctx := t.Context()
cases := []struct {
selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
values map[string]string
expected []map[string]any
expectedErr error
applicationSet argoprojiov1alpha1.ApplicationSet
continueOnRepoNotFoundError bool
selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
values map[string]string
expected []map[string]any
expectedErr error
applicationSet argoprojiov1alpha1.ApplicationSet
}{
{
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
@@ -172,30 +171,6 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
expected: nil,
expectedErr: errors.New("error listing repos: fake error"),
},
{
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
return pullrequest.NewFakeService(
ctx,
nil,
pullrequest.NewRepositoryNotFoundError(errors.New("repository not found")),
)
},
expected: []map[string]any{},
expectedErr: nil,
continueOnRepoNotFoundError: true,
},
{
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
return pullrequest.NewFakeService(
ctx,
nil,
pullrequest.NewRepositoryNotFoundError(errors.New("repository not found")),
)
},
expected: nil,
expectedErr: errors.New("error listing repos: repository not found"),
continueOnRepoNotFoundError: false,
},
{
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
return pullrequest.NewFakeService(
@@ -277,51 +252,6 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
},
},
},
{
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
return pullrequest.NewFakeService(
ctx,
[]*pullrequest.PullRequest{
{
Number: 1,
Title: "title1",
Branch: "my_branch",
TargetBranch: "master",
HeadSHA: "abcd",
Author: "testName",
Labels: []string{"preview", "preview:team1"},
},
},
nil,
)
},
values: map[string]string{
"preview_env": "{{ regexFind \"(team1|team2)\" (.labels | join \",\") }}",
},
expected: []map[string]any{
{
"number": "1",
"title": "title1",
"branch": "my_branch",
"branch_slug": "my-branch",
"target_branch": "master",
"target_branch_slug": "master",
"head_sha": "abcd",
"head_short_sha": "abcd",
"head_short_sha_7": "abcd",
"author": "testName",
"labels": []string{"preview", "preview:team1"},
"values": map[string]string{"preview_env": "team1"},
},
},
expectedErr: nil,
applicationSet: argoprojiov1alpha1.ApplicationSet{
Spec: argoprojiov1alpha1.ApplicationSetSpec{
// Application set is using fasttemplate.
GoTemplate: true,
},
},
},
}
for _, c := range cases {
@@ -330,8 +260,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
}
generatorConfig := argoprojiov1alpha1.ApplicationSetGenerator{
PullRequest: &argoprojiov1alpha1.PullRequestGenerator{
Values: c.values,
ContinueOnRepoNotFoundError: c.continueOnRepoNotFoundError,
Values: c.values,
},
}

View File

@@ -10,15 +10,15 @@ import (
"github.com/argoproj/argo-cd/v3/applicationset/services"
)
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, namespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
terminalGenerators := map[string]Generator{
"List": NewListGenerator(),
"Clusters": NewClusterGenerator(ctx, c, k8sClient, controllerNamespace),
"Git": NewGitGenerator(argoCDService, controllerNamespace),
"Clusters": NewClusterGenerator(ctx, c, k8sClient, namespace),
"Git": NewGitGenerator(argoCDService, namespace),
"SCMProvider": NewSCMProviderGenerator(c, scmConfig),
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace),
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace),
"PullRequest": NewPullRequestGenerator(c, scmConfig),
"Plugin": NewPluginGenerator(c, controllerNamespace),
"Plugin": NewPluginGenerator(c, namespace),
}
nestedGenerators := map[string]Generator{

View File

@@ -10,10 +10,7 @@ import (
"github.com/microsoft/azure-devops-go-api/azuredevops/v7/git"
)
const (
AZURE_DEVOPS_DEFAULT_URL = "https://dev.azure.com"
AZURE_DEVOPS_PROJECT_NOT_FOUND_ERROR = "The following project does not exist"
)
const AZURE_DEVOPS_DEFAULT_URL = "https://dev.azure.com"
type AzureDevOpsClientFactory interface {
// Returns an Azure Devops Client interface.
@@ -73,22 +70,13 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
SearchCriteria: &git.GitPullRequestSearchCriteria{},
}
pullRequests := []*PullRequest{}
azurePullRequests, err := client.GetPullRequestsByProject(ctx, args)
if err != nil {
// A standard Http 404 error is not returned for Azure DevOps,
// so checking the error message for a specific pattern.
// NOTE: Since the repos are filtered later, only existence of the project
// is relevant for AzureDevOps
if strings.Contains(err.Error(), AZURE_DEVOPS_PROJECT_NOT_FOUND_ERROR) {
// return a custom error indicating that the repository is not found,
// but also return the empty result since the decision to continue or not in this case is made by the caller
return pullRequests, NewRepositoryNotFoundError(err)
}
return nil, fmt.Errorf("failed to get pull requests by project: %w", err)
}
pullRequests := []*PullRequest{}
for _, pr := range *azurePullRequests {
if pr.Repository == nil ||
pr.Repository.Name == nil ||

View File

@@ -2,7 +2,6 @@ package pull_request
import (
"context"
"errors"
"testing"
"github.com/microsoft/azure-devops-go-api/azuredevops/v7/core"
@@ -236,36 +235,3 @@ func TestBuildURL(t *testing.T) {
})
}
}
// TestAzureDevOpsListReturnsRepositoryNotFoundError verifies that the Azure
// DevOps "project does not exist" error message is translated into a
// RepositoryNotFoundError while an empty pull request list is returned.
func TestAzureDevOpsListReturnsRepositoryNotFoundError(t *testing.T) {
	searchArgs := git.GetPullRequestsByProjectArgs{
		Project:        createStringPtr("nonexistent"),
		SearchCriteria: &git.GitPullRequestSearchCriteria{},
	}

	noPullRequests := []git.GitPullRequest{}
	gitClient := azureMock.Client{}
	factory := &AzureClientFactoryMock{mock: &mock.Mock{}}
	factory.mock.On("GetClient", mock.Anything).Return(&gitClient, nil)
	// Azure DevOps signals a missing project through an error message rather
	// than an HTTP 404, so the mock returns that message verbatim.
	gitClient.On("GetPullRequestsByProject", t.Context(), searchArgs).Return(&noPullRequests,
		errors.New("The following project does not exist:"))

	provider := AzureDevOpsService{
		clientFactory: factory,
		project:       "nonexistent",
		repo:          "nonexistent",
		labels:        nil,
	}

	prs, err := provider.List(t.Context())

	// The provider must report the typed error and an empty result.
	require.Error(t, err)
	assert.Empty(t, prs)
	assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"net/url"
"strings"
"github.com/ktrysmt/go-bitbucket"
)
@@ -118,17 +117,8 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
RepoSlug: b.repositorySlug,
}
pullRequests := []*PullRequest{}
response, err := b.client.Repositories.PullRequests.Gets(opts)
if err != nil {
// A standard Http 404 error is not returned for Bitbucket Cloud,
// so checking the error message for a specific pattern
if strings.Contains(err.Error(), "404 Not Found") {
// return a custom error indicating that the repository is not found,
// but also return the empty result since the decision to continue or not in this case is made by the caller
return pullRequests, NewRepositoryNotFoundError(err)
}
return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", b.owner, b.repositorySlug, err)
}
@@ -152,6 +142,7 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
return nil, fmt.Errorf("error unmarshalling json to type '[]BitbucketCloudPullRequest': %w", err)
}
pullRequests := []*PullRequest{}
for _, pull := range pulls {
pullRequests = append(pullRequests, &PullRequest{
Number: pull.ID,

View File

@@ -492,29 +492,3 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
TargetBranch: "branch-200",
}, *pullRequests[0])
}
// TestBitbucketCloudListReturnsRepositoryNotFoundError verifies that an HTTP
// 404 from Bitbucket Cloud is surfaced as a RepositoryNotFoundError together
// with an empty pull request list.
func TestBitbucketCloudListReturnsRepositoryNotFoundError(t *testing.T) {
	handler := http.NewServeMux()
	handler.HandleFunc("/repositories/nonexistent/nonexistent/pullrequests/", func(w http.ResponseWriter, _ *http.Request) {
		// Simulate a missing repository.
		w.WriteHeader(http.StatusNotFound)
		_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
	})
	ts := httptest.NewServer(handler)
	defer ts.Close()

	svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "nonexistent", "nonexistent")
	require.NoError(t, err)

	prs, err := svc.List(t.Context())

	// The service must report the typed error and an empty result.
	require.Error(t, err)
	assert.Empty(t, prs)
	assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}

View File

@@ -8,7 +8,7 @@ import (
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v3/applicationset/services"
"github.com/argoproj/argo-cd/v3/applicationset/utils"
)
type BitbucketService struct {
@@ -49,10 +49,15 @@ func NewBitbucketServiceNoAuth(ctx context.Context, url, projectKey, repositoryS
}
func newBitbucketService(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey, repositorySlug string, scmRootCAPath string, insecure bool, caCerts []byte) (PullRequestService, error) {
bbClient := services.SetupBitbucketClient(ctx, bitbucketConfig, scmRootCAPath, insecure, caCerts)
bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath)
tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
bitbucketConfig.HTTPClient = &http.Client{Transport: &http.Transport{
TLSClientConfig: tlsConfig,
}}
bitbucketClient := bitbucketv1.NewAPIClient(ctx, bitbucketConfig)
return &BitbucketService{
client: bbClient,
client: bitbucketClient,
projectKey: projectKey,
repositorySlug: repositorySlug,
}, nil
@@ -67,11 +72,6 @@ func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) {
for {
response, err := b.client.DefaultApi.GetPullRequestsPage(b.projectKey, b.repositorySlug, paged)
if err != nil {
if response != nil && response.Response != nil && response.StatusCode == http.StatusNotFound {
// return a custom error indicating that the repository is not found,
// but also return the empty result since the decision to continue or not in this case is made by the caller
return pullRequests, NewRepositoryNotFoundError(err)
}
return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", b.projectKey, b.repositorySlug, err)
}
pulls, err := bitbucketv1.GetPullRequestsResponse(response)

View File

@@ -510,29 +510,3 @@ func TestListPullRequestBranchMatch(t *testing.T) {
})
require.Error(t, err)
}
// TestBitbucketServerListReturnsRepositoryNotFoundError verifies that an HTTP
// 404 from Bitbucket Server is surfaced as a RepositoryNotFoundError together
// with an empty pull request list.
func TestBitbucketServerListReturnsRepositoryNotFoundError(t *testing.T) {
	handler := http.NewServeMux()
	handler.HandleFunc("/rest/api/1.0/projects/nonexistent/repos/nonexistent/pull-requests?limit=100", func(w http.ResponseWriter, _ *http.Request) {
		// Simulate a missing repository.
		w.WriteHeader(http.StatusNotFound)
		_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
	})
	ts := httptest.NewServer(handler)
	defer ts.Close()

	svc, err := NewBitbucketServiceNoAuth(t.Context(), ts.URL, "nonexistent", "nonexistent", "", false, nil)
	require.NoError(t, err)

	prs, err := svc.List(t.Context())

	// The service must report the typed error and an empty result.
	require.Error(t, err)
	assert.Empty(t, prs)
	assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}

View File

@@ -1,23 +0,0 @@
package pull_request
import "errors"
// RepositoryNotFoundError represents an error when a repository is not found by a pull request provider
type RepositoryNotFoundError struct {
causingError error
}
func (e *RepositoryNotFoundError) Error() string {
return e.causingError.Error()
}
// NewRepositoryNotFoundError creates a new repository not found error
func NewRepositoryNotFoundError(err error) error {
return &RepositoryNotFoundError{causingError: err}
}
// IsRepositoryNotFoundError checks if the given error is a repository not found error
func IsRepositoryNotFoundError(err error) bool {
var repoErr *RepositoryNotFoundError
return errors.As(err, &repoErr)
}

View File

@@ -1,48 +0,0 @@
package pull_request
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRepositoryNotFoundError exercises construction and detection of
// RepositoryNotFoundError, including its behavior around wrapped errors.
func TestRepositoryNotFoundError(t *testing.T) {
	t.Run("NewRepositoryNotFoundError creates correct error type", func(t *testing.T) {
		cause := errors.New("repository does not exist")
		err := NewRepositoryNotFoundError(cause)
		require.Error(t, err)
		assert.Equal(t, "repository does not exist", err.Error())
	})

	t.Run("IsRepositoryNotFoundError identifies RepositoryNotFoundError", func(t *testing.T) {
		err := NewRepositoryNotFoundError(errors.New("repository does not exist"))
		assert.True(t, IsRepositoryNotFoundError(err))
	})

	t.Run("IsRepositoryNotFoundError returns false for regular errors", func(t *testing.T) {
		assert.False(t, IsRepositoryNotFoundError(errors.New("some other error")))
	})

	t.Run("IsRepositoryNotFoundError returns false for nil error", func(t *testing.T) {
		assert.False(t, IsRepositoryNotFoundError(nil))
	})

	t.Run("IsRepositoryNotFoundError works with wrapped errors", func(t *testing.T) {
		repoErr := NewRepositoryNotFoundError(errors.New("repository does not exist"))
		stringWrapped := errors.New("wrapped: " + repoErr.Error())

		// The typed error itself is detected.
		assert.True(t, IsRepositoryNotFoundError(repoErr))
		// String concatenation does not preserve the error chain, so the
		// "wrapped" error is (intentionally) not detected.
		assert.False(t, IsRepositoryNotFoundError(stringWrapped))
	})
}

View File

@@ -52,17 +52,11 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
State: gitea.StateOpen,
}
g.client.SetContext(ctx)
list := []*PullRequest{}
prs, resp, err := g.client.ListRepoPullRequests(g.owner, g.repo, opts)
prs, _, err := g.client.ListRepoPullRequests(g.owner, g.repo, opts)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
// return a custom error indicating that the repository is not found,
// but also returning the empty result since the decision to continue or not in this case is made by the caller
return list, NewRepositoryNotFoundError(err)
}
return nil, err
}
list := []*PullRequest{}
for _, pr := range prs {
if !giteaContainLabels(g.labels, pr.Labels) {
continue

View File

@@ -339,35 +339,3 @@ func TestGetGiteaPRLabelNames(t *testing.T) {
})
}
}
// TestGiteaListReturnsRepositoryNotFoundError verifies that a 404 from the
// Gitea API is surfaced as a RepositoryNotFoundError with an empty result.
func TestGiteaListReturnsRepositoryNotFoundError(t *testing.T) {
	handler := http.NewServeMux()
	// The Gitea client probes the server version before any other call.
	handler.HandleFunc("/api/v1/version", func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"version":"1.17.0+dev-452-g1f0541780"}`))
	})
	handler.HandleFunc("/api/v1/repos/nonexistent/nonexistent/pulls?limit=0&page=1&state=open", func(w http.ResponseWriter, _ *http.Request) {
		// Simulate a missing repository.
		w.WriteHeader(http.StatusNotFound)
		_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
	})
	ts := httptest.NewServer(handler)
	defer ts.Close()

	svc, err := NewGiteaService("", ts.URL, "nonexistent", "nonexistent", []string{}, false)
	require.NoError(t, err)

	prs, err := svc.List(t.Context())

	// The service must report the typed error and an empty result.
	require.Error(t, err)
	assert.Empty(t, prs)
	assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}

View File

@@ -37,11 +37,7 @@ func NewGithubService(token, url, owner, repo string, labels []string, optionalH
}
} else {
var err error
if token == "" {
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
} else {
client, err = github.NewClient(httpClient).WithAuthToken(token).WithEnterpriseURLs(url, url)
}
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
if err != nil {
return nil, err
}
@@ -64,11 +60,6 @@ func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) {
for {
pulls, resp, err := g.client.PullRequests.List(ctx, g.owner, g.repo, opts)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
// return a custom error indicating that the repository is not found,
// but also returning the empty result since the decision to continue or not in this case is made by the caller
return pullRequests, NewRepositoryNotFoundError(err)
}
return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", g.owner, g.repo, err)
}
for _, pull := range pulls {

View File

@@ -1,12 +1,9 @@
package pull_request
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/google/go-github/v69/github"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -89,29 +86,3 @@ func TestGetGitHubPRLabelNames(t *testing.T) {
})
}
}
// TestGitHubListReturnsRepositoryNotFoundError verifies that an HTTP 404 from
// the GitHub API is surfaced as a RepositoryNotFoundError together with an
// empty pull request list.
func TestGitHubListReturnsRepositoryNotFoundError(t *testing.T) {
	handler := http.NewServeMux()
	handler.HandleFunc("/repos/nonexistent/nonexistent/pulls", func(w http.ResponseWriter, _ *http.Request) {
		// Simulate a missing repository.
		w.WriteHeader(http.StatusNotFound)
		_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
	})
	ts := httptest.NewServer(handler)
	defer ts.Close()

	svc, err := NewGithubService("", ts.URL, "nonexistent", "nonexistent", []string{}, nil)
	require.NoError(t, err)

	prs, err := svc.List(t.Context())

	// The service must report the typed error and an empty result.
	require.Error(t, err)
	assert.Empty(t, prs)
	assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}

View File

@@ -76,11 +76,6 @@ func (g *GitLabService) List(ctx context.Context) ([]*PullRequest, error) {
for {
mrs, resp, err := g.client.MergeRequests.ListProjectMergeRequests(g.project, opts, gitlab.WithContext(ctx))
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
// return a custom error indicating that the repository is not found,
// but also returning the empty result since the decision to continue or not in this case is made by the caller
return pullRequests, NewRepositoryNotFoundError(err)
}
return nil, fmt.Errorf("error listing merge requests for project '%s': %w", g.project, err)
}
for _, mr := range mrs {

View File

@@ -191,29 +191,3 @@ func TestListWithStateTLS(t *testing.T) {
})
}
}
func TestGitLabListReturnsRepositoryNotFoundError(t *testing.T) {
mux := http.NewServeMux()
server := httptest.NewServer(mux)
defer server.Close()
path := "/api/v4/projects/nonexistent/merge_requests"
mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
// Return 404 status to simulate repository not found
w.WriteHeader(http.StatusNotFound)
_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
})
svc, err := NewGitLabService("", server.URL, "nonexistent", []string{}, "", "", false, nil)
require.NoError(t, err)
prs, err := svc.List(t.Context())
// Should return empty pull requests list
assert.Empty(t, prs)
// Should return RepositoryNotFoundError
require.Error(t, err)
assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}

View File

@@ -30,5 +30,4 @@ type PullRequestService interface {
type Filter struct {
BranchMatch *regexp.Regexp
TargetBranchMatch *regexp.Regexp
TitleMatch *regexp.Regexp
}

View File

@@ -25,12 +25,6 @@ func compileFilters(filters []argoprojiov1alpha1.PullRequestGeneratorFilter) ([]
return nil, fmt.Errorf("error compiling TargetBranchMatch regexp %q: %w", *filter.TargetBranchMatch, err)
}
}
if filter.TitleMatch != nil {
outFilter.TitleMatch, err = regexp.Compile(*filter.TitleMatch)
if err != nil {
return nil, fmt.Errorf("error compiling TitleMatch regexp %q: %w", *filter.TitleMatch, err)
}
}
outFilters = append(outFilters, outFilter)
}
return outFilters, nil
@@ -43,9 +37,6 @@ func matchFilter(pullRequest *PullRequest, filter *Filter) bool {
if filter.TargetBranchMatch != nil && !filter.TargetBranchMatch.MatchString(pullRequest.TargetBranch) {
return false
}
if filter.TitleMatch != nil && !filter.TitleMatch.MatchString(pullRequest.Title) {
return false
}
return true
}

View File

@@ -137,110 +137,6 @@ func TestFilterTargetBranchMatch(t *testing.T) {
assert.Equal(t, "two", pullRequests[0].Branch)
}
func TestFilterTitleMatch(t *testing.T) {
provider, _ := NewFakeService(
t.Context(),
[]*PullRequest{
{
Number: 1,
Title: "PR one - filter",
Branch: "one",
TargetBranch: "master",
HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name1",
},
{
Number: 2,
Title: "PR two - ignore",
Branch: "two",
TargetBranch: "branch1",
HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name2",
},
{
Number: 3,
Title: "[filter] PR three",
Branch: "three",
TargetBranch: "branch2",
HeadSHA: "389d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name3",
},
{
Number: 4,
Title: "[ignore] PR four",
Branch: "four",
TargetBranch: "branch3",
HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name4",
},
},
nil,
)
filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{
{
TitleMatch: strp("\\[filter]"),
},
}
pullRequests, err := ListPullRequests(t.Context(), provider, filters)
require.NoError(t, err)
assert.Len(t, pullRequests, 1)
assert.Equal(t, "three", pullRequests[0].Branch)
}
func TestMultiFilterOrWithTitle(t *testing.T) {
provider, _ := NewFakeService(
t.Context(),
[]*PullRequest{
{
Number: 1,
Title: "PR one - filter",
Branch: "one",
TargetBranch: "master",
HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name1",
},
{
Number: 2,
Title: "PR two - ignore",
Branch: "two",
TargetBranch: "branch1",
HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name2",
},
{
Number: 3,
Title: "[filter] PR three",
Branch: "three",
TargetBranch: "branch2",
HeadSHA: "389d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name3",
},
{
Number: 4,
Title: "[ignore] PR four",
Branch: "four",
TargetBranch: "branch3",
HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name4",
},
},
nil,
)
filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{
{
TitleMatch: strp("\\[filter]"),
},
{
TitleMatch: strp("- filter"),
},
}
pullRequests, err := ListPullRequests(t.Context(), provider, filters)
require.NoError(t, err)
assert.Len(t, pullRequests, 2)
assert.Equal(t, "one", pullRequests[0].Branch)
assert.Equal(t, "three", pullRequests[1].Branch)
}
func TestMultiFilterOr(t *testing.T) {
provider, _ := NewFakeService(
t.Context(),
@@ -296,7 +192,7 @@ func TestMultiFilterOr(t *testing.T) {
assert.Equal(t, "four", pullRequests[2].Branch)
}
func TestMultiFilterOrWithTargetBranchFilterOrWithTitleFilter(t *testing.T) {
func TestMultiFilterOrWithTargetBranchFilter(t *testing.T) {
provider, _ := NewFakeService(
t.Context(),
[]*PullRequest{
@@ -332,14 +228,6 @@ func TestMultiFilterOrWithTargetBranchFilterOrWithTitleFilter(t *testing.T) {
HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name4",
},
{
Number: 5,
Title: "PR title is different than branch name",
Branch: "five",
TargetBranch: "branch3",
HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958",
Author: "name5",
},
},
nil,
)
@@ -352,21 +240,12 @@ func TestMultiFilterOrWithTargetBranchFilterOrWithTitleFilter(t *testing.T) {
BranchMatch: strp("r"),
TargetBranchMatch: strp("3"),
},
{
TitleMatch: strp("two"),
},
{
BranchMatch: strp("five"),
TitleMatch: strp("PR title is different than branch name"),
},
}
pullRequests, err := ListPullRequests(t.Context(), provider, filters)
require.NoError(t, err)
assert.Len(t, pullRequests, 3)
assert.Len(t, pullRequests, 2)
assert.Equal(t, "two", pullRequests[0].Branch)
assert.Equal(t, "four", pullRequests[1].Branch)
assert.Equal(t, "five", pullRequests[2].Branch)
assert.Equal(t, "PR title is different than branch name", pullRequests[2].Title)
}
func TestNoFilters(t *testing.T) {

View File

@@ -10,7 +10,7 @@ import (
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v3/applicationset/services"
"github.com/argoproj/argo-cd/v3/applicationset/utils"
)
type BitbucketServerProvider struct {
@@ -49,10 +49,15 @@ func NewBitbucketServerProviderNoAuth(ctx context.Context, url, projectKey strin
}
func newBitbucketServerProvider(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey string, allBranches bool, scmRootCAPath string, insecure bool, caCerts []byte) (*BitbucketServerProvider, error) {
bbClient := services.SetupBitbucketClient(ctx, bitbucketConfig, scmRootCAPath, insecure, caCerts)
bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath)
tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
bitbucketConfig.HTTPClient = &http.Client{Transport: &http.Transport{
TLSClientConfig: tlsConfig,
}}
bitbucketClient := bitbucketv1.NewAPIClient(ctx, bitbucketConfig)
return &BitbucketServerProvider{
client: bbClient,
client: bitbucketClient,
projectKey: projectKey,
allBranches: allBranches,
}, nil

View File

@@ -36,11 +36,7 @@ func NewGithubProvider(organization string, token string, url string, allBranche
}
} else {
var err error
if token == "" {
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
} else {
client, err = github.NewClient(httpClient).WithAuthToken(token).WithEnterpriseURLs(url, url)
}
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
if err != nil {
return nil, err
}

View File

@@ -1,22 +0,0 @@
package services
import (
"context"
"net/http"
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
"github.com/argoproj/argo-cd/v3/applicationset/utils"
)
// SetupBitbucketClient configures and creates a Bitbucket API client with TLS settings.
// It normalizes the configured base path, installs an HTTP client whose
// transport carries the resolved TLS configuration, and returns a new API
// client bound to ctx.
func SetupBitbucketClient(ctx context.Context, config *bitbucketv1.Configuration, scmRootCAPath string, insecure bool, caCerts []byte) *bitbucketv1.APIClient {
	// Normalize the base path so URL variants resolve consistently.
	config.BasePath = utils.NormalizeBitbucketBasePath(config.BasePath)

	// Clone DefaultTransport to keep its defaults (pooling, timeouts), then
	// override only the TLS configuration.
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.TLSClientConfig = utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
	config.HTTPClient = &http.Client{Transport: tr}

	return bitbucketv1.NewAPIClient(ctx, config)
}

View File

@@ -1,37 +0,0 @@
package services
import (
"context"
"crypto/tls"
"net/http"
"testing"
"time"
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
"github.com/stretchr/testify/require"
)
// TestSetupBitbucketClient verifies that the helper returns a client, wires
// an HTTP client into the configuration, and bases its transport on a clone
// of http.DefaultTransport (preserving its defaults) with TLS config set.
func TestSetupBitbucketClient(t *testing.T) {
	cfg := &bitbucketv1.Configuration{}

	got := SetupBitbucketClient(context.Background(), cfg, "", false, nil)

	require.NotNil(t, got, "expected client to be created")
	require.NotNil(t, cfg.HTTPClient, "expected HTTPClient to be set")

	// The transport must be a distinct clone of DefaultTransport, not the
	// shared global instance.
	transport, ok := cfg.HTTPClient.Transport.(*http.Transport)
	require.True(t, ok, "expected HTTPClient.Transport to be *http.Transport")
	require.NotSame(t, http.DefaultTransport, transport, "transport should be a clone, not the global DefaultTransport")

	// TLS configuration must be populated on the cloned transport.
	require.IsType(t, &tls.Config{}, transport.TLSClientConfig)

	// Cloning should carry over DefaultTransport's non-zero defaults.
	require.Greater(t, transport.IdleConnTimeout, time.Duration(0), "IdleConnTimeout should be non-zero")
	require.Positive(t, transport.MaxIdleConns, "MaxIdleConns should be non-zero")
	require.Greater(t, transport.TLSHandshakeTimeout, time.Duration(0), "TLSHandshakeTimeout should be non-zero")
}

View File

@@ -14,7 +14,7 @@ import (
var ErrDisallowedSecretAccess = fmt.Errorf("secret must have label %q=%q", common.LabelKeySecretType, common.LabelValueSecretTypeSCMCreds)
// GetSecretRef gets the value of the key for the specified Secret resource.
// getSecretRef gets the value of the key for the specified Secret resource.
func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov1alpha1.SecretRef, namespace string, tokenRefStrictMode bool) (string, error) {
if ref == nil {
return "", nil

View File

@@ -399,19 +399,19 @@ func addInvalidGeneratorNames(names map[string]bool, applicationSetInfo *argoapp
var values map[string]any
err := json.Unmarshal([]byte(config), &values)
if err != nil {
log.Warnf("could not unmarshal kubectl.kubernetes.io/last-applied-configuration: %+v", config)
log.Warnf("couldn't unmarshal kubectl.kubernetes.io/last-applied-configuration: %+v", config)
return
}
spec, ok := values["spec"].(map[string]any)
if !ok {
log.Warn("could not get spec from kubectl.kubernetes.io/last-applied-configuration annotation")
log.Warn("coundn't get spec from kubectl.kubernetes.io/last-applied-configuration annotation")
return
}
generators, ok := spec["generators"].([]any)
if !ok {
log.Warn("could not get generators from kubectl.kubernetes.io/last-applied-configuration annotation")
log.Warn("coundn't get generators from kubectl.kubernetes.io/last-applied-configuration annotation")
return
}
@@ -422,7 +422,7 @@ func addInvalidGeneratorNames(names map[string]bool, applicationSetInfo *argoapp
generator, ok := generators[index].(map[string]any)
if !ok {
log.Warn("could not get generator from kubectl.kubernetes.io/last-applied-configuration annotation")
log.Warn("coundn't get generator from kubectl.kubernetes.io/last-applied-configuration annotation")
return
}

View File

@@ -26,14 +26,10 @@ import (
"github.com/go-playground/webhooks/v6/github"
"github.com/go-playground/webhooks/v6/gitlab"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v3/util/guard"
)
const payloadQueueSize = 50000
const panicMsgAppSet = "panic while processing applicationset-controller webhook event"
type WebhookHandler struct {
sync.WaitGroup // for testing
github *github.Webhook
@@ -78,15 +74,15 @@ func NewWebhookHandler(webhookParallelism int, argocdSettingsMgr *argosettings.S
if err != nil {
return nil, fmt.Errorf("failed to get argocd settings: %w", err)
}
githubHandler, err := github.New(github.Options.Secret(argocdSettings.GetWebhookGitHubSecret()))
githubHandler, err := github.New(github.Options.Secret(argocdSettings.WebhookGitHubSecret))
if err != nil {
return nil, fmt.Errorf("unable to init GitHub webhook: %w", err)
}
gitlabHandler, err := gitlab.New(gitlab.Options.Secret(argocdSettings.GetWebhookGitLabSecret()))
gitlabHandler, err := gitlab.New(gitlab.Options.Secret(argocdSettings.WebhookGitLabSecret))
if err != nil {
return nil, fmt.Errorf("unable to init GitLab webhook: %w", err)
}
azuredevopsHandler, err := azuredevops.New(azuredevops.Options.BasicAuth(argocdSettings.GetWebhookAzureDevOpsUsername(), argocdSettings.GetWebhookAzureDevOpsPassword()))
azuredevopsHandler, err := azuredevops.New(azuredevops.Options.BasicAuth(argocdSettings.WebhookAzureDevOpsUsername, argocdSettings.WebhookAzureDevOpsPassword))
if err != nil {
return nil, fmt.Errorf("unable to init Azure DevOps webhook: %w", err)
}
@@ -106,7 +102,6 @@ func NewWebhookHandler(webhookParallelism int, argocdSettingsMgr *argosettings.S
}
func (h *WebhookHandler) startWorkerPool(webhookParallelism int) {
compLog := log.WithField("component", "applicationset-webhook")
for i := 0; i < webhookParallelism; i++ {
h.Add(1)
go func() {
@@ -116,7 +111,7 @@ func (h *WebhookHandler) startWorkerPool(webhookParallelism int) {
if !ok {
return
}
guard.RecoverAndLog(func() { h.HandleEvent(payload) }, compLog, panicMsgAppSet)
h.HandleEvent(payload)
}
}()
}
@@ -344,7 +339,7 @@ func genRevisionHasChanged(gen *v1alpha1.GitGenerator, revision string, touchedH
func gitGeneratorUsesURL(gen *v1alpha1.GitGenerator, webURL string, repoRegexp *regexp.Regexp) bool {
if !repoRegexp.MatchString(gen.RepoURL) {
log.Warnf("%s does not match %s", gen.RepoURL, repoRegexp.String())
log.Debugf("%s does not match %s", gen.RepoURL, repoRegexp.String())
return false
}

View File

@@ -7,7 +7,6 @@
# p, <role/user/group>, <resource>, <action>, <object>, <allow/deny>
p, role:readonly, applications, get, */*, allow
p, role:readonly, applicationsets, get, */*, allow
p, role:readonly, certificates, get, *, allow
p, role:readonly, clusters, get, *, allow
p, role:readonly, repositories, get, *, allow
1 # Built-in policy which defines two roles: role:readonly and role:admin,
7 # p, <role/user/group>, <resource>, <action>, <object>, <allow/deny>
8 p, role:readonly, applications, get, */*, allow
9 p, role:readonly, applicationsets, get, */*, allow p, role:readonly, certificates, get, *, allow
p, role:readonly, certificates, get, *, allow
10 p, role:readonly, clusters, get, *, allow
11 p, role:readonly, repositories, get, *, allow
12 p, role:readonly, write-repositories, get, *, allow

216
assets/swagger.json generated
View File

@@ -374,56 +374,6 @@
}
}
},
"/api/v1/applications/{appName}/server-side-diff": {
"get": {
"tags": [
"ApplicationService"
],
"summary": "ServerSideDiff performs server-side diff calculation using dry-run apply",
"operationId": "ApplicationService_ServerSideDiff",
"parameters": [
{
"type": "string",
"name": "appName",
"in": "path",
"required": true
},
{
"type": "string",
"name": "appNamespace",
"in": "query"
},
{
"type": "string",
"name": "project",
"in": "query"
},
{
"type": "array",
"items": {
"type": "string"
},
"collectionFormat": "multi",
"name": "targetManifests",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/applicationApplicationServerSideDiffResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/applications/{application.metadata.name}": {
"put": {
"tags": [
@@ -1049,11 +999,6 @@
"collectionFormat": "multi",
"name": "revisions",
"in": "query"
},
{
"type": "boolean",
"name": "noCache",
"in": "query"
}
],
"responses": {
@@ -1528,11 +1473,10 @@
}
},
"post": {
"description": "Deprecated: use RunResourceActionV2 instead. This version does not support resource action parameters but is\nmaintained for backward compatibility. It will be removed in a future release.",
"tags": [
"ApplicationService"
],
"summary": "RunResourceAction runs a resource action",
"summary": "RunResourceAction run resource action",
"operationId": "ApplicationService_RunResourceAction",
"parameters": [
{
@@ -1546,81 +1490,7 @@
"in": "body",
"required": true,
"schema": {
"type": "string"
}
},
{
"type": "string",
"name": "namespace",
"in": "query"
},
{
"type": "string",
"name": "resourceName",
"in": "query"
},
{
"type": "string",
"name": "version",
"in": "query"
},
{
"type": "string",
"name": "group",
"in": "query"
},
{
"type": "string",
"name": "kind",
"in": "query"
},
{
"type": "string",
"name": "appNamespace",
"in": "query"
},
{
"type": "string",
"name": "project",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/applicationApplicationResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/applications/{name}/resource/actions/v2": {
"post": {
"tags": [
"ApplicationService"
],
"summary": "RunResourceActionV2 runs a resource action with parameters",
"operationId": "ApplicationService_RunResourceActionV2",
"parameters": [
{
"type": "string",
"name": "name",
"in": "path",
"required": true
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/applicationResourceActionRunRequestV2"
"$ref": "#/definitions/applicationResourceActionRunRequest"
}
}
],
@@ -5074,20 +4944,6 @@
}
}
},
"applicationApplicationServerSideDiffResponse": {
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/v1alpha1ResourceDiff"
}
},
"modified": {
"type": "boolean"
}
}
},
"applicationApplicationSyncRequest": {
"type": "object",
"title": "ApplicationSyncRequest is a request to apply the config state to live state",
@@ -5271,7 +5127,7 @@
}
}
},
"applicationResourceActionRunRequestV2": {
"applicationResourceActionRunRequest": {
"type": "object",
"properties": {
"action": {
@@ -7322,11 +7178,6 @@
"items": {
"$ref": "#/definitions/applicationv1alpha1ResourceStatus"
}
},
"resourcesCount": {
"description": "ResourcesCount is the total number of resources managed by this application set. The count may be higher than actual number of items in the Resources field when\nthe number of managed resources exceeds the limit imposed by the controller (to avoid making the status field too large).",
"type": "integer",
"format": "int64"
}
}
},
@@ -7334,10 +7185,6 @@
"description": "ApplicationSetStrategy configures how generated Applications are updated in sequence.",
"type": "object",
"properties": {
"deletionOrder": {
"type": "string",
"title": "DeletionOrder allows specifying the order for deleting generated apps when progressive sync is enabled.\naccepts values \"AllAtOnce\" and \"Reverse\""
},
"rollingSync": {
"$ref": "#/definitions/v1alpha1ApplicationSetRolloutStrategy"
},
@@ -8713,20 +8560,12 @@
"title": "KustomizeOptions are options for kustomize to use when building manifests",
"properties": {
"binaryPath": {
"description": "Deprecated: Use settings.Settings instead. See: settings.Settings.KustomizeVersions.\nIf this field is set, it will be used as the Kustomize binary path.\nOtherwise, Versions is used.",
"type": "string",
"title": "BinaryPath holds optional path to kustomize binary"
},
"buildOptions": {
"type": "string",
"title": "BuildOptions is a string of build parameters to use when calling `kustomize build`"
},
"versions": {
"description": "Versions is a list of Kustomize versions and their corresponding binary paths and build options.",
"type": "array",
"items": {
"$ref": "#/definitions/v1alpha1KustomizeVersion"
}
}
}
},
@@ -8790,24 +8629,6 @@
}
}
},
"v1alpha1KustomizeVersion": {
"type": "object",
"title": "KustomizeVersion holds information about additional Kustomize versions",
"properties": {
"buildOptions": {
"type": "string",
"title": "BuildOptions that are specific to a Kustomize version"
},
"name": {
"type": "string",
"title": "Name holds Kustomize version name"
},
"path": {
"type": "string",
"title": "Path holds the corresponding binary path"
}
}
},
"v1alpha1ListGenerator": {
"type": "object",
"title": "ListGenerator include items info",
@@ -9129,10 +8950,6 @@
"bitbucketServer": {
"$ref": "#/definitions/v1alpha1PullRequestGeneratorBitbucketServer"
},
"continueOnRepoNotFoundError": {
"description": "ContinueOnRepoNotFoundError is a flag to continue the ApplicationSet Pull Request generator parameters generation even if the repository is not found.",
"type": "boolean"
},
"filters": {
"description": "Filters for which pull requests should be considered.",
"type": "array",
@@ -9262,9 +9079,6 @@
},
"targetBranchMatch": {
"type": "string"
},
"titleMatch": {
"type": "string"
}
}
},
@@ -9668,9 +9482,21 @@
"description": "ResourceActionParam represents a parameter for a resource action.\nIt includes a name, value, type, and an optional default value for the parameter.",
"type": "object",
"properties": {
"default": {
"description": "Default is the default value of the parameter, if any.",
"type": "string"
},
"name": {
"description": "Name is the name of the parameter.",
"type": "string"
},
"type": {
"description": "Type is the type of the parameter (e.g., string, integer).",
"type": "string"
},
"value": {
"description": "Value is the value of the parameter.",
"type": "string"
}
}
},
@@ -9970,10 +9796,6 @@
"description": "Limit is the maximum number of attempts for retrying a failed sync. If set to 0, no retries will be performed.",
"type": "integer",
"format": "int64"
},
"refresh": {
"type": "boolean",
"title": "Refresh indicates if the latest revision should be used on retry instead of the initial one (default: false)"
}
}
},
@@ -10554,7 +10376,7 @@
"type": "boolean",
"title": "AllowEmpty allows apps have zero live resources (default: false)"
},
"enabled": {
"enable": {
"type": "boolean",
"title": "Enable allows apps to explicitly control automated sync"
},
@@ -10573,12 +10395,12 @@
"type": "object",
"properties": {
"path": {
"description": "Path is a directory path within the git repository where hydrated manifests should be committed to and synced\nfrom. The Path should never point to the root of the repo. If hydrateTo is set, this is just the path from which\nhydrated manifests will be synced.\n\n+kubebuilder:validation:Required\n+kubebuilder:validation:MinLength=1\n+kubebuilder:validation:Pattern=`^.{2,}|[^./]$`",
"description": "Path is a directory path within the git repository where hydrated manifests should be committed to and synced\nfrom. If hydrateTo is set, this is just the path from which hydrated manifests will be synced.",
"type": "string"
},
"targetBranch": {
"description": "TargetBranch is the branch from which hydrated manifests will be synced.\nIf HydrateTo is not set, this is also the branch to which hydrated manifests are committed.",
"type": "string"
"type": "string",
"title": "TargetBranch is the branch to which hydrated manifests should be committed"
}
}
},

View File

@@ -14,7 +14,6 @@ import (
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
logutils "github.com/argoproj/argo-cd/v3/util/log"
"github.com/argoproj/argo-cd/v3/util/profile"
"github.com/argoproj/argo-cd/v3/util/tls"
"github.com/argoproj/argo-cd/v3/applicationset/controllers"
@@ -80,7 +79,6 @@ func NewCommand() *cobra.Command {
enableScmProviders bool
webhookParallelism int
tokenRefStrictMode bool
maxResourcesStatusCount int
)
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
@@ -171,15 +169,6 @@ func NewCommand() *cobra.Command {
log.Error(err, "unable to start manager")
os.Exit(1)
}
pprofMux := http.NewServeMux()
profile.RegisterProfiler(pprofMux)
// This looks a little strange. Eg, not using ctrl.Options PprofBindAddress and then adding the pprof mux
// to the metrics server. However, it allows for the controller to dynamically expose the pprof endpoints
// and use the existing metrics server, the same pattern that the application controller and api-server follow.
if err = mgr.AddMetricsServerExtraHandler("/debug/pprof/", pprofMux); err != nil {
log.Error(err, "failed to register pprof handlers")
}
dynamicClient, err := dynamic.NewForConfig(mgr.GetConfig())
errors.CheckError(err)
k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig())
@@ -242,7 +231,6 @@ func NewCommand() *cobra.Command {
GlobalPreservedAnnotations: globalPreservedAnnotations,
GlobalPreservedLabels: globalPreservedLabels,
Metrics: &metrics,
MaxResourcesStatusCount: maxResourcesStatusCount,
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
os.Exit(1)
@@ -280,14 +268,13 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
command.Flags().IntVar(&maxConcurrentReconciliations, "concurrent-reconciliations", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_RECONCILIATIONS", 10, 1, math.MaxInt), "Max concurrent reconciliations limit for the controller")
command.Flags().IntVar(&maxConcurrentReconciliations, "concurrent-reconciliations", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_RECONCILIATIONS", 10, 1, 100), "Max concurrent reconciliations limit for the controller")
command.Flags().StringVar(&scmRootCAPath, "scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")
command.Flags().StringSliceVar(&globalPreservedAnnotations, "preserved-annotations", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS", []string{}, ","), "Sets global preserved field values for annotations")
command.Flags().StringSliceVar(&globalPreservedLabels, "preserved-labels", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS", []string{}, ","), "Sets global preserved field values for labels")
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
command.Flags().StringSliceVar(&metricsAplicationsetLabels, "metrics-applicationset-labels", []string{}, "List of Application labels that will be added to the argocd_applicationset_labels metric")
command.Flags().BoolVar(&enableGitHubAPIMetrics, "enable-github-api-metrics", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_GITHUB_API_METRICS", false), "Enable GitHub API metrics for generators that use the GitHub API")
command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 0, 0, math.MaxInt), "Max number of resources stored in appset status.")
return &command
}

View File

@@ -35,7 +35,7 @@ func NewCommand() *cobra.Command {
if nonce == "" {
errors.CheckError(fmt.Errorf("%s is not set", askpass.ASKPASS_NONCE_ENV))
}
conn, err := grpc_util.BlockingNewClient(ctx, "unix", askpass.SocketPath, nil, grpc.WithTransportCredentials(insecure.NewCredentials()))
conn, err := grpc_util.BlockingDial(ctx, "unix", askpass.SocketPath, nil, grpc.WithTransportCredentials(insecure.NewCredentials()))
errors.CheckError(err)
defer utilio.Close(conn)
client := askpass.NewAskPassServiceClient(conn)

View File

@@ -1,5 +1,3 @@
//go:build !darwin || (cgo && darwin)
package commands
import (

View File

@@ -1,25 +0,0 @@
//go:build darwin && !cgo
// Package commands
// This file is used when the GOOS is darwin and CGO is not enabled.
// It provides a no-op implementation of newAzureCommand to allow goreleaser to build
// a darwin binary on a linux machine.
package commands
import (
"log"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/v3/util/workloadidentity"
)
// newAzureCommand returns a stub "azure" command used on darwin builds
// without CGO; running it always terminates with the workload-identity
// CGO error message.
func newAzureCommand() *cobra.Command {
	return &cobra.Command{
		Use: "azure",
		Run: func(c *cobra.Command, _ []string) {
			// CGO is unavailable in this build, so Azure workload identity
			// cannot function; fail loudly instead of misbehaving.
			log.Fatalf(workloadidentity.CGOError)
		},
	}
}

View File

@@ -11,6 +11,16 @@ import (
"sync"
"syscall"
"github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
"github.com/argoproj/argo-cd/v3/util/env"
"github.com/argoproj/argo-cd/v3/util/errors"
service "github.com/argoproj/argo-cd/v3/util/notification/argocd"
"github.com/argoproj/argo-cd/v3/util/tls"
notificationscontroller "github.com/argoproj/argo-cd/v3/notification_controller/controller"
"github.com/argoproj/notifications-engine/pkg/controller"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -20,25 +30,27 @@ import (
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v3/common"
notificationscontroller "github.com/argoproj/argo-cd/v3/notification_controller/controller"
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
"github.com/argoproj/argo-cd/v3/util/cli"
"github.com/argoproj/argo-cd/v3/util/env"
"github.com/argoproj/argo-cd/v3/util/errors"
service "github.com/argoproj/argo-cd/v3/util/notification/argocd"
"github.com/argoproj/argo-cd/v3/util/tls"
)
const (
defaultMetricsPort = 9001
)
func addK8SFlagsToCmd(cmd *cobra.Command) clientcmd.ClientConfig {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
overrides := clientcmd.ConfigOverrides{}
kflags := clientcmd.RecommendedConfigOverrideFlags("")
cmd.PersistentFlags().StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to a kube config. Only required if out-of-cluster")
clientcmd.BindOverrideFlags(&overrides, cmd.PersistentFlags(), kflags)
return clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin)
}
func NewCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
processorsCount int
namespace string
appLabelSelector string
logLevel string
logFormat string
@@ -163,9 +175,10 @@ func NewCommand() *cobra.Command {
return nil
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
clientConfig = addK8SFlagsToCmd(&command)
command.Flags().IntVar(&processorsCount, "processors-count", 1, "Processors count.")
command.Flags().StringVar(&appLabelSelector, "app-label-selector", "", "App label selector.")
command.Flags().StringVar(&namespace, "namespace", "", "Namespace which controller handles. Current namespace if empty.")
command.Flags().StringVar(&logLevel, "loglevel", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
command.Flags().StringVar(&logFormat, "logformat", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGFORMAT", "json"), "Set the logging format. One of: json|text")
command.Flags().IntVar(&metricsPort, "metrics-port", defaultMetricsPort, "Metrics port")

View File

@@ -415,6 +415,7 @@ func reconcileApplications(
},
settingsMgr,
stateCache,
projInformer,
server,
cache,
time.Second,
@@ -463,7 +464,7 @@ func reconcileApplications(
sources = append(sources, app.Spec.GetSource())
revisions = append(revisions, app.Spec.GetSource().TargetRevision)
res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false, false)
if err != nil {
return nil, fmt.Errorf("error comparing app states: %w", err)
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
@@ -609,31 +608,7 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
clientConfig := clientcmd.NewDefaultClientConfig(*cfgAccess, &overrides)
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
// Seed a minimal in-memory Argo CD environment so settings retrieval succeeds
argoCDCM := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: ArgoCDNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
}
argoCDSecret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: ArgoCDNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string][]byte{
"server.secretkey": []byte("test"),
},
}
kubeClientset := fake.NewClientset(argoCDCM, argoCDSecret)
kubeClientset := fake.NewClientset()
var awsAuthConf *v1alpha1.AWSAuthConfig
var execProviderConf *v1alpha1.ExecProviderConfig

View File

@@ -24,24 +24,24 @@ func TestRun_SignalHandling_GracefulShutdown(t *testing.T) {
},
}
var runErr error
var err error
doneCh := make(chan struct{})
go func() {
runErr = d.Run(t.Context(), &DashboardConfig{ClientOpts: &apiclient.ClientOptions{}})
err = d.Run(t.Context(), &DashboardConfig{ClientOpts: &apiclient.ClientOptions{}})
close(doneCh)
}()
// Allow some time for the dashboard to register the signal handler
time.Sleep(50 * time.Millisecond)
proc, procErr := os.FindProcess(os.Getpid())
require.NoErrorf(t, procErr, "failed to find process: %v", procErr)
sigErr := proc.Signal(syscall.SIGINT)
require.NoErrorf(t, sigErr, "failed to send SIGINT: %v", sigErr)
proc, err := os.FindProcess(os.Getpid())
require.NoErrorf(t, err, "failed to find process: %v", err)
err = proc.Signal(syscall.SIGINT)
require.NoErrorf(t, err, "failed to send SIGINT: %v", err)
select {
case <-doneCh:
require.NoError(t, runErr)
require.NoError(t, err)
case <-time.After(500 * time.Millisecond):
t.Fatal("timeout: dashboard.Run did not exit after SIGINT")
}

View File

@@ -30,12 +30,11 @@ func NewNotificationsCommand() *cobra.Command {
)
var argocdService service.Service
toolsCommand := cmd.NewToolsCommand(
"notifications",
"argocd admin notifications",
applications,
settings.GetFactorySettingsForCLI(func() service.Service { return argocdService }, "argocd-notifications-secret", "argocd-notifications-cm", false),
settings.GetFactorySettingsForCLI(argocdService, "argocd-notifications-secret", "argocd-notifications-cm", false),
func(clientConfig clientcmd.ClientConfig) {
k8sCfg, err := clientConfig.ClientConfig()
if err != nil {

View File

@@ -39,13 +39,9 @@ import (
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/headless"
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/utils"
cmdutil "github.com/argoproj/argo-cd/v3/cmd/util"
argocommon "github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/controller"
argocdclient "github.com/argoproj/argo-cd/v3/pkg/apiclient"
"github.com/argoproj/argo-cd/v3/pkg/apiclient/application"
resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
clusterpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster"
projectpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/project"
"github.com/argoproj/argo-cd/v3/pkg/apiclient/settings"
@@ -99,7 +95,6 @@ func NewApplicationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
command.AddCommand(NewApplicationTerminateOpCommand(clientOpts))
command.AddCommand(NewApplicationEditCommand(clientOpts))
command.AddCommand(NewApplicationPatchCommand(clientOpts))
command.AddCommand(NewApplicationGetResourceCommand(clientOpts))
command.AddCommand(NewApplicationPatchResourceCommand(clientOpts))
command.AddCommand(NewApplicationDeleteResourceCommand(clientOpts))
command.AddCommand(NewApplicationResourceActionsCommand(clientOpts))
@@ -353,7 +348,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
command := &cobra.Command{
Use: "get APPNAME",
Short: "Get application details",
Example: templates.Examples(`
Example: templates.Examples(`
# Get basic details about the application "my-app" in wide format
argocd app get my-app -o wide
@@ -383,7 +378,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
# Get application details and display them in a tree format
argocd app get my-app --output tree
# Get application details and display them in a detailed tree format
argocd app get my-app --output tree=detailed
`),
@@ -541,7 +536,7 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
command := &cobra.Command{
Use: "logs APPNAME",
Short: "Get logs of application pods",
Example: templates.Examples(`
Example: templates.Examples(`
# Get logs of pods associated with the application "my-app"
argocd app logs my-app
@@ -855,7 +850,7 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
command := &cobra.Command{
Use: "set APPNAME",
Short: "Set application parameters",
Example: templates.Examples(`
Example: templates.Examples(`
# Set application parameters for the application "my-app"
argocd app set my-app --parameter key1=value1 --parameter key2=value2
@@ -1286,7 +1281,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
revision string
localRepoRoot string
serverSideGenerate bool
serverSideDiff bool
localIncludes []string
appNamespace string
revisions []string
@@ -1349,22 +1343,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
argoSettings, err := settingsIf.Get(ctx, &settings.SettingsQuery{})
errors.CheckError(err)
diffOption := &DifferenceOption{}
hasServerSideDiffAnnotation := resourceutil.HasAnnotationOption(app, argocommon.AnnotationCompareOptions, "ServerSideDiff=true")
// Use annotation if flag not explicitly set
if !c.Flags().Changed("server-side-diff") {
serverSideDiff = hasServerSideDiffAnnotation
} else if serverSideDiff && !hasServerSideDiffAnnotation {
// Flag explicitly set to true, but app annotation is not set
fmt.Fprintf(os.Stderr, "Warning: Application does not have ServerSideDiff=true annotation.\n")
}
// Server side diff with local requires server side generate to be set as there will be a mismatch with client-generated manifests.
if serverSideDiff && local != "" && !serverSideGenerate {
log.Fatal("--server-side-diff with --local requires --server-side-generate.")
}
switch {
case app.Spec.HasMultipleSources() && len(revisions) > 0 && len(sourcePositions) > 0:
numOfSources := int64(len(app.Spec.GetSources()))
@@ -1379,7 +1357,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
AppNamespace: &appNs,
Revisions: revisions,
SourcePositions: sourcePositions,
NoCache: &hardRefresh,
}
res, err := appIf.GetManifests(ctx, &q)
errors.CheckError(err)
@@ -1391,7 +1368,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
Name: &appName,
Revision: &revision,
AppNamespace: &appNs,
NoCache: &hardRefresh,
}
res, err := appIf.GetManifests(ctx, &q)
errors.CheckError(err)
@@ -1422,8 +1398,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
}
}
proj := getProject(ctx, c, clientOpts, app.Spec.Project)
foundDiffs := findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, app.GetName(), app.GetNamespace())
foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
if foundDiffs && exitCode {
os.Exit(diffExitCode)
}
@@ -1432,12 +1407,11 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff. May also return non-zero exit code if there is an error.")
command.Flags().IntVar(&diffExitCode, "diff-exit-code", 1, "Return specified exit code when there is a diff. Typical error code is 20 but use another exit code if you want to differentiate from the generic exit code (20) returned by all CLI commands.")
command.Flags().IntVar(&diffExitCode, "diff-exit-code", 1, "Return specified exit code when there is a diff. Typical error code is 20.")
command.Flags().StringVar(&local, "local", "", "Compare live app to a local manifests")
command.Flags().StringVar(&revision, "revision", "", "Compare live app to a particular revision")
command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
command.Flags().BoolVar(&serverSideGenerate, "server-side-generate", false, "Used with --local, this will send your manifests to the server for diffing")
command.Flags().BoolVar(&serverSideDiff, "server-side-diff", false, "Use server-side diff to calculate the diff. This will default to true if the ServerSideDiff annotation is set on the application.")
command.Flags().StringArrayVar(&localIncludes, "local-include", []string{"*.yaml", "*.yml", "*.json"}, "Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path.")
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Only render the difference in namespace")
command.Flags().StringArrayVar(&revisions, "revisions", []string{}, "Show manifests at specific revisions for source position in source-positions")
@@ -1447,101 +1421,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
return command
}
// printResourceDiff writes the standard section header for a resource and then
// delegates rendering of the live/target difference to cli.PrintDiff.
// The PrintDiff error is deliberately discarded, matching the original behavior.
func printResourceDiff(group, kind, namespace, name string, live, target *unstructured.Unstructured) {
	header := fmt.Sprintf("\n===== %s/%s %s/%s ======\n", group, kind, namespace, name)
	fmt.Print(header)
	_ = cli.PrintDiff(name, live, target)
}
// findAndPrintServerSideDiff performs a server-side diff by making requests to the api server and prints the response.
// It issues one ServerSideDiff RPC per item (single-element LiveResources/TargetManifests arrays)
// so each result can be printed against its own resource key, and returns true when at least
// one difference was printed.
// NOTE(review): assumes items was derived from the same ManagedResourcesResponse passed as
// resources, so the GKNN key lookup below can find the matching live entry — confirm at call sites.
func findAndPrintServerSideDiff(ctx context.Context, app *argoappv1.Application, items []objKeyLiveTarget, resources *application.ManagedResourcesResponse, appIf application.ApplicationServiceClient, appName, appNs string) bool {
// Process each item for server-side diff
foundDiffs := false
for _, item := range items {
// Hook resources are never diffed; skip when either side is a hook.
if item.target != nil && hook.IsHook(item.target) || item.live != nil && hook.IsHook(item.live) {
continue
}
// For server-side diff, we need to create aligned arrays for this specific resource
var liveResource *argoappv1.ResourceDiff
var targetManifest string
// Find the live-state entry matching this item's group/kind/namespace/name key.
if item.live != nil {
for _, res := range resources.Items {
if res.Group == item.key.Group && res.Kind == item.key.Kind &&
res.Namespace == item.key.Namespace && res.Name == item.key.Name {
liveResource = res
break
}
}
}
if liveResource == nil {
// Create empty live resource for creation case
liveResource = &argoappv1.ResourceDiff{
Group: item.key.Group,
Kind: item.key.Kind,
Namespace: item.key.Namespace,
Name: item.key.Name,
LiveState: "",
TargetState: "",
Modified: true,
}
}
// Serialize the desired (target) object so the server can diff it against the live state.
if item.target != nil {
jsonBytes, err := json.Marshal(item.target)
if err != nil {
errors.CheckError(fmt.Errorf("error marshaling target object: %w", err))
}
targetManifest = string(jsonBytes)
}
// Call server-side diff for this individual resource
serverSideDiffQuery := &application.ApplicationServerSideDiffQuery{
AppName: &appName,
AppNamespace: &appNs,
Project: &app.Spec.Project,
LiveResources: []*argoappv1.ResourceDiff{liveResource},
TargetManifests: []string{targetManifest},
}
serverSideDiffRes, err := appIf.ServerSideDiff(ctx, serverSideDiffQuery)
if err != nil {
// NOTE(review): the code proceeds to use serverSideDiffRes after this call, so
// errors.CheckError is assumed to abort on a non-nil error — confirm.
errors.CheckError(err)
}
// Extract diff for this resource
for _, resultItem := range serverSideDiffRes.Items {
// Skip hooks and resources that exist on both sides and are unmodified.
if resultItem.Hook || (!resultItem.Modified && resultItem.TargetState != "" && resultItem.LiveState != "") {
continue
}
// Print modified resources, plus creations/deletions (one side missing).
if resultItem.Modified || resultItem.TargetState == "" || resultItem.LiveState == "" {
var live, target *unstructured.Unstructured
// Both "" and "null" mean "no object on this side"; leave the pointer nil.
if resultItem.TargetState != "" && resultItem.TargetState != "null" {
target = &unstructured.Unstructured{}
err = json.Unmarshal([]byte(resultItem.TargetState), target)
errors.CheckError(err)
}
if resultItem.LiveState != "" && resultItem.LiveState != "null" {
live = &unstructured.Unstructured{}
err = json.Unmarshal([]byte(resultItem.LiveState), live)
errors.CheckError(err)
}
// Print resulting diff for this resource
foundDiffs = true
printResourceDiff(resultItem.Group, resultItem.Kind, resultItem.Namespace, resultItem.Name, live, target)
}
}
}
return foundDiffs
}
// DifferenceOption struct to store diff options
type DifferenceOption struct {
local string
@@ -1553,15 +1432,47 @@ type DifferenceOption struct {
revisions []string
}
// findAndPrintDiff ... Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false
func findAndPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts, useServerSideDiff bool, appIf application.ApplicationServiceClient, appName, appNs string) bool {
// findandPrintDiff ... Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false
func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) bool {
var foundDiffs bool
items, err := prepareObjectsForDiff(ctx, app, proj, resources, argoSettings, diffOptions)
liveObjs, err := cmdutil.LiveObjects(resources.Items)
errors.CheckError(err)
items := make([]objKeyLiveTarget, 0)
switch {
case diffOptions.local != "":
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
case diffOptions.revision != "" || len(diffOptions.revisions) > 0:
var unstructureds []*unstructured.Unstructured
for _, mfst := range diffOptions.res.Manifests {
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
errors.CheckError(err)
unstructureds = append(unstructureds, obj)
}
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
case diffOptions.serversideRes != nil:
var unstructureds []*unstructured.Unstructured
for _, mfst := range diffOptions.serversideRes.Manifests {
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
errors.CheckError(err)
unstructureds = append(unstructureds, obj)
}
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
default:
for i := range resources.Items {
res := resources.Items[i]
live := &unstructured.Unstructured{}
err := json.Unmarshal([]byte(res.NormalizedLiveState), &live)
errors.CheckError(err)
if useServerSideDiff {
return findAndPrintServerSideDiff(ctx, app, items, resources, appIf, appName, appNs)
target := &unstructured.Unstructured{}
err = json.Unmarshal([]byte(res.TargetState), &target)
errors.CheckError(err)
items = append(items, objKeyLiveTarget{kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name), live, target})
}
}
for _, item := range items {
@@ -1588,6 +1499,7 @@ func findAndPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
errors.CheckError(err)
if diffRes.Modified || item.target == nil || item.live == nil {
fmt.Printf("\n===== %s/%s %s/%s ======\n", item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name)
var live *unstructured.Unstructured
var target *unstructured.Unstructured
if item.target != nil && item.live != nil {
@@ -1599,8 +1511,10 @@ func findAndPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
live = item.live
target = item.target
}
foundDiffs = true
printResourceDiff(item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name, live, target)
if !foundDiffs {
foundDiffs = true
}
_ = cli.PrintDiff(item.key.Name, live, target)
}
}
return foundDiffs
@@ -2087,7 +2001,6 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
applyOutOfSyncOnly bool
async bool
retryLimit int64
retryRefresh bool
retryBackoffDuration time.Duration
retryBackoffMaxDuration time.Duration
retryBackoffFactor int64
@@ -2359,10 +2272,9 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
default:
log.Fatalf("Unknown sync strategy: '%s'", strategy)
}
if retryLimit != 0 {
if retryLimit > 0 {
syncReq.RetryStrategy = &argoappv1.RetryStrategy{
Limit: retryLimit,
Refresh: retryRefresh,
Limit: retryLimit,
Backoff: &argoappv1.Backoff{
Duration: retryBackoffDuration.String(),
MaxDuration: retryBackoffMaxDuration.String(),
@@ -2384,11 +2296,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
fmt.Printf("====== Previewing differences between live and desired state of application %s ======\n", appQualifiedName)
proj := getProject(ctx, c, clientOpts, app.Spec.Project)
// Check if application has ServerSideDiff annotation
serverSideDiff := resourceutil.HasAnnotationOption(app, argocommon.AnnotationCompareOptions, "ServerSideDiff=true")
foundDiffs = findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, appName, appNs)
foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
if !foundDiffs {
fmt.Printf("====== No Differences found ======\n")
// if no differences found, then no need to sync
@@ -2431,7 +2339,6 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
command.Flags().StringArrayVar(&labels, "label", []string{}, "Sync only specific resources with a label. This option may be specified repeatedly.")
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
command.Flags().Int64Var(&retryLimit, "retry-limit", 0, "Max number of allowed sync retries")
command.Flags().BoolVar(&retryRefresh, "retry-refresh", false, "Indicates if the latest revision should be used on retry instead of the initial one")
command.Flags().DurationVar(&retryBackoffDuration, "retry-backoff-duration", argoappv1.DefaultSyncRetryDuration, "Retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)")
command.Flags().DurationVar(&retryBackoffMaxDuration, "retry-backoff-max-duration", argoappv1.DefaultSyncRetryMaxDuration, "Max retry backoff duration. Input needs to be a duration (e.g. 2m, 1h)")
command.Flags().Int64Var(&retryBackoffFactor, "retry-backoff-factor", argoappv1.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed retry")
@@ -3489,7 +3396,7 @@ func NewApplicationRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *
Short: "Remove a source from multiple sources application.",
Example: ` # Remove the source at position 1 from application's sources. Counting starts at 1.
argocd app remove-source myapplication --source-position 1
# Remove the source named "test" from application's sources.
argocd app remove-source myapplication --source-name test`,
Run: func(c *cobra.Command, args []string) {
@@ -3612,60 +3519,3 @@ func NewApplicationConfirmDeletionCommand(clientOpts *argocdclient.ClientOptions
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Namespace of the target application where the source will be appended")
return command
}
// prepareObjectsForDiff prepares objects for diffing using the switch statement
// to handle different diff options and building the objKeyLiveTarget items.
// It pairs each managed resource's live object with a target object sourced from,
// in priority order: locally rendered manifests, manifests for a specific
// revision, a server-side rendered response, or the stored target state.
// Returns the paired items, or an error if any manifest fails to unmarshal.
func prepareObjectsForDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption) ([]objKeyLiveTarget, error) {
liveObjs, err := cmdutil.LiveObjects(resources.Items)
if err != nil {
return nil, err
}
items := make([]objKeyLiveTarget, 0)
switch {
// Diff against manifests rendered from a local directory.
case diffOptions.local != "":
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
// Diff against manifests fetched for specific revision(s).
case diffOptions.revision != "" || len(diffOptions.revisions) > 0:
var unstructureds []*unstructured.Unstructured
for _, mfst := range diffOptions.res.Manifests {
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
if err != nil {
return nil, err
}
unstructureds = append(unstructureds, obj)
}
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
// Diff against manifests generated server-side.
case diffOptions.serversideRes != nil:
var unstructureds []*unstructured.Unstructured
for _, mfst := range diffOptions.serversideRes.Manifests {
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
if err != nil {
return nil, err
}
unstructureds = append(unstructureds, obj)
}
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
// Default: pair each resource's normalized live state with its stored target state.
default:
for i := range resources.Items {
res := resources.Items[i]
live := &unstructured.Unstructured{}
err := json.Unmarshal([]byte(res.NormalizedLiveState), &live)
if err != nil {
return nil, err
}
target := &unstructured.Unstructured{}
err = json.Unmarshal([]byte(res.TargetState), &target)
if err != nil {
return nil, err
}
items = append(items, objKeyLiveTarget{kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name), live, target})
}
}
return items, nil
}

View File

@@ -8,23 +8,23 @@ import (
"strconv"
"text/tabwriter"
"github.com/argoproj/argo-cd/v3/util/templates"
"github.com/argoproj/argo-cd/v3/cmd/util"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"k8s.io/utils/ptr"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/headless"
"github.com/argoproj/argo-cd/v3/cmd/util"
argocdclient "github.com/argoproj/argo-cd/v3/pkg/apiclient"
applicationpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/application"
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/util/argo"
"github.com/argoproj/argo-cd/v3/util/errors"
"github.com/argoproj/argo-cd/v3/util/grpc"
utilio "github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/argo-cd/v3/util/templates"
)
type DisplayedAction struct {
@@ -192,26 +192,7 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti
obj := filteredObjects[i]
gvk := obj.GroupVersionKind()
objResourceName := obj.GetName()
_, err := appIf.RunResourceActionV2(ctx, &applicationpkg.ResourceActionRunRequestV2{
Name: &appName,
AppNamespace: &appNs,
Namespace: ptr.To(obj.GetNamespace()),
ResourceName: ptr.To(objResourceName),
Group: ptr.To(gvk.Group),
Kind: ptr.To(gvk.Kind),
Version: ptr.To(gvk.GroupVersion().Version),
Action: ptr.To(actionName),
// TODO: add support for parameters
})
if err == nil {
continue
}
if grpc.UnwrapGRPCStatus(err).Code() != codes.Unimplemented {
errors.CheckError(err)
}
fmt.Println("RunResourceActionV2 is not supported by the server, falling back to RunResourceAction.")
//nolint:staticcheck // RunResourceAction is deprecated, but we still need to support it for backward compatibility.
_, err = appIf.RunResourceAction(ctx, &applicationpkg.ResourceActionRunRequest{
_, err := appIf.RunResourceAction(ctx, &applicationpkg.ResourceActionRunRequest{
Name: &appName,
AppNamespace: &appNs,
Namespace: ptr.To(obj.GetNamespace()),

View File

@@ -2,14 +2,11 @@ package commands
import (
"bytes"
"encoding/json"
"strings"
"testing"
"text/tabwriter"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
)
@@ -120,561 +117,3 @@ func TestPrintResourcesTree(t *testing.T) {
assert.Equal(t, expectation, output)
}
// TestFilterFieldsFromObject verifies that filterFieldsFromObject keeps only the
// requested dotted field paths on the returned object while preserving its name.
func TestFilterFieldsFromObject(t *testing.T) {
	testCases := []struct {
		name             string
		obj              unstructured.Unstructured
		filteredFields   []string
		expectedFields   []string
		unexpectedFields []string
	}{
		{
			name: "filter nested field",
			obj: unstructured.Unstructured{
				Object: map[string]any{
					"apiVersion": "vX",
					"kind":       "kind",
					"metadata": map[string]any{
						"name": "test",
					},
					"spec": map[string]any{
						"testfield": map[string]any{
							"nestedtest": "test",
						},
						"testfield2": "test",
					},
				},
			},
			filteredFields:   []string{"spec.testfield.nestedtest"},
			expectedFields:   []string{"spec.testfield.nestedtest"},
			unexpectedFields: []string{"spec.testfield2"},
		},
		{
			name: "filter multiple fields",
			obj: unstructured.Unstructured{
				Object: map[string]any{
					"apiVersion": "vX",
					"kind":       "kind",
					"metadata": map[string]any{
						"name": "test",
					},
					"spec": map[string]any{
						"testfield": map[string]any{
							"nestedtest": "test",
						},
						"testfield2": "test",
						"testfield3": "deleteme",
					},
				},
			},
			filteredFields:   []string{"spec.testfield.nestedtest", "spec.testfield3"},
			expectedFields:   []string{"spec.testfield.nestedtest"},
			unexpectedFields: []string{"spec.testfield2"},
		},
		{
			name: "filter nested list object",
			obj: unstructured.Unstructured{
				Object: map[string]any{
					"apiVersion": "vX",
					"kind":       "kind",
					"metadata": map[string]any{
						"name": "test",
					},
					"spec": map[string]any{
						"testfield": map[string]any{
							"nestedtest": "test",
						},
						"testfield2": "test",
					},
				},
			},
			filteredFields:   []string{"spec.testfield.nestedtest"},
			expectedFields:   []string{"spec.testfield.nestedtest"},
			unexpectedFields: []string{"spec.testfield2"},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tc.obj.SetName("test-object")
			filtered := filterFieldsFromObject(&tc.obj, tc.filteredFields)
			// fieldExists reports whether the dotted path survived filtering.
			fieldExists := func(field string) bool {
				_, found, err := unstructured.NestedFieldCopy(filtered.Object, strings.Split(field, ".")...)
				require.NoError(t, err)
				return found
			}
			for _, field := range tc.expectedFields {
				assert.True(t, fieldExists(field), "Expected field %s to exist", field)
			}
			for _, field := range tc.unexpectedFields {
				assert.False(t, fieldExists(field), "Expected field %s to not exist", field)
			}
			// The name assigned above must survive filtering untouched.
			assert.Equal(t, tc.obj.GetName(), filtered.GetName())
		})
	}
}
// TestExtractNestedItem checks that extractNestedItem walks the given field path
// into a map and, for each element of the list it finds along that path, keeps
// only the final field while dropping sibling keys (the "dontextract" entries).
// A depth larger than the field path yields nil.
func TestExtractNestedItem(t *testing.T) {
// Table of fixtures: input object, field path, starting depth, expected pruned result.
tests := []struct {
name string
obj map[string]any
fields []string
depth int
expected map[string]any
}{
{
name: "extract simple nested item",
obj: map[string]any{
"listofitems": []any{
map[string]any{
"extract": "123",
"dontextract": "abc",
},
map[string]any{
"extract": "456",
"dontextract": "def",
},
map[string]any{
"extract": "789",
"dontextract": "ghi",
},
},
},
fields: []string{"listofitems", "extract"},
depth: 0,
expected: map[string]any{
"listofitems": []any{
map[string]any{
"extract": "123",
},
map[string]any{
"extract": "456",
},
map[string]any{
"extract": "789",
},
},
},
},
{
name: "double nested list of objects",
obj: map[string]any{
"listofitems": []any{
map[string]any{
"doublenested": []any{
map[string]any{
"extract": "123",
},
},
"dontextract": "abc",
},
map[string]any{
"doublenested": []any{
map[string]any{
"extract": "456",
},
},
"dontextract": "def",
},
map[string]any{
"doublenested": []any{
map[string]any{
"extract": "789",
},
},
"dontextract": "ghi",
},
},
},
fields: []string{"listofitems", "doublenested", "extract"},
depth: 0,
expected: map[string]any{
"listofitems": []any{
map[string]any{
"doublenested": []any{
map[string]any{
"extract": "123",
},
},
},
map[string]any{
"doublenested": []any{
map[string]any{
"extract": "456",
},
},
},
map[string]any{
"doublenested": []any{
map[string]any{
"extract": "789",
},
},
},
},
},
},
// Degenerate case: depth beyond the field path leaves nothing to extract.
{
name: "depth is greater then list of field size",
obj: map[string]any{"test1": "1234567890"},
fields: []string{"test1"},
depth: 4,
expected: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
filteredObj := extractNestedItem(tt.obj, tt.fields, tt.depth)
assert.Equal(t, tt.expected, filteredObj, "Did not get the correct filtered obj")
})
}
}
// TestExtractItemsFromList checks that extractItemsFromList maps each list
// element to a copy containing only the value at the given field path —
// whether that value is a scalar or a whole sub-object — and returns nil when
// the path is missing from the elements.
func TestExtractItemsFromList(t *testing.T) {
// Table of fixtures: input list, field path, expected pruned list.
tests := []struct {
name string
list []any
fields []string
expected []any
}{
{
name: "test simple field",
list: []any{
map[string]any{"extract": "value1", "dontextract": "valueA"},
map[string]any{"extract": "value2", "dontextract": "valueB"},
map[string]any{"extract": "value3", "dontextract": "valueC"},
},
fields: []string{"extract"},
expected: []any{
map[string]any{"extract": "value1"},
map[string]any{"extract": "value2"},
map[string]any{"extract": "value3"},
},
},
{
name: "test simple field with some depth",
list: []any{
map[string]any{
"test1": map[string]any{
"test2": map[string]any{
"extract": "123",
"dontextract": "abc",
},
},
},
map[string]any{
"test1": map[string]any{
"test2": map[string]any{
"extract": "456",
"dontextract": "def",
},
},
},
map[string]any{
"test1": map[string]any{
"test2": map[string]any{
"extract": "789",
"dontextract": "ghi",
},
},
},
},
fields: []string{"test1", "test2", "extract"},
expected: []any{
map[string]any{
"test1": map[string]any{
"test2": map[string]any{
"extract": "123",
},
},
},
map[string]any{
"test1": map[string]any{
"test2": map[string]any{
"extract": "456",
},
},
},
map[string]any{
"test1": map[string]any{
"test2": map[string]any{
"extract": "789",
},
},
},
},
},
// Missing field path: nothing can be extracted, so the result is nil.
{
name: "test a missing field",
list: []any{
map[string]any{"test1": "123"},
map[string]any{"test1": "456"},
map[string]any{"test1": "789"},
},
fields: []string{"test2"},
expected: nil,
},
// The extracted value may itself be an object; it is kept whole.
{
name: "test getting an object",
list: []any{
map[string]any{
"extract": map[string]any{
"keyA": "valueA",
"keyB": "valueB",
"keyC": "valueC",
},
"dontextract": map[string]any{
"key1": "value1",
"key2": "value2",
"key3": "value3",
},
},
map[string]any{
"extract": map[string]any{
"keyD": "valueD",
"keyE": "valueE",
"keyF": "valueF",
},
"dontextract": map[string]any{
"key4": "value4",
"key5": "value5",
"key6": "value6",
},
},
},
fields: []string{"extract"},
expected: []any{
map[string]any{
"extract": map[string]any{
"keyA": "valueA",
"keyB": "valueB",
"keyC": "valueC",
},
},
map[string]any{
"extract": map[string]any{
"keyD": "valueD",
"keyE": "valueE",
"keyF": "valueF",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
extractedList := extractItemsFromList(tt.list, tt.fields)
assert.Equal(t, tt.expected, extractedList, "Lists were not equal")
})
}
}
// TestReconstructObject verifies that reconstructObject re-nests a list of
// extracted items under the given field path, wrapping maps from index depth
// down to the final field, which holds the list itself.
func TestReconstructObject(t *testing.T) {
	cases := []struct {
		name      string
		extracted []any
		fields    []string
		depth     int
		expected  map[string]any
	}{
		{
			name:      "simple single field at depth 0",
			extracted: []any{"value1", "value2"},
			fields:    []string{"items"},
			depth:     0,
			expected: map[string]any{
				"items": []any{"value1", "value2"},
			},
		},
		{
			name:      "object nested at depth 1",
			extracted: []any{map[string]any{"key": "value"}},
			fields:    []string{"test1", "test2"},
			depth:     1,
			expected: map[string]any{
				"test1": map[string]any{
					"test2": []any{map[string]any{"key": "value"}},
				},
			},
		},
		{
			name:      "empty list of extracted items",
			extracted: []any{},
			fields:    []string{"test1"},
			depth:     0,
			expected: map[string]any{
				"test1": []any{},
			},
		},
		{
			name: "complex object nesteed at depth 2",
			extracted: []any{map[string]any{
				"obj1": map[string]any{
					"key1": "value1",
					"key2": "value2",
				},
				"obj2": map[string]any{
					"keyA": "valueA",
					"keyB": "valueB",
				},
			}},
			fields: []string{"test1", "test2", "test3"},
			depth:  2,
			expected: map[string]any{
				"test1": map[string]any{
					"test2": map[string]any{
						"test3": []any{
							map[string]any{
								"obj1": map[string]any{
									"key1": "value1",
									"key2": "value2",
								},
								"obj2": map[string]any{
									"keyA": "valueA",
									"keyB": "valueB",
								},
							},
						},
					},
				},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := reconstructObject(tc.extracted, tc.fields, tc.depth)
			assert.Equal(t, tc.expected, got, "objects were not equal")
		})
	}
}
// TestPrintManifests exercises the yaml, json, and wide (table) output paths
// of printManifests, with and without the resource-name column.
func TestPrintManifests(t *testing.T) {
	obj := unstructured.Unstructured{
		Object: map[string]any{
			"apiVersion": "vX",
			"kind":       "test",
			"metadata": map[string]any{
				"name": "unit-test",
			},
			"spec": map[string]any{
				"testfield": "testvalue",
			},
		},
	}
	expectedYAML := `apiVersion: vX
kind: test
metadata:
  name: unit-test
spec:
  testfield: testvalue
`
	output, _ := captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj}, false, true, "yaml")
		return nil
	})
	assert.Equal(t, expectedYAML+"\n", output, "Incorrect yaml output for printManifests")
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj, obj}, false, true, "yaml")
		return nil
	})
	// Multiple documents are separated by the standard YAML document separator.
	assert.Equal(t, expectedYAML+"\n---\n"+expectedYAML+"\n", output, "Incorrect yaml output with multiple objs.")
	expectedJSON := `{
 "apiVersion": "vX",
 "kind": "test",
 "metadata": {
  "name": "unit-test"
 },
 "spec": {
  "testfield": "testvalue"
 }
}`
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj}, false, true, "json")
		return nil
	})
	assert.Equal(t, expectedJSON+"\n", output, "Incorrect json output.")
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj, obj}, false, true, "json")
		return nil
	})
	assert.Equal(t, expectedJSON+"\n---\n"+expectedJSON+"\n", output, "Incorrect json output with multiple objs.")
	// Wide output including the RESOURCE NAME column.
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj}, true, true, "wide")
		return nil
	})
	assert.Contains(t, output, "FIELD       RESOURCE NAME  VALUE", "Missing or incorrect header line for table print with showing names.")
	assert.Contains(t, output, "apiVersion  unit-test      vX", "Missing or incorrect row in table related to apiVersion with showing names.")
	assert.Contains(t, output, "kind        unit-test      test", "Missing or incorrect line in the table related to kind with showing names.")
	assert.Contains(t, output, "spec.testfield  unit-test      testvalue", "Missing or incorrect line in the table related to spec.testfield with showing names.")
	assert.NotContains(t, output, "metadata.name   unit-test      testvalue", "Missing or incorrect line in the table related to metadata.name with showing names.")
	// Wide output without the RESOURCE NAME column.
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj}, true, false, "wide")
		return nil
	})
	assert.Contains(t, output, "FIELD       VALUE", "Missing or incorrect header line for table print with not showing names.")
	assert.Contains(t, output, "apiVersion  vX", "Missing or incorrect row in table related to apiVersion with not showing names.")
	assert.Contains(t, output, "kind        test", "Missing or incorrect row in the table related to kind with not showing names.")
	// Fixed typos in failure messages: "testefield" -> "testfield", "tbale" -> "table".
	assert.Contains(t, output, "spec.testfield  testvalue", "Missing or incorrect row in the table related to spec.testfield with not showing names.")
	assert.NotContains(t, output, "metadata.name   testvalue", "Missing or incorrect row in the table related to metadata.name with not showing names.")
}
// TestPrintManifests_FilterNestedListObject_Wide verifies that wide (table)
// output flattens objects nested inside lists using [i] index notation.
func TestPrintManifests_FilterNestedListObject_Wide(t *testing.T) {
	obj := unstructured.Unstructured{
		Object: map[string]any{
			"apiVersion": "vX",
			"kind":       "kind",
			"metadata": map[string]any{
				"name": "unit-test",
			},
			"status": map[string]any{
				"podIPs": []map[string]any{
					{
						"IP": "127.0.0.1",
					},
					{
						"IP": "127.0.0.2",
					},
				},
			},
		},
	}
	output, _ := captureOutput(func() error {
		// Round-trip through JSON so nested values take the generic
		// []any / map[string]any shapes produced by real API responses.
		v, err := json.Marshal(&obj)
		if err != nil {
			// Propagate instead of silently returning nil so a marshal
			// failure is not mistaken for empty output.
			return err
		}
		var obj2 *unstructured.Unstructured
		// v is already []byte; no conversion needed.
		if err := json.Unmarshal(v, &obj2); err != nil {
			return err
		}
		printManifests(&[]unstructured.Unstructured{*obj2}, false, true, "wide")
		return nil
	})
	// Verify table header
	assert.Contains(t, output, "FIELD                RESOURCE NAME  VALUE", "Missing a line in the table")
	assert.Contains(t, output, "apiVersion           unit-test      vX", "Test for apiVersion field failed for wide output")
	assert.Contains(t, output, "kind                 unit-test      kind", "Test for kind field failed for wide output")
	assert.Contains(t, output, "status.podIPs[0].IP  unit-test      127.0.0.1", "Test for podIP array index 0 field failed for wide output")
	assert.Contains(t, output, "status.podIPs[1].IP  unit-test      127.0.0.2", "Test for podIP array index 1 field failed for wide output")
}

View File

@@ -1,22 +1,16 @@
package commands
import (
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"text/tabwriter"
"gopkg.in/yaml.v3"
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/utils"
"github.com/argoproj/argo-cd/v3/cmd/util"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
@@ -28,273 +22,15 @@ import (
utilio "github.com/argoproj/argo-cd/v3/util/io"
)
// NewApplicationGetResourceCommand returns a new instance of the `app get-resource` command.
// It fetches the live manifests of resources in an application, optionally narrowed to a
// single resource by name and/or to specific manifest fields via --filter-fields.
func NewApplicationGetResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
	var (
		resourceName      string   // optional: restrict output to a single resource by name
		kind              string   // required: resource kind to match
		project           string   // optional: project the resource belongs to
		filteredFields    []string // dotted field paths to extract from each manifest
		showManagedFields bool     // include metadata.managedFields in the output
		output            string   // one of: wide, yaml, json
	)
	command := &cobra.Command{
		Use:   "get-resource APPNAME",
		Short: "Get details about the live Kubernetes manifests of a resource in an application. The filter-fields flag can be used to only display fields you want to see.",
		Example: `
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in wide format
argocd app get-resource my-app --kind Pod --resource-name my-app-pod
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in yaml format
argocd app get-resource my-app --kind Pod --resource-name my-app-pod -o yaml
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in json format
argocd app get-resource my-app --kind Pod --resource-name my-app-pod -o json
# Get details about all Pods in the application
argocd app get-resource my-app --kind Pod
# Get a specific resource with managed fields, Pod my-app-pod, in 'my-app' by name in wide format
argocd app get-resource my-app --kind Pod --resource-name my-app-pod --show-managed-fields
# Get the details of a specific field in a resource in 'my-app' in the wide format
argocd app get-resource my-app --kind Pod --filter-fields status.podIP
# Get the details of multiple specific fields in a specific resource in 'my-app' in the wide format
argocd app get-resource my-app --kind Pod --resource-name my-app-pod --filter-fields status.podIP,status.hostIP`,
	}
	command.Run = func(c *cobra.Command, args []string) {
		ctx := c.Context()
		if len(args) != 1 {
			c.HelpFunc()(c, args)
			os.Exit(1)
		}
		appName, appNs := argo.ParseFromQualifiedName(args[0], "")
		conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie()
		defer utilio.Close(conn)
		tree, err := appIf.ResourceTree(ctx, &applicationpkg.ResourcesQuery{
			ApplicationName: &appName,
			AppNamespace:    &appNs,
		})
		errors.CheckError(err)
		// Get manifests of resources.
		// If resource name is "" find all resources of that kind.
		var resources []unstructured.Unstructured
		var fetchedNames []string
		for _, r := range tree.Nodes {
			if (resourceName != "" && r.Name != resourceName) || r.Kind != kind {
				continue
			}
			resource, err := appIf.GetResource(ctx, &applicationpkg.ApplicationResourceRequest{
				Name:         &appName,
				AppNamespace: &appNs,
				Group:        &r.Group,
				Kind:         &r.Kind,
				Namespace:    &r.Namespace,
				Project:      &project,
				ResourceName: &r.Name,
				Version:      &r.Version,
			})
			errors.CheckError(err)
			manifest := resource.GetManifest()
			var obj *unstructured.Unstructured
			err = json.Unmarshal([]byte(manifest), &obj)
			errors.CheckError(err)
			if !showManagedFields {
				unstructured.RemoveNestedField(obj.Object, "metadata", "managedFields")
			}
			if len(filteredFields) != 0 {
				obj = filterFieldsFromObject(obj, filteredFields)
			}
			fetchedNames = append(fetchedNames, obj.GetName())
			resources = append(resources, *obj)
		}
		printManifests(&resources, len(filteredFields) > 0, resourceName == "", output)
		log.Infof("Resources '%s' fetched", strings.Join(fetchedNames, ", "))
	}
	command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource, if none is included will output details of all resources with specified kind")
	command.Flags().StringVar(&kind, "kind", "", "Kind of resource [REQUIRED]")
	err := command.MarkFlagRequired("kind")
	errors.CheckError(err)
	command.Flags().StringVar(&project, "project", "", "Project of resource")
	command.Flags().StringSliceVar(&filteredFields, "filter-fields", nil, "A comma separated list of fields to display, if not provided will output the entire manifest")
	command.Flags().BoolVar(&showManagedFields, "show-managed-fields", false, "Show managed fields in the output manifest")
	command.Flags().StringVarP(&output, "output", "o", "wide", "Format of the output, wide, yaml, or json")
	return command
}
// filterFieldsFromObject creates a new unstructured object containing only the
// specified fields from the source object. Each entry of filteredFields is a
// dot-separated path (e.g. "status.podIP"). Paths that do not resolve directly
// are assumed to traverse a list of objects and are handled by extractNestedItem.
func filterFieldsFromObject(obj *unstructured.Unstructured, filteredFields []string) *unstructured.Unstructured {
	var filteredObj unstructured.Unstructured
	filteredObj.Object = make(map[string]any)
	for _, f := range filteredFields {
		fields := strings.Split(f, ".")
		value, exists, err := unstructured.NestedFieldCopy(obj.Object, fields...)
		if exists {
			errors.CheckError(err)
			err = unstructured.SetNestedField(filteredObj.Object, value, fields...)
			errors.CheckError(err)
		} else {
			// If the path doesn't resolve directly, assume it is nested inside a
			// list of objects. Merge the reconstructed result instead of replacing
			// the whole map, so fields accumulated from earlier filter entries
			// are preserved when filters mix direct and list-nested paths.
			for k, v := range extractNestedItem(obj.Object, fields, 0) {
				filteredObj.Object[k] = v
			}
		}
	}
	filteredObj.SetName(obj.GetName())
	return &filteredObj
}
// extractNestedItem recursively extracts an item that may be nested inside a list of objects.
// It walks the path in fields one segment at a time until it finds a []any value, extracts
// the remaining path from each list element, and rebuilds the original nesting around the
// extracted items via reconstructObject. Returns nil when the path is exhausted.
func extractNestedItem(obj map[string]any, fields []string, depth int) map[string]any {
	// Base case: the whole path has been consumed without finding a list.
	if depth >= len(fields) {
		return nil
	}
	value, exists, _ := unstructured.NestedFieldCopy(obj, fields[:depth+1]...)
	list, ok := value.([]any)
	if !exists || !ok {
		// No list at this prefix; try one path segment deeper.
		return extractNestedItem(obj, fields, depth+1)
	}
	// Extract the remainder of the path directly from each list element.
	extractedItems := extractItemsFromList(list, fields[depth+1:])
	if len(extractedItems) == 0 {
		// Direct extraction found nothing; the target may be nested inside a
		// further list within each element, so recurse into each object element.
		for _, e := range list {
			if o, ok := e.(map[string]any); ok {
				result := extractNestedItem(o, fields[depth+1:], 0)
				extractedItems = append(extractedItems, result)
			}
		}
	}
	// Re-nest the extracted items under the path prefix that led to the list.
	filteredObj := reconstructObject(extractedItems, fields, depth)
	return filteredObj
}
// extractItemsFromList processes a list of objects and extracts the value at
// the given field path from each object element. Elements that are not objects,
// or that do not contain the path, are skipped. Each extracted value is wrapped
// in a fresh map keyed by the same path.
func extractItemsFromList(list []any, fields []string) []any {
	// Fixed misspelled local: extratedObjs -> extractedObjs.
	var extractedObjs []any
	for _, e := range list {
		if o, ok := e.(map[string]any); ok {
			value, exists, _ := unstructured.NestedFieldCopy(o, fields...)
			if !exists {
				continue
			}
			// Allocate the result map only once we know the field exists.
			extractedObj := make(map[string]any)
			err := unstructured.SetNestedField(extractedObj, value, fields...)
			errors.CheckError(err)
			extractedObjs = append(extractedObjs, extractedObj)
		}
	}
	return extractedObjs
}
// reconstructObject rebuilds the original object structure by placing extracted
// items back into their proper nested location: the list of extracted items is
// set under the first depth+1 segments of fields.
func reconstructObject(extracted []any, fields []string, depth int) map[string]any {
	obj := make(map[string]any)
	err := unstructured.SetNestedField(obj, extracted, fields[:depth+1]...)
	errors.CheckError(err)
	return obj
}
// printManifests outputs resource manifests in the specified format (wide, JSON, or YAML).
// Multiple manifests in json/yaml mode are separated by a "---" document separator.
// In wide (table) mode, filteredFields controls whether metadata.name is stripped from
// the rows (it is shown in the RESOURCE NAME column instead when showName is true).
func printManifests(objs *[]unstructured.Unstructured, filteredFields bool, showName bool, output string) {
	if output == "json" || output == "yaml" {
		for i, o := range *objs {
			var formattedManifest []byte
			var err error
			if output == "json" {
				formattedManifest, err = json.MarshalIndent(o.Object, "", " ")
			} else {
				formattedManifest, err = yaml.Marshal(o.Object)
			}
			errors.CheckError(err)
			fmt.Println(string(formattedManifest))
			if len(*objs) > 1 && i != len(*objs)-1 {
				fmt.Println("---")
			}
		}
		return
	}
	// Table output: the tabwriter (and its header) is only created on this
	// path, so no header bytes are buffered and discarded for json/yaml.
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	if showName {
		fmt.Fprintf(w, "FIELD\tRESOURCE NAME\tVALUE\n")
	} else {
		fmt.Fprintf(w, "FIELD\tVALUE\n")
	}
	for _, o := range *objs {
		name := o.GetName()
		if filteredFields {
			// The name is shown in its own column; drop it from the rows.
			unstructured.RemoveNestedField(o.Object, "metadata", "name")
		}
		printManifestAsTable(w, name, showName, o.Object, "")
	}
	err := w.Flush()
	errors.CheckError(err)
}
// printManifestAsTable recursively prints a manifest object as a tabular view with nested fields flattened.
func printManifestAsTable(w *tabwriter.Writer, name string, showName bool, obj map[string]any, parentField string) {
for key, value := range obj {
field := parentField + key
switch v := value.(type) {
case map[string]any:
printManifestAsTable(w, name, showName, v, field+".")
case []any:
for i, e := range v {
index := "[" + strconv.Itoa(i) + "]"
if innerObj, ok := e.(map[string]any); ok {
printManifestAsTable(w, name, showName, innerObj, field+index+".")
} else {
if showName {
fmt.Fprintf(w, "%v\t%v\t%v\n", field+index, name, e)
} else {
fmt.Fprintf(w, "%v\t%v\n", field+index, e)
}
}
}
default:
if showName {
fmt.Fprintf(w, "%v\t%v\t%v\n", field, name, v)
} else {
fmt.Fprintf(w, "%v\t%v\n", field, v)
}
}
}
}
func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
patch string
patchType string
resourceName string
namespace string
kind string
group string
all bool
project string
)
var patch string
var patchType string
var resourceName string
var namespace string
var kind string
var group string
var all bool
var project string
command := &cobra.Command{
Use: "patch-resource APPNAME",
Short: "Patch resource in an application",
@@ -354,16 +90,14 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
}
func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
resourceName string
namespace string
kind string
group string
force bool
orphan bool
all bool
project string
)
var resourceName string
var namespace string
var kind string
var group string
var force bool
var orphan bool
var all bool
var project string
command := &cobra.Command{
Use: "delete-resource APPNAME",
Short: "Delete resource in an application",
@@ -519,16 +253,13 @@ func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.Appli
}
}
}
err := w.Flush()
errors.CheckError(err)
_ = w.Flush()
}
func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
orphaned bool
output string
project string
)
var orphaned bool
var output string
var project string
command := &cobra.Command{
Use: "resources APPNAME",
Short: "List resource of application",

View File

@@ -2228,15 +2228,10 @@ func (c *fakeAppServiceClient) ListResourceActions(_ context.Context, _ *applica
return nil, nil
}
// nolint:staticcheck // ResourceActionRunRequest is deprecated, but we still need to implement it to satisfy the server interface.
func (c *fakeAppServiceClient) RunResourceAction(_ context.Context, _ *applicationpkg.ResourceActionRunRequest, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
return nil, nil
}
func (c *fakeAppServiceClient) RunResourceActionV2(_ context.Context, _ *applicationpkg.ResourceActionRunRequestV2, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
return nil, nil
}
func (c *fakeAppServiceClient) DeleteResource(_ context.Context, _ *applicationpkg.ApplicationResourceDeleteRequest, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
return nil, nil
}
@@ -2253,10 +2248,6 @@ func (c *fakeAppServiceClient) ListResourceLinks(_ context.Context, _ *applicati
return nil, nil
}
func (c *fakeAppServiceClient) ServerSideDiff(_ context.Context, _ *applicationpkg.ApplicationServerSideDiffQuery, _ ...grpc.CallOption) (*applicationpkg.ApplicationServerSideDiffResponse, error) {
return nil, nil
}
type fakeAcdClient struct {
simulateTimeout uint
}

View File

@@ -6,8 +6,6 @@ import (
"github.com/spf13/cobra"
"golang.org/x/crypto/bcrypt"
"github.com/argoproj/argo-cd/v3/util/cli"
)
// NewBcryptCmd represents the bcrypt command
@@ -17,25 +15,22 @@ func NewBcryptCmd() *cobra.Command {
Use: "bcrypt",
Short: "Generate bcrypt hash for any password",
Example: `# Generate bcrypt hash for any password
argocd account bcrypt --password YOUR_PASSWORD
# Prompt for password input
argocd account bcrypt
# Read password from stdin
echo -e "password" | argocd account bcrypt`,
argocd account bcrypt --password YOUR_PASSWORD`,
Run: func(cmd *cobra.Command, _ []string) {
password = cli.PromptPassword(password)
bytePassword := []byte(password)
// Hashing the password
hash, err := bcrypt.GenerateFromPassword(bytePassword, bcrypt.DefaultCost)
if err != nil {
log.Fatalf("Failed to generate bcrypt hash: %v", err)
log.Fatalf("Failed to genarate bcrypt hash: %v", err)
}
fmt.Fprint(cmd.OutOrStdout(), string(hash))
},
}
bcryptCmd.Flags().StringVar(&password, "password", "", "Password for which bcrypt hash is generated")
err := bcryptCmd.MarkFlagRequired("password")
if err != nil {
return nil
}
return bcryptCmd
}

View File

@@ -2,11 +2,9 @@ package commands
import (
"bytes"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/bcrypt"
)
@@ -22,27 +20,3 @@ func TestGeneratePassword(t *testing.T) {
err = bcrypt.CompareHashAndPassword(output.Bytes(), []byte("abc"))
assert.NoError(t, err)
}
func TestGeneratePasswordWithStdin(t *testing.T) {
oldStdin := os.Stdin
defer func() {
os.Stdin = oldStdin
}()
input := bytes.NewBufferString("abc\n")
r, w, _ := os.Pipe()
_, _ = w.Write(input.Bytes())
w.Close()
os.Stdin = r
bcryptCmd := NewBcryptCmd()
bcryptCmd.SetArgs([]string{})
output := new(bytes.Buffer)
bcryptCmd.SetOut(output)
err := bcryptCmd.Execute()
require.NoError(t, err)
err = bcrypt.CompareHashAndPassword(output.Bytes(), []byte("abc"))
assert.NoError(t, err)
}

View File

@@ -2,7 +2,6 @@ package commands
import (
"fmt"
"os"
"strconv"
"github.com/spf13/cobra"
@@ -28,10 +27,6 @@ argocd configure --prompts-enabled=false`,
Run: func(_ *cobra.Command, _ []string) {
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
errors.CheckError(err)
if localCfg == nil {
fmt.Println("No local configuration found")
os.Exit(1)
}
localCfg.PromptsEnabled = promptsEnabled

View File

@@ -42,7 +42,6 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
username string
password string
sso bool
callback string
ssoPort int
skipTestTLS bool
ssoLaunchBrowser bool
@@ -139,7 +138,7 @@ argocd login cd.argoproj.io --core`,
errors.CheckError(err)
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
errors.CheckError(err)
tokenString, refreshToken = oauth2Login(ctx, callback, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider, ssoLaunchBrowser)
tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider, ssoLaunchBrowser)
}
parser := jwt.NewParser(jwt.WithoutClaimsValidation())
claims := jwt.MapClaims{}
@@ -186,8 +185,8 @@ argocd login cd.argoproj.io --core`,
command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate")
command.Flags().BoolVar(&sso, "sso", false, "Perform SSO login")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application")
command.Flags().StringVar(&callback, "callback", "", "Scheme, Host and Port for the callback URL")
command.Flags().BoolVar(&skipTestTLS, "skip-test-tls", false, "Skip testing whether the server is configured with TLS (this can help when the command hangs for no apparent reason)")
command.Flags().
BoolVar(&skipTestTLS, "skip-test-tls", false, "Skip testing whether the server is configured with TLS (this can help when the command hangs for no apparent reason)")
command.Flags().BoolVar(&ssoLaunchBrowser, "sso-launch-browser", true, "Automatically launch the system default browser when performing SSO login")
return command
}
@@ -206,19 +205,13 @@ func userDisplayName(claims jwt.MapClaims) string {
// returns the JWT token and a refresh token (if supported)
func oauth2Login(
ctx context.Context,
callback string,
port int,
oidcSettings *settingspkg.OIDCConfig,
oauth2conf *oauth2.Config,
provider *oidc.Provider,
ssoLaunchBrowser bool,
) (string, string) {
redirectBase := callback
if redirectBase == "" {
redirectBase = "http://localhost:" + strconv.Itoa(port)
}
oauth2conf.RedirectURL = redirectBase + "/auth/callback"
oauth2conf.RedirectURL = fmt.Sprintf("http://localhost:%d/auth/callback", port)
oidcConf, err := oidcutil.ParseConfig(provider)
errors.CheckError(err)
log.Debug("OIDC Configuration:")

View File

@@ -19,12 +19,9 @@ func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comma
Use: "logout CONTEXT",
Short: "Log out from Argo CD",
Long: "Log out from Argo CD",
Example: `# Logout from the active Argo CD context
Example: `# To log out of argocd
$ argocd logout
# This can be helpful for security reasons or when you want to switch between different Argo CD contexts or accounts.
argocd logout CONTEXT
# Logout from a specific context named 'cd.argoproj.io'
argocd logout cd.argoproj.io
`,
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {

View File

@@ -605,17 +605,8 @@ ID ISSUED-AT EXPIRES-AT
fmt.Printf(printRoleFmtStr, "Description:", role.Description)
fmt.Printf("Policies:\n")
fmt.Printf("%s\n", proj.ProjectPoliciesString())
fmt.Printf("Groups:\n")
// if the group exists in the role
// range over each group and print it
if v1alpha1.RoleGroupExists(role) {
for _, group := range role.Groups {
fmt.Printf(" - %s\n", group)
}
} else {
fmt.Println("<none>")
}
fmt.Printf("JWT Tokens:\n")
// TODO(jessesuen): print groups
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ID\tISSUED-AT\tEXPIRES-AT\n")
for _, token := range proj.Status.JWTTokensByRole[roleName].Items {

View File

@@ -21,7 +21,6 @@ import (
func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
password string
callback string
ssoPort int
ssoLaunchBrowser bool
)
@@ -74,7 +73,7 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
errors.CheckError(err)
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
errors.CheckError(err)
tokenString, refreshToken = oauth2Login(ctx, callback, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider, ssoLaunchBrowser)
tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider, ssoLaunchBrowser)
}
localCfg.UpsertUser(localconfig.User{
@@ -101,7 +100,6 @@ argocd login cd.argoproj.io --core
}
command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application")
command.Flags().StringVar(&callback, "callback", "", "Host and Port for the callback URL")
command.Flags().BoolVar(&ssoLaunchBrowser, "sso-launch-browser", true, "Automatically launch the default browser when performing SSO login")
return command
}

View File

@@ -270,19 +270,6 @@ func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
command := &cobra.Command{
Use: "rm REPO ...",
Short: "Remove configured repositories",
Example: `
# Remove a single repository
argocd repo rm https://github.com/yourusername/your-repo.git
# Remove multiple repositories
argocd repo rm https://github.com/yourusername/your-repo.git https://git.example.com/repo2.git
# Remove repositories for a specific project
argocd repo rm https://github.com/yourusername/your-repo.git --project myproject
# Remove repository using SSH URL
argocd repo rm git@github.com:yourusername/your-repo.git
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -343,44 +330,22 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
command := &cobra.Command{
Use: "list",
Short: "List configured repositories",
Example: `
# List all repositories
argocd repo list
# List repositories in wide format
argocd repo list -o wide
# List repositories in YAML format
argocd repo list -o yaml
# List repositories in JSON format
argocd repo list -o json
# List urls of repositories
argocd repo list -o url
# Force refresh of cached repository connection status
argocd repo list --refresh hard
`,
Run: func(c *cobra.Command, _ []string) {
ctx := c.Context()
conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoClientOrDie()
defer utilio.Close(conn)
forceRefresh := false
switch refresh {
case "":
case "hard":
forceRefresh = true
default:
err := fmt.Errorf("unknown refresh value: %s. Supported values: hard", refresh)
err := stderrors.New("--refresh must be one of: 'hard'")
errors.CheckError(err)
}
repos, err := repoIf.ListRepositories(ctx, &repositorypkg.RepoQuery{ForceRefresh: forceRefresh})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(repos.Items, output, false)
@@ -391,12 +356,12 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
case "wide", "":
printRepoTable(repos.Items)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s. Supported formats: yaml|json|url|wide", output))
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. Supported formats: yaml|json|url|wide")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status. Supported values: hard")
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status , must be one of: 'hard'")
return command
}
@@ -407,26 +372,9 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
refresh string
project string
)
// For better readability and easier formatting
repoGetExamples := `
# Get Git or Helm repository details in wide format (default, '-o wide')
argocd repo get https://git.example.com/repos/repo
# Get repository details in YAML format
argocd repo get https://git.example.com/repos/repo -o yaml
# Get repository details in JSON format
argocd repo get https://git.example.com/repos/repo -o json
# Get repository URL
argocd repo get https://git.example.com/repos/repo -o url
`
command := &cobra.Command{
Use: "get REPO",
Short: "Get a configured repository by URL",
Example: repoGetExamples,
Use: "get REPO",
Short: "Get a configured repository by URL",
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -445,12 +393,11 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
case "hard":
forceRefresh = true
default:
err := fmt.Errorf("unknown refresh value: %s. Supported values: hard", refresh)
err := stderrors.New("--refresh must be one of: 'hard'")
errors.CheckError(err)
}
repo, err := repoIf.Get(ctx, &repositorypkg.RepoQuery{Repo: repoURL, ForceRefresh: forceRefresh, AppProject: project})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResource(repo, output)
@@ -461,13 +408,13 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
case "wide", "":
printRepoTable(appsv1.Repositories{repo})
default:
errors.CheckError(fmt.Errorf("unknown output format: %s. Supported formats: yaml|json|url|wide", output))
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVar(&project, "project", "", "project of the repository")
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status. Supported values: hard")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status , must be one of: 'hard'")
return command
}

View File

@@ -20,6 +20,7 @@ import (
reposerver "github.com/argoproj/argo-cd/v3/cmd/argocd-repo-server/commands"
apiserver "github.com/argoproj/argo-cd/v3/cmd/argocd-server/commands"
cli "github.com/argoproj/argo-cd/v3/cmd/argocd/commands"
"github.com/argoproj/argo-cd/v3/cmd/util"
"github.com/argoproj/argo-cd/v3/util/log"
)
@@ -73,6 +74,7 @@ func main() {
command = cli.NewCommand()
isArgocdCLI = true
}
util.SetAutoMaxProcs(isArgocdCLI)
if isArgocdCLI {
// silence errors and usages since we'll be printing them manually.

View File

@@ -10,6 +10,8 @@ import (
"strings"
"time"
"go.uber.org/automaxprocs/maxprocs"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
@@ -90,7 +92,6 @@ type AppOptions struct {
retryBackoffDuration time.Duration
retryBackoffMaxDuration time.Duration
retryBackoffFactor int64
retryRefresh bool
ref string
SourceName string
drySourceRepo string
@@ -101,6 +102,19 @@ type AppOptions struct {
hydrateToBranch string
}
// SetAutoMaxProcs sets the GOMAXPROCS value based on the binary name.
// It suppresses logs for CLI binaries and logs the setting for services.
func SetAutoMaxProcs(isCLI bool) {
if isCLI {
_, _ = maxprocs.Set() // Intentionally ignore errors for CLI binaries
} else {
_, err := maxprocs.Set(maxprocs.Logger(log.Infof))
if err != nil {
log.Errorf("Error setting GOMAXPROCS: %v", err)
}
}
}
func AddAppFlags(command *cobra.Command, opts *AppOptions) {
command.Flags().StringVar(&opts.repoURL, "repo", "", "Repository URL, ignored if a file is set")
command.Flags().StringVar(&opts.appPath, "path", "", "Path in repository to the app directory, ignored if a file is set")
@@ -169,7 +183,6 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
command.Flags().DurationVar(&opts.retryBackoffDuration, "sync-retry-backoff-duration", argoappv1.DefaultSyncRetryDuration, "Sync retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)")
command.Flags().DurationVar(&opts.retryBackoffMaxDuration, "sync-retry-backoff-max-duration", argoappv1.DefaultSyncRetryMaxDuration, "Max sync retry backoff duration. Input needs to be a duration (e.g. 2m, 1h)")
command.Flags().Int64Var(&opts.retryBackoffFactor, "sync-retry-backoff-factor", argoappv1.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed sync retry")
command.Flags().BoolVar(&opts.retryRefresh, "sync-retry-refresh", false, "Indicates if the latest revision should be used on retry instead of the initial one")
command.Flags().StringVar(&opts.ref, "ref", "", "Ref is reference to another source within sources field")
command.Flags().StringVar(&opts.SourceName, "source-name", "", "Name of the source from the list of sources of the app.")
}
@@ -263,7 +276,6 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
MaxDuration: appOpts.retryBackoffMaxDuration.String(),
Factor: ptr.To(appOpts.retryBackoffFactor),
},
Refresh: appOpts.retryRefresh,
}
case appOpts.retryLimit == 0:
if spec.SyncPolicy.IsZero() {
@@ -274,14 +286,6 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
default:
log.Fatalf("Invalid sync-retry-limit [%d]", appOpts.retryLimit)
}
case "sync-retry-refresh":
if spec.SyncPolicy == nil {
spec.SyncPolicy = &argoappv1.SyncPolicy{}
}
if spec.SyncPolicy.Retry == nil {
spec.SyncPolicy.Retry = &argoappv1.RetryStrategy{}
}
spec.SyncPolicy.Retry.Refresh = appOpts.retryRefresh
}
})
if flags.Changed("auto-prune") {

View File

@@ -1,6 +1,7 @@
package util
import (
"bytes"
"log"
"os"
"testing"
@@ -274,13 +275,6 @@ func Test_setAppSpecOptions(t *testing.T) {
require.NoError(t, f.SetFlag("sync-retry-limit", "0"))
assert.Nil(t, f.spec.SyncPolicy.Retry)
})
t.Run("RetryRefresh", func(t *testing.T) {
require.NoError(t, f.SetFlag("sync-retry-refresh", "true"))
assert.True(t, f.spec.SyncPolicy.Retry.Refresh)
require.NoError(t, f.SetFlag("sync-retry-refresh", "false"))
assert.False(t, f.spec.SyncPolicy.Retry.Refresh)
})
t.Run("Kustomize", func(t *testing.T) {
require.NoError(t, f.SetFlag("kustomize-replica", "my-deployment=2"))
require.NoError(t, f.SetFlag("kustomize-replica", "my-statefulset=4"))
@@ -579,3 +573,27 @@ func TestFilterResources(t *testing.T) {
assert.Nil(t, filteredResources)
})
}
// TestSetAutoMaxProcs verifies the logging behavior of SetAutoMaxProcs by
// redirecting the standard library's default logger into a buffer.
func TestSetAutoMaxProcs(t *testing.T) {
	t.Run("CLI mode ignores errors", func(t *testing.T) {
		// Capture the default logger's output for inspection and restore
		// the original writer when the subtest ends.
		logBuffer := &bytes.Buffer{}
		oldLogger := log.Default()
		log.SetOutput(logBuffer)
		defer log.SetOutput(oldLogger.Writer())
		SetAutoMaxProcs(true)
		// In CLI mode SetAutoMaxProcs is expected to produce no log output at all.
		assert.Empty(t, logBuffer.String(), "Expected no log output when isCLI is true")
	})
	t.Run("Non-CLI mode logs error on failure", func(t *testing.T) {
		logBuffer := &bytes.Buffer{}
		oldLogger := log.Default()
		log.SetOutput(logBuffer)
		defer log.SetOutput(oldLogger.Writer())
		SetAutoMaxProcs(false)
		// NOTE(review): despite the subtest name, this asserts that the
		// GOMAXPROCS error is ABSENT — presumably because setting
		// GOMAXPROCS succeeds in the test environment. Confirm the
		// intended assertion with the original author.
		assert.NotContains(t, logBuffer.String(), "Error setting GOMAXPROCS", "Unexpected log output detected")
	})
}

View File

@@ -10,7 +10,6 @@ import (
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
log "github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -45,14 +44,15 @@ func NewConnection(address string) (*grpc.ClientConn, error) {
}
unaryInterceptors := []grpc.UnaryClientInterceptor{grpc_retry.UnaryClientInterceptor(retryOpts...)}
dialOpts := []grpc.DialOption{
grpc.WithStreamInterceptor(grpc_util.RetryOnlyForServerStreamInterceptor(retryOpts...)),
grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)),
grpc.WithChainUnaryInterceptor(unaryInterceptors...),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxGRPCMessageSize), grpc.MaxCallSendMsgSize(MaxGRPCMessageSize)),
grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
grpc.WithUnaryInterceptor(grpc_util.OTELUnaryClientInterceptor()),
grpc.WithStreamInterceptor(grpc_util.OTELStreamClientInterceptor()),
}
dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
conn, err := grpc_util.BlockingNewClient(context.Background(), "unix", address, nil, dialOpts...)
conn, err := grpc_util.BlockingDial(context.Background(), "unix", address, nil, dialOpts...)
if err != nil {
log.Errorf("Unable to connect to config management plugin service with address %s", address)
return nil, err

View File

@@ -49,11 +49,13 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
serverLog := log.NewEntry(log.StandardLogger())
streamInterceptors := []grpc.StreamServerInterceptor{
otelgrpc.StreamServerInterceptor(), //nolint:staticcheck // TODO: ignore SA1019 for depreciation: see https://github.com/argoproj/argo-cd/issues/18258
logging.StreamServerInterceptor(grpc_util.InterceptorLogger(serverLog)),
serverMetrics.StreamServerInterceptor(),
recovery.StreamServerInterceptor(recovery.WithRecoveryHandler(grpc_util.LoggerRecoveryHandler(serverLog))),
}
unaryInterceptors := []grpc.UnaryServerInterceptor{
otelgrpc.UnaryServerInterceptor(), //nolint:staticcheck // TODO: ignore SA1019 for depreciation: see https://github.com/argoproj/argo-cd/issues/18258
logging.UnaryServerInterceptor(grpc_util.InterceptorLogger(serverLog)),
serverMetrics.UnaryServerInterceptor(),
recovery.UnaryServerInterceptor(recovery.WithRecoveryHandler(grpc_util.LoggerRecoveryHandler(serverLog))),
@@ -69,7 +71,6 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
MinTime: common.GetGRPCKeepAliveEnforcementMinimum(),
},
),
grpc.StatsHandler(otelgrpc.NewServerHandler()),
}
return &ArgoCDCMPServer{

View File

@@ -40,7 +40,9 @@ func NewConnection(address string) (*grpc.ClientConn, error) {
var opts []grpc.DialOption
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
conn, err := grpc.NewClient(address, opts...)
// TODO: switch to grpc.NewClient.
//nolint:staticcheck
conn, err := grpc.Dial(address, opts...)
if err != nil {
log.Errorf("Unable to connect to commit service with address %s", address)
return nil, err

View File

@@ -1,14 +1,101 @@
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package mocks
import (
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
utilio "github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/argo-cd/v3/util/io"
mock "github.com/stretchr/testify/mock"
)
type Clientset struct {
CommitServiceClient apiclient.CommitServiceClient
// NewClientset creates a new instance of Clientset. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewClientset(t interface {
	mock.TestingT
	Cleanup(func())
}) *Clientset {
	mock := &Clientset{}
	mock.Mock.Test(t)
	// Automatically verify that every expectation set on the mock was met
	// when the test (or subtest) that created it finishes.
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}
func (c *Clientset) NewCommitServerClient() (utilio.Closer, apiclient.CommitServiceClient, error) {
return utilio.NopCloser, c.CommitServiceClient, nil
// Clientset is an autogenerated mock type for the Clientset type
type Clientset struct {
mock.Mock
}
type Clientset_Expecter struct {
mock *mock.Mock
}
func (_m *Clientset) EXPECT() *Clientset_Expecter {
return &Clientset_Expecter{mock: &_m.Mock}
}
// NewCommitServerClient provides a mock function for the type Clientset
//
// Mockery-generated dispatch: the configured return values may be supplied
// either as a single function producing all three results, as per-position
// functions, or as plain values; each case is checked in turn below.
func (_mock *Clientset) NewCommitServerClient() (io.Closer, apiclient.CommitServiceClient, error) {
	ret := _mock.Called()

	// Fail loudly if the test set up the call without Return/RunAndReturn.
	if len(ret) == 0 {
		panic("no return value specified for NewCommitServerClient")
	}

	var r0 io.Closer
	var r1 apiclient.CommitServiceClient
	var r2 error
	// A single function returning the full result tuple takes precedence.
	if returnFunc, ok := ret.Get(0).(func() (io.Closer, apiclient.CommitServiceClient, error)); ok {
		return returnFunc()
	}
	// Otherwise resolve each position independently: a producer function,
	// a concrete value, or (for position 0/1) nil left as the zero value.
	if returnFunc, ok := ret.Get(0).(func() io.Closer); ok {
		r0 = returnFunc()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(io.Closer)
		}
	}
	if returnFunc, ok := ret.Get(1).(func() apiclient.CommitServiceClient); ok {
		r1 = returnFunc()
	} else {
		if ret.Get(1) != nil {
			r1 = ret.Get(1).(apiclient.CommitServiceClient)
		}
	}
	if returnFunc, ok := ret.Get(2).(func() error); ok {
		r2 = returnFunc()
	} else {
		r2 = ret.Error(2)
	}
	return r0, r1, r2
}
// Clientset_NewCommitServerClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewCommitServerClient'
type Clientset_NewCommitServerClient_Call struct {
	*mock.Call
}

// NewCommitServerClient is a helper method to define mock.On call
func (_e *Clientset_Expecter) NewCommitServerClient() *Clientset_NewCommitServerClient_Call {
	return &Clientset_NewCommitServerClient_Call{Call: _e.mock.On("NewCommitServerClient")}
}

// Run registers a callback invoked whenever the mocked method is called.
func (_c *Clientset_NewCommitServerClient_Call) Run(run func()) *Clientset_NewCommitServerClient_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run()
	})
	return _c
}

// Return sets the fixed values the mocked method yields on every call.
func (_c *Clientset_NewCommitServerClient_Call) Return(closer io.Closer, commitServiceClient apiclient.CommitServiceClient, err error) *Clientset_NewCommitServerClient_Call {
	_c.Call.Return(closer, commitServiceClient, err)
	return _c
}

// RunAndReturn lets a function compute the return values per call.
func (_c *Clientset_NewCommitServerClient_Call) RunAndReturn(run func() (io.Closer, apiclient.CommitServiceClient, error)) *Clientset_NewCommitServerClient_Call {
	_c.Call.Return(run)
	return _c
}

View File

@@ -7,8 +7,6 @@ import (
"os"
"time"
"github.com/argoproj/argo-cd/v3/controller/hydrator"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
@@ -33,43 +31,6 @@ func NewService(gitCredsStore git.CredsStore, metricsServer *metrics.Server) *Se
}
}
type hydratorMetadataFile struct {
RepoURL string `json:"repoURL,omitempty"`
DrySHA string `json:"drySha,omitempty"`
Commands []string `json:"commands,omitempty"`
Author string `json:"author,omitempty"`
Date string `json:"date,omitempty"`
// Subject is the subject line of the DRY commit message, i.e. `git show --format=%s`.
Subject string `json:"subject,omitempty"`
// Body is the body of the DRY commit message, excluding the subject line, i.e. `git show --format=%b`.
// Known Argocd- trailers with valid values are removed, but all other trailers are kept.
Body string `json:"body,omitempty"`
References []v1alpha1.RevisionReference `json:"references,omitempty"`
}
// TODO: make this configurable via ConfigMap.
var manifestHydrationReadmeTemplate = `# Manifest Hydration
To hydrate the manifests in this repository, run the following commands:
` + "```shell" + `
git clone {{ .RepoURL }}
# cd into the cloned directory
git checkout {{ .DrySHA }}
{{ range $command := .Commands -}}
{{ $command }}
{{ end -}}` + "```" + `
{{ if .References -}}
## References
{{ range $ref := .References -}}
{{ if $ref.Commit -}}
* [{{ $ref.Commit.SHA | mustRegexFind "[0-9a-f]+" | trunc 7 }}]({{ $ref.Commit.RepoURL }}): {{ $ref.Commit.Subject }} ({{ $ref.Commit.Author }})
{{ end -}}
{{ end -}}
{{ end -}}`
// CommitHydratedManifests handles a commit request. It clones the repository, checks out the sync branch, checks out
// the target branch, clears the repository contents, writes the manifests to the repository, commits the changes, and
// pushes the changes. It returns the hydrated revision SHA and an error if one occurred.
@@ -157,25 +118,10 @@ func (s *Service) handleCommitRequest(logCtx *log.Entry, r *apiclient.CommitHydr
return out, "", fmt.Errorf("failed to checkout target branch: %w", err)
}
logCtx.Debug("Clearing and preparing paths")
var pathsToClear []string
// range over the paths configured and skip those application
// paths that are referencing to root path
for _, p := range r.Paths {
if hydrator.IsRootPath(p.Path) {
// skip adding paths that are referencing root directory
logCtx.Debugf("Path %s is referencing root directory, ignoring the path", p.Path)
continue
}
pathsToClear = append(pathsToClear, p.Path)
}
if len(pathsToClear) > 0 {
logCtx.Debugf("Clearing paths: %v", pathsToClear)
out, err := gitClient.RemoveContents(pathsToClear)
if err != nil {
return out, "", fmt.Errorf("failed to clear paths %v: %w", pathsToClear, err)
}
logCtx.Debug("Clearing repo contents")
out, err = gitClient.RemoveContents()
if err != nil {
return out, "", fmt.Errorf("failed to clear repo: %w", err)
}
logCtx.Debug("Writing manifests")
@@ -264,3 +210,39 @@ func (s *Service) initGitClient(logCtx *log.Entry, r *apiclient.CommitHydratedMa
return gitClient, dirPath, cleanupOrLog, nil
}
// hydratorMetadataFile is the JSON document written to the hydrator.metadata
// file next to hydrated manifests, recording where and how they were produced.
type hydratorMetadataFile struct {
	// RepoURL is the URL of the repository holding the DRY sources.
	RepoURL string `json:"repoURL,omitempty"`
	// DrySHA is the DRY commit SHA the manifests were hydrated from.
	DrySHA string `json:"drySha,omitempty"`
	// Commands lists the commands needed to reproduce the hydration.
	Commands []string `json:"commands,omitempty"`
	// Author is the author of the DRY commit.
	Author string `json:"author,omitempty"`
	// Date is the DRY commit date, formatted as RFC 3339.
	Date string `json:"date,omitempty"`
	// Subject is the subject line of the DRY commit message, i.e. `git show --format=%s`.
	Subject string `json:"subject,omitempty"`
	// Body is the body of the DRY commit message, excluding the subject line, i.e. `git show --format=%b`.
	Body string `json:"body,omitempty"`
	// References carries revision references from the DRY commit metadata.
	References []v1alpha1.RevisionReference `json:"references,omitempty"`
}
// TODO: make this configurable via ConfigMap.
// manifestHydrationReadmeTemplate is the text/template source (rendered with
// sprig functions) for the README.md committed alongside hydrated manifests.
// Its data value is a hydratorMetadataFile; see writeReadme.
var manifestHydrationReadmeTemplate = `# Manifest Hydration
To hydrate the manifests in this repository, run the following commands:
` + "```shell" + `
git clone {{ .RepoURL }}
# cd into the cloned directory
git checkout {{ .DrySHA }}
{{ range $command := .Commands -}}
{{ $command }}
{{ end -}}` + "```" + `
{{ if .References -}}
## References
{{ range $ref := .References -}}
{{ if $ref.Commit -}}
* [{{ $ref.Commit.SHA | mustRegexFind "[0-9a-f]+" | trunc 7 }}]({{ $ref.Commit.RepoURL }}): {{ $ref.Commit.Subject }} ({{ $ref.Commit.Author }})
{{ end -}}
{{ end -}}
{{ end -}}`

View File

@@ -99,6 +99,7 @@ func Test_CommitHydratedManifests(t *testing.T) {
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
mockGitClient.On("RemoveContents").Return("", nil).Once()
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
mockGitClient.On("CommitSHA").Return("it-worked!", nil).Once()
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
@@ -108,178 +109,6 @@ func Test_CommitHydratedManifests(t *testing.T) {
require.NotNil(t, resp)
assert.Equal(t, "it-worked!", resp.HydratedSha)
})
t.Run("root path with dot and blank - no directory removal", func(t *testing.T) {
t.Parallel()
service, mockRepoClientFactory := newServiceWithMocks(t)
mockGitClient := gitmocks.NewClient(t)
mockGitClient.On("Init").Return(nil).Once()
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
mockGitClient.On("CommitSHA").Return("root-and-blank-sha", nil).Once()
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
requestWithRootAndBlank := &apiclient.CommitHydratedManifestsRequest{
Repo: &v1alpha1.Repository{
Repo: "https://github.com/argoproj/argocd-example-apps.git",
},
TargetBranch: "main",
SyncBranch: "env/test",
CommitMessage: "test commit message",
Paths: []*apiclient.PathDetails{
{
Path: ".",
Manifests: []*apiclient.HydratedManifestDetails{
{
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-dot"}}`,
},
},
},
{
Path: "",
Manifests: []*apiclient.HydratedManifestDetails{
{
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-blank"}}`,
},
},
},
},
}
resp, err := service.CommitHydratedManifests(t.Context(), requestWithRootAndBlank)
require.NoError(t, err)
require.NotNil(t, resp)
assert.Equal(t, "root-and-blank-sha", resp.HydratedSha)
})
t.Run("subdirectory path - triggers directory removal", func(t *testing.T) {
t.Parallel()
service, mockRepoClientFactory := newServiceWithMocks(t)
mockGitClient := gitmocks.NewClient(t)
mockGitClient.On("Init").Return(nil).Once()
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
mockGitClient.On("RemoveContents", []string{"apps/staging"}).Return("", nil).Once()
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
mockGitClient.On("CommitSHA").Return("subdir-path-sha", nil).Once()
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
requestWithSubdirPath := &apiclient.CommitHydratedManifestsRequest{
Repo: &v1alpha1.Repository{
Repo: "https://github.com/argoproj/argocd-example-apps.git",
},
TargetBranch: "main",
SyncBranch: "env/test",
CommitMessage: "test commit message",
Paths: []*apiclient.PathDetails{
{
Path: "apps/staging", // subdirectory path
Manifests: []*apiclient.HydratedManifestDetails{
{
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"test-app"}}`,
},
},
},
},
}
resp, err := service.CommitHydratedManifests(t.Context(), requestWithSubdirPath)
require.NoError(t, err)
require.NotNil(t, resp)
assert.Equal(t, "subdir-path-sha", resp.HydratedSha)
})
t.Run("mixed paths - root and subdirectory", func(t *testing.T) {
t.Parallel()
service, mockRepoClientFactory := newServiceWithMocks(t)
mockGitClient := gitmocks.NewClient(t)
mockGitClient.On("Init").Return(nil).Once()
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
mockGitClient.On("RemoveContents", []string{"apps/production", "apps/staging"}).Return("", nil).Once()
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
mockGitClient.On("CommitSHA").Return("mixed-paths-sha", nil).Once()
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
requestWithMixedPaths := &apiclient.CommitHydratedManifestsRequest{
Repo: &v1alpha1.Repository{
Repo: "https://github.com/argoproj/argocd-example-apps.git",
},
TargetBranch: "main",
SyncBranch: "env/test",
CommitMessage: "test commit message",
Paths: []*apiclient.PathDetails{
{
Path: ".", // root path - should NOT trigger removal
Manifests: []*apiclient.HydratedManifestDetails{
{
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"global-config"}}`,
},
},
},
{
Path: "apps/production", // subdirectory path - SHOULD trigger removal
Manifests: []*apiclient.HydratedManifestDetails{
{
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"prod-app"}}`,
},
},
},
{
Path: "apps/staging", // another subdirectory path - SHOULD trigger removal
Manifests: []*apiclient.HydratedManifestDetails{
{
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"staging-app"}}`,
},
},
},
},
}
resp, err := service.CommitHydratedManifests(t.Context(), requestWithMixedPaths)
require.NoError(t, err)
require.NotNil(t, resp)
assert.Equal(t, "mixed-paths-sha", resp.HydratedSha)
})
t.Run("empty paths array", func(t *testing.T) {
t.Parallel()
service, mockRepoClientFactory := newServiceWithMocks(t)
mockGitClient := gitmocks.NewClient(t)
mockGitClient.On("Init").Return(nil).Once()
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
mockGitClient.On("CommitSHA").Return("it-worked!", nil).Once()
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
requestWithEmptyPaths := &apiclient.CommitHydratedManifestsRequest{
Repo: &v1alpha1.Repository{
Repo: "https://github.com/argoproj/argocd-example-apps.git",
},
TargetBranch: "main",
SyncBranch: "env/test",
CommitMessage: "test commit message",
}
resp, err := service.CommitHydratedManifests(t.Context(), requestWithEmptyPaths)
require.NoError(t, err)
require.NotNil(t, resp)
assert.Equal(t, "it-worked!", resp.HydratedSha)
})
}
func newServiceWithMocks(t *testing.T) (*Service, *mocks.RepoClientFactory) {

View File

@@ -2,10 +2,14 @@ package commit
import (
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/Masterminds/sprig/v3"
log "github.com/sirupsen/logrus"
@@ -13,17 +17,12 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
"github.com/argoproj/argo-cd/v3/common"
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/util/hydrator"
"github.com/argoproj/argo-cd/v3/util/io"
)
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
const gitAttributesContents = `*/README.md linguist-generated=true
*/hydrator.metadata linguist-generated=true`
func init() {
// Avoid allowing the user to learn things about the environment.
delete(sprigFuncMap, "env")
@@ -34,35 +33,36 @@ func init() {
// WriteForPaths writes the manifests, hydrator.metadata, and README.md files for each path in the provided paths. It
// also writes a root-level hydrator.metadata file containing the repo URL and dry SHA.
func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *appv1.RevisionMetadata, paths []*apiclient.PathDetails) error { //nolint:revive //FIXME(var-naming)
hydratorMetadata, err := hydrator.GetCommitMetadata(repoUrl, drySha, dryCommitMetadata)
if err != nil {
return fmt.Errorf("failed to retrieve hydrator metadata: %w", err)
author := ""
message := ""
date := ""
var references []appv1.RevisionReference
if dryCommitMetadata != nil {
author = dryCommitMetadata.Author
message = dryCommitMetadata.Message
if dryCommitMetadata.Date != nil {
date = dryCommitMetadata.Date.Format(time.RFC3339)
}
references = dryCommitMetadata.References
}
subject, body, _ := strings.Cut(message, "\n\n")
// Write the top-level readme.
err = writeMetadata(root, "", hydratorMetadata)
err := writeMetadata(root, "", hydratorMetadataFile{DrySHA: drySha, RepoURL: repoUrl, Author: author, Subject: subject, Body: body, Date: date, References: references})
if err != nil {
return fmt.Errorf("failed to write top-level hydrator metadata: %w", err)
}
// Write .gitattributes
err = writeGitAttributes(root)
if err != nil {
return fmt.Errorf("failed to write git attributes: %w", err)
}
for _, p := range paths {
hydratePath := p.Path
if hydratePath == "." {
hydratePath = ""
}
// Only create directory if path is not empty (root directory case)
if hydratePath != "" {
err = root.MkdirAll(hydratePath, 0o755)
if err != nil {
return fmt.Errorf("failed to create path: %w", err)
}
err = mkdirAll(root, hydratePath)
if err != nil {
return fmt.Errorf("failed to create path: %w", err)
}
// Write the manifests
@@ -72,7 +72,7 @@ func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *app
}
// Write hydrator.metadata containing information about the hydration process.
hydratorMetadata := hydrator.HydratorCommitMetadata{
hydratorMetadata := hydratorMetadataFile{
Commands: p.Commands,
DrySHA: drySha,
RepoURL: repoUrl,
@@ -92,7 +92,7 @@ func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *app
}
// writeMetadata writes the metadata to the hydrator.metadata file.
func writeMetadata(root *os.Root, dirPath string, metadata hydrator.HydratorCommitMetadata) error {
func writeMetadata(root *os.Root, dirPath string, metadata hydratorMetadataFile) error {
hydratorMetadataPath := filepath.Join(dirPath, "hydrator.metadata")
f, err := root.Create(hydratorMetadataPath)
if err != nil {
@@ -111,7 +111,7 @@ func writeMetadata(root *os.Root, dirPath string, metadata hydrator.HydratorComm
}
// writeReadme writes the readme to the README.md file.
func writeReadme(root *os.Root, dirPath string, metadata hydrator.HydratorCommitMetadata) error {
func writeReadme(root *os.Root, dirPath string, metadata hydratorMetadataFile) error {
readmeTemplate, err := template.New("readme").Funcs(sprigFuncMap).Parse(manifestHydrationReadmeTemplate)
if err != nil {
return fmt.Errorf("failed to parse readme template: %w", err)
@@ -134,30 +134,6 @@ func writeReadme(root *os.Root, dirPath string, metadata hydrator.HydratorCommit
return nil
}
// writeGitAttributes creates a .gitattributes file at the root of the
// repository and fills it with gitAttributesContents. A failure to close the
// file is logged (with security annotations) rather than returned, since the
// write itself has already succeeded or failed by then.
func writeGitAttributes(root *os.Root) error {
	f, err := root.Create(".gitattributes")
	if err != nil {
		return fmt.Errorf("failed to create git attributes file: %w", err)
	}
	defer func() {
		if closeErr := f.Close(); closeErr != nil {
			log.WithFields(log.Fields{
				common.SecurityField:    common.SecurityMedium,
				common.SecurityCWEField: common.SecurityCWEMissingReleaseOfFileDescriptor,
			}).Errorf("error closing file %q: %v", f.Name(), closeErr)
		}
	}()
	if _, err = f.WriteString(gitAttributesContents); err != nil {
		return fmt.Errorf("failed to write git attributes: %w", err)
	}
	return nil
}
// writeManifests writes the manifests to the manifest.yaml file, truncating the file if it exists and appending the
// manifests in the order they are provided.
func writeManifests(root *os.Root, dirPath string, manifests []*apiclient.HydratedManifestDetails) error {
@@ -199,3 +175,25 @@ func writeManifests(root *os.Root, dirPath string, manifests []*apiclient.Hydrat
return nil
}
// mkdirAll creates the directory and all its parents inside root if they do
// not exist. It returns an error if any path component cannot be created.
// The path is built one component at a time so that every Mkdir call is made
// through the os.Root (presumably because os.Root offers no MkdirAll at this
// Go version — TODO confirm).
func mkdirAll(root *os.Root, dirPath string) error {
	// NOTE(review): dirPath is split on the OS path separator; confirm that
	// callers never pass forward-slash paths on platforms where the
	// separator differs (e.g. Windows).
	parts := strings.Split(dirPath, string(os.PathSeparator))
	builtPath := ""
	for _, part := range parts {
		if part == "" {
			continue
		}
		builtPath = filepath.Join(builtPath, part)
		err := root.Mkdir(builtPath, os.ModePerm)
		if err != nil {
			if errors.Is(err, fs.ErrExist) {
				// An already-existing component is not an error; log and
				// keep descending into the remaining components.
				log.WithError(err).Warnf("path %s already exists, skipping", dirPath)
				continue
			}
			return fmt.Errorf("failed to create path: %w", err)
		}
	}
	return nil
}

View File

@@ -9,6 +9,7 @@ import (
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
@@ -18,7 +19,6 @@ import (
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
appsv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/util/hydrator"
)
// tempRoot creates a temporary directory and returns an os.Root object for it.
@@ -73,13 +73,9 @@ func TestWriteForPaths(t *testing.T) {
now := metav1.NewTime(time.Now())
metadata := &appsv1.RevisionMetadata{
Author: "test-author",
Date: &now,
Message: `test-message
Signed-off-by: Test User <test@example.com>
Argocd-reference-commit-sha: abc123
`,
Author: "test-author",
Date: &now,
Message: "test-message",
References: []appsv1.RevisionReference{
{
Commit: &appsv1.CommitMetadata{
@@ -101,15 +97,16 @@ Argocd-reference-commit-sha: abc123
topMetadataBytes, err := os.ReadFile(topMetadataPath)
require.NoError(t, err)
expectedSubject, expectedBody, _ := strings.Cut(metadata.Message, "\n\n")
var topMetadata hydratorMetadataFile
err = json.Unmarshal(topMetadataBytes, &topMetadata)
require.NoError(t, err)
assert.Equal(t, repoURL, topMetadata.RepoURL)
assert.Equal(t, drySha, topMetadata.DrySHA)
assert.Equal(t, metadata.Author, topMetadata.Author)
assert.Equal(t, "test-message", topMetadata.Subject)
// The body should exclude the Argocd- trailers.
assert.Equal(t, "Signed-off-by: Test User <test@example.com>\n", topMetadata.Body)
assert.Equal(t, expectedSubject, topMetadata.Subject)
assert.Equal(t, expectedBody, topMetadata.Body)
assert.Equal(t, metadata.Date.Format(time.RFC3339), topMetadata.Date)
assert.Equal(t, metadata.References, topMetadata.References)
@@ -145,7 +142,7 @@ Argocd-reference-commit-sha: abc123
func TestWriteMetadata(t *testing.T) {
root := tempRoot(t)
metadata := hydrator.HydratorCommitMetadata{
metadata := hydratorMetadataFile{
RepoURL: "https://github.com/example/repo",
DrySHA: "abc123",
}
@@ -157,7 +154,7 @@ func TestWriteMetadata(t *testing.T) {
metadataBytes, err := os.ReadFile(metadataPath)
require.NoError(t, err)
var readMetadata hydrator.HydratorCommitMetadata
var readMetadata hydratorMetadataFile
err = json.Unmarshal(metadataBytes, &readMetadata)
require.NoError(t, err)
assert.Equal(t, metadata, readMetadata)
@@ -172,7 +169,7 @@ func TestWriteReadme(t *testing.T) {
hash := sha256.Sum256(randomData)
sha := hex.EncodeToString(hash[:])
metadata := hydrator.HydratorCommitMetadata{
metadata := hydratorMetadataFile{
RepoURL: "https://github.com/example/repo",
DrySHA: "abc123",
References: []appsv1.RevisionReference{
@@ -224,16 +221,3 @@ func TestWriteManifests(t *testing.T) {
require.NoError(t, err)
assert.Contains(t, string(manifestBytes), "kind")
}
// TestWriteGitAttributes checks that writeGitAttributes creates a
// .gitattributes file containing the linguist-generated markers for both
// README.md and hydrator.metadata files.
func TestWriteGitAttributes(t *testing.T) {
	root := tempRoot(t)

	require.NoError(t, writeGitAttributes(root))

	contents, err := os.ReadFile(filepath.Join(root.Name(), ".gitattributes"))
	require.NoError(t, err)
	for _, want := range []string{
		"*/README.md linguist-generated=true",
		"*/hydrator.metadata linguist-generated=true",
	} {
		assert.Contains(t, string(contents), want)
	}
}

View File

@@ -100,12 +100,6 @@ const (
PluginConfigFileName = "plugin.yaml"
)
// consts for podrequests metrics in cache/info
const (
PodRequestsCPU = "cpu"
PodRequestsMEM = "memory"
)
// Argo CD application related constants
const (
@@ -192,8 +186,6 @@ const (
LabelValueSecretTypeRepoCreds = "repo-creds"
// LabelValueSecretTypeRepositoryWrite indicates a secret type of repository credentials for writing
LabelValueSecretTypeRepositoryWrite = "repository-write"
// LabelValueSecretTypeRepoCredsWrite indicates a secret type of repository credentials for writing for templating
LabelValueSecretTypeRepoCredsWrite = "repo-write-creds"
// LabelValueSecretTypeSCMCreds indicates a secret type of SCM credentials
LabelValueSecretTypeSCMCreds = "scm-creds"

View File

@@ -1,82 +0,0 @@
package common
import (
"runtime"
"testing"
"github.com/stretchr/testify/assert"
)
// TestGetVersion exercises GetVersion against several combinations of the
// package-level build metadata variables (git commit, tag, tree state, base
// version) and checks both the derived semantic version string and the
// fields passed through unchanged. Each case runs as a named subtest so a
// failure identifies exactly which combination broke.
func TestGetVersion(t *testing.T) {
	tests := []struct {
		name           string
		inputGitCommit string
		inputGitTag    string
		inputTreeState string
		inputVersion   string
		expected       string
	}{
		{
			name:           "Official release with tag and clean state",
			inputGitCommit: "abcdef123456",
			inputGitTag:    "v1.2.3",
			inputTreeState: "clean",
			inputVersion:   "1.2.3",
			expected:       "v1.2.3",
		},
		{
			name:           "Dirty state with commit",
			inputGitCommit: "deadbeefcafebabe",
			inputGitTag:    "",
			inputTreeState: "dirty",
			inputVersion:   "2.0.1",
			expected:       "v2.0.1+deadbee.dirty",
		},
		{
			name:           "Clean state with commit, no tag",
			inputGitCommit: "cafebabedeadbeef",
			inputGitTag:    "",
			inputTreeState: "clean",
			inputVersion:   "2.1.0",
			expected:       "v2.1.0+cafebab",
		},
		{
			name:           "Missing commit and tag",
			inputGitCommit: "",
			inputGitTag:    "",
			inputTreeState: "clean",
			inputVersion:   "3.1.0",
			expected:       "v3.1.0+unknown",
		},
		{
			name:           "Short commit",
			inputGitCommit: "abc",
			inputGitTag:    "",
			inputTreeState: "clean",
			inputVersion:   "4.0.0",
			expected:       "v4.0.0+unknown",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// GetVersion reads package-level variables; set them per case.
			// Subtests run sequentially, so the shared state is safe here.
			gitCommit = tt.inputGitCommit
			gitTag = tt.inputGitTag
			gitTreeState = tt.inputTreeState
			version = tt.inputVersion
			buildDate = "2025-06-26"
			kubectlVersion = "v1.30.0"
			extraBuildInfo = "test-build"

			got := GetVersion()
			assert.Equal(t, tt.expected, got.Version)
			assert.Equal(t, buildDate, got.BuildDate)
			assert.Equal(t, tt.inputGitCommit, got.GitCommit)
			assert.Equal(t, tt.inputGitTag, got.GitTag)
			assert.Equal(t, tt.inputTreeState, got.GitTreeState)
			assert.Equal(t, runtime.Version(), got.GoVersion)
			assert.Equal(t, runtime.Compiler, got.Compiler)
			assert.Equal(t, runtime.GOOS+"/"+runtime.GOARCH, got.Platform)
			assert.Equal(t, kubectlVersion, got.KubectlVersion)
			assert.Equal(t, extraBuildInfo, got.ExtraBuildInfo)
		})
	}
}

View File

@@ -47,7 +47,6 @@ import (
"github.com/argoproj/argo-cd/v3/common"
statecache "github.com/argoproj/argo-cd/v3/controller/cache"
"github.com/argoproj/argo-cd/v3/controller/hydrator"
hydratortypes "github.com/argoproj/argo-cd/v3/controller/hydrator/types"
"github.com/argoproj/argo-cd/v3/controller/metrics"
"github.com/argoproj/argo-cd/v3/controller/sharding"
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
@@ -116,7 +115,7 @@ type ApplicationController struct {
appOperationQueue workqueue.TypedRateLimitingInterface[string]
projectRefreshQueue workqueue.TypedRateLimitingInterface[string]
appHydrateQueue workqueue.TypedRateLimitingInterface[string]
hydrationQueue workqueue.TypedRateLimitingInterface[hydratortypes.HydrationQueueKey]
hydrationQueue workqueue.TypedRateLimitingInterface[hydrator.HydrationQueueKey]
appInformer cache.SharedIndexInformer
appLister applisters.ApplicationLister
projInformer cache.SharedIndexInformer
@@ -126,7 +125,7 @@ type ApplicationController struct {
statusHardRefreshTimeout time.Duration
statusRefreshJitter time.Duration
selfHealTimeout time.Duration
selfHealBackoff *wait.Backoff
selfHealBackOff *wait.Backoff
selfHealBackoffCooldown time.Duration
syncTimeout time.Duration
db db.ArgoDB
@@ -199,7 +198,7 @@ func NewApplicationController(
projectRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "project_reconciliation_queue"}),
appComparisonTypeRefreshQueue: workqueue.NewTypedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig)),
appHydrateQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_hydration_queue"}),
hydrationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[hydratortypes.HydrationQueueKey](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[hydratortypes.HydrationQueueKey]{Name: "manifest_hydration_queue"}),
hydrationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[hydrator.HydrationQueueKey](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[hydrator.HydrationQueueKey]{Name: "manifest_hydration_queue"}),
db: db,
statusRefreshTimeout: appResyncPeriod,
statusHardRefreshTimeout: appHardResyncPeriod,
@@ -209,7 +208,7 @@ func NewApplicationController(
auditLogger: argo.NewAuditLogger(kubeClientset, common.ApplicationController, enableK8sEvent),
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
selfHealBackoff: selfHealBackoff,
selfHealBackOff: selfHealBackoff,
selfHealBackoffCooldown: selfHealBackoffCooldown,
syncTimeout: syncTimeout,
clusterSharding: clusterSharding,
@@ -329,7 +328,7 @@ func NewApplicationController(
}
}
stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterSharding, argo.NewResourceTracking())
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.onKubectlRun, ctrl.settingsMgr, stateCache, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.onKubectlRun, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
ctrl.appInformer = appInformer
ctrl.appLister = appLister
ctrl.projInformer = projInformer
@@ -603,9 +602,6 @@ func (ctrl *ApplicationController) getResourceTree(destCluster *appv1.Cluster, a
Group: managedResource.Group,
Namespace: managedResource.Namespace,
},
Health: &appv1.HealthStatus{
Status: health.HealthStatusMissing,
},
})
} else {
managedResourcesKeys = append(managedResourcesKeys, kube.GetResourceKey(live))
@@ -1206,7 +1202,7 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
if err != nil {
logCtx.Warnf("Unable to get destination cluster: %v", err)
app.UnSetCascadedDeletion()
app.UnSetPostDeleteFinalizerAll()
app.UnSetPostDeleteFinalizer()
if err := ctrl.updateFinalizers(app); err != nil {
return err
}
@@ -1395,55 +1391,42 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
logCtx.Debug("Finished processing requested app operation")
}()
terminatingCause := ""
terminating := false
if isOperationInProgress(app) {
state = app.Status.OperationState.DeepCopy()
terminating = state.Phase == synccommon.OperationTerminating
// Failed operation with retry strategy might have be in-progress and has completion time
switch {
case state.Phase == synccommon.OperationTerminating:
logCtx.Infof("Resuming in-progress operation. phase: %s, message: %s", state.Phase, state.Message)
case ctrl.syncTimeout != time.Duration(0) && time.Now().After(state.StartedAt.Add(ctrl.syncTimeout)):
state.Phase = synccommon.OperationTerminating
state.Message = "operation is terminating due to timeout"
terminatingCause = "controller sync timeout"
ctrl.setOperationState(app, state)
logCtx.Infof("Terminating in-progress operation due to timeout. Started at: %v, timeout: %v", state.StartedAt, ctrl.syncTimeout)
case state.Phase == synccommon.OperationRunning && state.FinishedAt != nil:
// Failed operation with retry strategy might be in-progress and has completion time
case state.FinishedAt != nil && !terminating:
retryAt, err := app.Status.OperationState.Operation.Retry.NextRetryAt(state.FinishedAt.Time, state.RetryCount)
if err != nil {
state.Phase = synccommon.OperationError
state.Phase = synccommon.OperationFailed
state.Message = err.Error()
ctrl.setOperationState(app, state)
return
}
retryAfter := time.Until(retryAt)
if retryAfter > 0 {
logCtx.Infof("Skipping retrying in-progress operation. Attempting again at: %s", retryAt.Format(time.RFC3339))
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
return
}
// Remove the desired revisions if the sync failed and we are retrying. The latest revision from the source will be used.
extraMsg := ""
if state.Operation.Retry.Refresh {
extraMsg += " with latest revisions"
state.Operation.Sync.Revision = ""
state.Operation.Sync.Revisions = nil
}
// Get rid of sync results and null out previous operation completion time
// This will start the retry attempt
state.Message = fmt.Sprintf("Retrying operation%s. Attempt #%d", extraMsg, state.RetryCount)
// retrying operation. remove previous failure time in app since it is used as a trigger
// that previous failed and operation should be retried
state.FinishedAt = nil
state.SyncResult = nil
ctrl.setOperationState(app, state)
logCtx.Infof("Retrying operation%s. Attempt #%d", extraMsg, state.RetryCount)
// Get rid of sync results and null out previous operation completion time
state.SyncResult = nil
case ctrl.syncTimeout != time.Duration(0) && time.Now().After(state.StartedAt.Add(ctrl.syncTimeout)) && !terminating:
state.Phase = synccommon.OperationTerminating
state.Message = "operation is terminating due to timeout"
ctrl.setOperationState(app, state)
logCtx.Infof("Terminating in-progress operation due to timeout. Started at: %v, timeout: %v", state.StartedAt, ctrl.syncTimeout)
default:
logCtx.Infof("Resuming in-progress operation. phase: %s, message: %s", state.Phase, state.Message)
}
} else {
state = NewOperationState(*app.Operation)
state = &appv1.OperationState{Phase: synccommon.OperationRunning, Operation: *app.Operation, StartedAt: metav1.Now()}
ctrl.setOperationState(app, state)
if ctrl.syncTimeout != time.Duration(0) {
// Schedule a check during which the timeout would be checked.
@@ -1453,16 +1436,22 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
}
ts.AddCheckpoint("initial_operation_stage_ms")
terminating := state.Phase == synccommon.OperationTerminating
project, err := ctrl.getAppProj(app)
if err == nil {
// Start or resume the sync
ctrl.appStateManager.SyncAppState(app, project, state)
// Call GetDestinationCluster to validate the destination cluster.
if _, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, ctrl.db); err != nil {
state.Phase = synccommon.OperationFailed
state.Message = err.Error()
} else {
state.Phase = synccommon.OperationError
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
ctrl.appStateManager.SyncAppState(app, state)
}
ts.AddCheckpoint("validate_and_sync_app_state_ms")
// Check whether application is allowed to use project
_, err := ctrl.getAppProj(app)
ts.AddCheckpoint("get_app_proj_ms")
if err != nil {
state.Phase = synccommon.OperationError
state.Message = err.Error()
}
ts.AddCheckpoint("sync_app_state_ms")
switch state.Phase {
case synccommon.OperationRunning:
@@ -1470,6 +1459,12 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
// to clobber the Terminated state with Running. Get the latest app state to check for this.
freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, metav1.GetOptions{})
if err == nil {
// App may have lost permissions to use the project meanwhile.
_, err = ctrl.getAppProj(freshApp)
if err != nil {
state.Phase = synccommon.OperationFailed
state.Message = fmt.Sprintf("operation not allowed: %v", err)
}
if freshApp.Status.OperationState != nil && freshApp.Status.OperationState.Phase == synccommon.OperationTerminating {
state.Phase = synccommon.OperationTerminating
state.Message = "operation is terminating"
@@ -1481,24 +1476,17 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
case synccommon.OperationFailed, synccommon.OperationError:
if !terminating && (state.RetryCount < state.Operation.Retry.Limit || state.Operation.Retry.Limit < 0) {
now := metav1.Now()
state.FinishedAt = &now
if retryAt, err := state.Operation.Retry.NextRetryAt(now.Time, state.RetryCount); err != nil {
state.Phase = synccommon.OperationError
state.Phase = synccommon.OperationFailed
state.Message = fmt.Sprintf("%s (failed to retry: %v)", state.Message, err)
} else {
// Set FinishedAt explicitly on a Running phase. This is a unique condition that will allow this
// function to perform a retry the next time the operation is processed.
state.Phase = synccommon.OperationRunning
state.FinishedAt = &now
state.RetryCount++
state.Message = fmt.Sprintf("%s. Retrying attempt #%d at %s.", state.Message, state.RetryCount, retryAt.Format(time.Kitchen))
}
} else {
if terminating && terminatingCause != "" {
state.Message = fmt.Sprintf("%s, triggered by %s", state.Message, terminatingCause)
}
if state.RetryCount > 0 {
state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)
}
} else if state.RetryCount > 0 {
state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)
}
}
@@ -1772,7 +1760,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
sources = append(sources, app.Spec.GetSource())
}
compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources, refreshType == appv1.RefreshTypeHard, comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources)
compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources, refreshType == appv1.RefreshTypeHard, comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources, false)
ts.AddCheckpoint("compare_app_state_ms")
@@ -1798,7 +1786,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
canSync, _ := project.Spec.SyncWindows.Matches(app).CanSync(false)
if canSync {
syncErrCond, opDuration := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionsMayHaveChanges)
syncErrCond, opDuration := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionUpdated)
setOpDuration = opDuration
if syncErrCond != nil {
app.Status.SetConditions(
@@ -1878,7 +1866,7 @@ func (ctrl *ApplicationController) processAppHydrateQueueItem() (processNext boo
return
}
ctrl.hydrator.ProcessAppHydrateQueueItem(origApp.DeepCopy())
ctrl.hydrator.ProcessAppHydrateQueueItem(origApp)
log.WithFields(applog.GetAppLogFields(origApp)).Debug("Successfully processed app hydrate queue item")
return
@@ -2093,7 +2081,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
}
// autoSync will initiate a sync operation for an application configured with automated sync
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus, shouldCompareRevisions bool) (*appv1.ApplicationCondition, time.Duration) {
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus, revisionUpdated bool) (*appv1.ApplicationCondition, time.Duration) {
logCtx := log.WithFields(applog.GetAppLogFields(app))
ts := stats.NewTimingStats()
defer func() {
@@ -2137,70 +2125,65 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
}
}
source := ptr.To(app.Spec.GetSource())
desiredRevisions := []string{syncStatus.Revision}
if app.Spec.HasMultipleSources() {
source = nil
desiredRevisions = syncStatus.Revisions
selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
// Multi-Source Apps with selfHeal disabled should not trigger an autosync if
// the last sync revision and the new sync revision is the same.
if app.Spec.HasMultipleSources() && !selfHeal && reflect.DeepEqual(app.Status.Sync.Revisions, syncStatus.Revisions) {
logCtx.Infof("Skipping auto-sync: selfHeal disabled and sync caused by object update")
return nil, 0
}
desiredCommitSHA := syncStatus.Revision
desiredCommitSHAsMS := syncStatus.Revisions
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources(), revisionUpdated)
ts.AddCheckpoint("already_attempted_sync_ms")
op := appv1.Operation{
Sync: &appv1.SyncOperation{
Source: source,
Revision: syncStatus.Revision,
Revision: desiredCommitSHA,
Prune: app.Spec.SyncPolicy.Automated.Prune,
SyncOptions: app.Spec.SyncPolicy.SyncOptions,
Sources: app.Spec.Sources,
Revisions: syncStatus.Revisions,
Revisions: desiredCommitSHAsMS,
},
InitiatedBy: appv1.OperationInitiator{Automated: true},
Retry: appv1.RetryStrategy{Limit: 5},
}
if app.Spec.SyncPolicy.Retry != nil {
op.Retry = *app.Spec.SyncPolicy.Retry
}
// It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g.
// auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an
// application in an infinite loop. To detect this, we only attempt the Sync if the revision
// and parameter overrides are different from our most recent sync operation.
alreadyAttempted, lastAttemptedRevisions, lastAttemptedPhase := alreadyAttemptedSync(app, desiredRevisions, shouldCompareRevisions)
ts.AddCheckpoint("already_attempted_sync_ms")
if alreadyAttempted {
if !lastAttemptedPhase.Successful() {
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s and will not retry for %s", lastAttemptedRevisions, desiredRevisions)
message := fmt.Sprintf("Failed last sync attempt to %s: %s", lastAttemptedRevisions, app.Status.OperationState.Message)
if alreadyAttempted && (!selfHeal || !attemptPhase.Successful()) {
if !attemptPhase.Successful() {
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA)
message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
}
if !app.Spec.SyncPolicy.Automated.SelfHeal {
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredRevisions)
return nil, 0
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
return nil, 0
} else if selfHeal {
shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app, alreadyAttempted)
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
}
// Self heal will trigger a new sync operation when the desired state changes and cause the application to
// be OutOfSync when it was previously synced Successfully. This means SelfHeal should only ever be attempted
// when the revisions have not changed, and where the previous sync to these revision was successful
// Only carry SelfHealAttemptsCount to be increased when the selfHealBackoffCooldown has not elapsed yet
if !ctrl.selfHealBackoffCooldownElapsed(app) {
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
if alreadyAttempted {
if !shouldSelfHeal {
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
return nil, 0
}
}
if remainingTime := ctrl.selfHealRemainingBackoff(app, int(op.Sync.SelfHealAttemptsCount)); remainingTime > 0 {
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", lastAttemptedRevisions, ctrl.selfHealTimeout, remainingTime)
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &remainingTime)
return nil, 0
}
op.Sync.SelfHealAttemptsCount++
for _, resource := range resources {
if resource.Status != appv1.SyncStatusCodeSynced {
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
Kind: resource.Kind,
Group: resource.Group,
Name: resource.Name,
})
op.Sync.SelfHealAttemptsCount++
for _, resource := range resources {
if resource.Status != appv1.SyncStatusCodeSynced {
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
Kind: resource.Kind,
Group: resource.Group,
Name: resource.Name,
})
}
}
}
}
@@ -2214,7 +2197,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
}
}
if bAllNeedPrune {
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredRevisions)
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA)
logCtx.Warn(message)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
}
@@ -2230,65 +2213,62 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
if stderrors.Is(err, argo.ErrAnotherOperationInProgress) {
// skipping auto-sync because another operation is in progress and was not noticed due to stale data in informer
// it is safe to skip auto-sync because it is already running
logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredRevisions, err)
logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
return nil, 0
}
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredRevisions, err)
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
}
ctrl.writeBackToInformer(updatedApp)
ts.AddCheckpoint("write_back_to_informer_ms")
message := fmt.Sprintf("Initiated automated sync to %s", desiredRevisions)
var target string
if updatedApp.Spec.HasMultipleSources() {
target = strings.Join(desiredCommitSHAsMS, ", ")
} else {
target = desiredCommitSHA
}
message := fmt.Sprintf("Initiated automated sync to '%s'", target)
ctrl.logAppEvent(context.TODO(), app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: corev1.EventTypeNormal}, message)
logCtx.Info(message)
return nil, setOpTime
}
// alreadyAttemptedSync returns whether the most recently synced revision(s) exactly match the given desiredRevisions
// and for the same application source. If the revision(s) have changed or the Application source configuration has been updated,
// it will return false, indicating that a new sync should be attempted.
// When newRevisionHasChanges is false, due to commits not having direct changes on the application, it will not compare the revision(s), but only the sources.
// It also returns the last synced revisions if any, and the result of that last sync operation.
func alreadyAttemptedSync(app *appv1.Application, desiredRevisions []string, newRevisionHasChanges bool) (bool, []string, synccommon.OperationPhase) {
if app.Status.OperationState == nil {
// The operation state may be removed when new operations are triggered
return false, []string{}, ""
// alreadyAttemptedSync returns whether the most recent sync was performed against the
// commitSHA and with the same app source config which are currently set in the app.
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool, revisionUpdated bool) (bool, synccommon.OperationPhase) {
if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
return false, ""
}
if app.Status.OperationState.SyncResult == nil {
// If the sync has completed without result, it is very likely that an error happened
// We don't want to resync with auto-sync indefinitely. We should have retried the configured amount of time already
// In this case, a manual action to restore the app may be required
log.WithFields(applog.GetAppLogFields(app)).Warn("Already attempted sync: sync does not have any results")
return app.Status.OperationState.Phase.Completed(), []string{}, app.Status.OperationState.Phase
}
if newRevisionHasChanges {
log.WithFields(applog.GetAppLogFields(app)).Infof("Already attempted sync: comparing synced revisions to %s", desiredRevisions)
if app.Spec.HasMultipleSources() {
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, desiredRevisions) {
return false, app.Status.OperationState.SyncResult.Revisions, app.Status.OperationState.Phase
if hasMultipleSources {
if revisionUpdated {
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
return false, ""
}
} else {
if len(desiredRevisions) != 1 || app.Status.OperationState.SyncResult.Revision != desiredRevisions[0] {
return false, []string{app.Status.OperationState.SyncResult.Revision}, app.Status.OperationState.Phase
}
log.WithFields(applog.GetAppLogFields(app)).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
}
} else {
log.WithFields(applog.GetAppLogFields(app)).Debugf("Already attempted sync: revisions %s have no changes", desiredRevisions)
if revisionUpdated {
log.WithFields(applog.GetAppLogFields(app)).Infof("Executing compare of syncResult.Revision and commitSha because manifest changed: %v", commitSHA)
if app.Status.OperationState.SyncResult.Revision != commitSHA {
return false, ""
}
} else {
log.WithFields(applog.GetAppLogFields(app)).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
}
}
log.WithFields(applog.GetAppLogFields(app)).Debug("Already attempted sync: comparing sources")
if app.Spec.HasMultipleSources() {
return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.SyncResult.Revisions, app.Status.OperationState.Phase
if hasMultipleSources {
return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.Phase
}
return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), []string{app.Status.OperationState.SyncResult.Revision}, app.Status.OperationState.Phase
return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase
}
func (ctrl *ApplicationController) selfHealRemainingBackoff(app *appv1.Application, selfHealAttemptsCount int) time.Duration {
func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application, alreadyAttempted bool) (bool, time.Duration) {
if app.Status.OperationState == nil {
return time.Duration(0)
return true, time.Duration(0)
}
var timeSinceOperation *time.Duration
@@ -2296,41 +2276,34 @@ func (ctrl *ApplicationController) selfHealRemainingBackoff(app *appv1.Applicati
timeSinceOperation = ptr.To(time.Since(app.Status.OperationState.FinishedAt.Time))
}
// Reset counter if the prior sync was successful and the cooldown period is over OR if the revision has changed
if !alreadyAttempted || (timeSinceOperation != nil && *timeSinceOperation >= ctrl.selfHealBackoffCooldown && app.Status.Sync.Status == appv1.SyncStatusCodeSynced) {
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = 0
}
var retryAfter time.Duration
if ctrl.selfHealBackoff == nil {
if ctrl.selfHealBackOff == nil {
if timeSinceOperation == nil {
retryAfter = ctrl.selfHealTimeout
} else {
retryAfter = ctrl.selfHealTimeout - *timeSinceOperation
}
} else {
backOff := *ctrl.selfHealBackoff
backOff.Steps = selfHealAttemptsCount
backOff := *ctrl.selfHealBackOff
backOff.Steps = int(app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
var delay time.Duration
steps := backOff.Steps
for i := 0; i < steps; i++ {
delay = backOff.Step()
}
if timeSinceOperation == nil {
retryAfter = delay
} else {
retryAfter = delay - *timeSinceOperation
}
}
return retryAfter
}
// selfHealBackoffCooldownElapsed returns true when the last successful sync has occurred since longer
// than then self heal cooldown. This means that the application has been in sync for long enough to
// reset the self healing backoff to its initial state
func (ctrl *ApplicationController) selfHealBackoffCooldownElapsed(app *appv1.Application) bool {
if app.Status.OperationState == nil || app.Status.OperationState.FinishedAt == nil {
// Something is in progress, or about to be. In that case, selfHeal attempt should be zero anyway
return true
}
timeSinceLastOperation := time.Since(app.Status.OperationState.FinishedAt.Time)
return timeSinceLastOperation >= ctrl.selfHealBackoffCooldown && app.Status.OperationState.Phase.Successful()
return retryAfter <= 0, retryAfter
}
// isAppNamespaceAllowed returns whether the application is allowed in the

View File

@@ -95,10 +95,10 @@ func (m *MockKubectl) DeleteResource(ctx context.Context, config *rest.Config, g
}
func newFakeController(data *fakeData, repoErr error) *ApplicationController {
return newFakeControllerWithResync(data, time.Minute, repoErr, nil)
return newFakeControllerWithResync(data, time.Minute, repoErr)
}
func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration, repoErr, revisionPathsErr error) *ApplicationController {
func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration, repoErr error) *ApplicationController {
var clust corev1.Secret
err := yaml.Unmarshal([]byte(fakeCluster), &clust)
if err != nil {
@@ -124,11 +124,7 @@ func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration,
}
}
if revisionPathsErr != nil {
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(nil, revisionPathsErr)
} else {
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(data.updateRevisionForPathsResponse, nil)
}
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(data.updateRevisionForPathsResponse, nil)
mockRepoClientset := mockrepoclient.Clientset{RepoServerServiceClient: &mockRepoClient}
@@ -348,13 +344,10 @@ status:
- cccccccccccccccccccccccccccccccccccccccc
sources:
- path: some/path
helm:
valueFiles:
- $values_test/values.yaml
repoURL: https://github.com/argoproj/argocd-example-apps.git
- path: some/other/path
repoURL: https://github.com/argoproj/argocd-example-apps-fake.git
- ref: values_test
- path: some/other/path
repoURL: https://github.com/argoproj/argocd-example-apps-fake-ref.git
`
@@ -628,13 +621,13 @@ func TestAutoSyncEnabledSetToTrue(t *testing.T) {
assert.False(t, app.Operation.Sync.Prune)
}
func TestAutoSyncMultiSourceWithoutSelfHeal(t *testing.T) {
func TestMultiSourceSelfHeal(t *testing.T) {
// Simulate OutOfSync caused by object change in cluster
// So our Sync Revisions and SyncStatus Revisions should deep equal
t.Run("ClusterObjectChangeShouldNotTriggerAutoSync", func(t *testing.T) {
app := newFakeMultiSourceApp()
app.Spec.SyncPolicy.Automated.SelfHeal = false
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
app.Status.Sync.Revisions = []string{"z", "x", "v"}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
@@ -646,14 +639,15 @@ func TestAutoSyncMultiSourceWithoutSelfHeal(t *testing.T) {
require.NoError(t, err)
assert.Nil(t, app.Operation)
})
t.Run("NewRevisionChangeShouldTriggerAutoSync", func(t *testing.T) {
app := newFakeMultiSourceApp()
app.Spec.SyncPolicy.Automated.SelfHeal = false
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
app.Status.Sync.Revisions = []string{"a", "b", "c"}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revisions: []string{"a", "b", "c"},
Revisions: []string{"z", "x", "v"},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.Nil(t, cond)
@@ -796,30 +790,6 @@ func TestSkipAutoSync(t *testing.T) {
assert.Nil(t, app.Operation)
})
t.Run("PreviousSyncAttemptError", func(t *testing.T) {
app := newFakeApp()
app.Status.OperationState = &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
},
Phase: synccommon.OperationError,
SyncResult: &v1alpha1.SyncOperationResult{
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
Source: *app.Spec.Source.DeepCopy(),
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
assert.Nil(t, app.Operation)
})
t.Run("NeedsToPruneResourcesOnlyButAutomatedPruneDisabled", func(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
@@ -874,78 +844,45 @@ func TestAutoSyncIndicateError(t *testing.T) {
// TestAutoSyncParameterOverrides verifies we auto-sync if revision is same but parameter overrides are different
func TestAutoSyncParameterOverrides(t *testing.T) {
t.Run("Single source", func(t *testing.T) {
app := newFakeApp()
app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "1",
},
app := newFakeApp()
app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "1",
},
}
app.Status.OperationState = &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "2", // this value changed
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
app.Status.OperationState = &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "2", // this value changed
},
},
},
},
},
Phase: synccommon.OperationFailed,
SyncResult: &v1alpha1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
}
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
},
Phase: synccommon.OperationFailed,
SyncResult: &v1alpha1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
assert.NotNil(t, app.Operation)
})
t.Run("Multi sources", func(t *testing.T) {
app := newFakeMultiSourceApp()
app.Spec.Sources[0].Helm = &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "1",
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
app.Status.OperationState.SyncResult.Sources[0].Helm = &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "2", // this value changed
},
},
}
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revisions: []string{"z", "x", "v"},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
assert.NotNil(t, app.Operation)
})
},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
assert.NotNil(t, app.Operation)
}
// TestFinalizeAppDeletion verifies application deletion
@@ -1373,9 +1310,6 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) {
managedDeploy := v1alpha1.ResourceNode{
ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "nginx-deployment", Version: "v1"},
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusMissing,
},
}
orphanedDeploy1 := v1alpha1.ResourceNode{
ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deploy1"},
@@ -1928,7 +1862,7 @@ apps/Deployment:
hs = {}
hs.status = ""
hs.message = ""
if obj.metadata ~= nil then
if obj.metadata.labels ~= nil then
current_status = obj.metadata.labels["status"]
@@ -1964,7 +1898,7 @@ apps/Deployment:
{},
{},
},
}, time.Millisecond*10, nil, nil)
}, time.Millisecond*10, nil)
testCases := []struct {
name string
@@ -2096,9 +2030,7 @@ func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) {
ctrl.processRequestedAppOperation(app)
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationError), phase)
assert.Equal(t, "Failed to load application project: error getting app project \"default\": appproject.argoproj.io \"default\" not found", message)
}
func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
@@ -2127,8 +2059,8 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
ctrl.processRequestedAppOperation(app)
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
assert.Equal(t, string(synccommon.OperationFailed), phase)
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationError), phase)
assert.Contains(t, message, "application destination can't have both name and server defined: another-cluster https://localhost:6443")
}
@@ -2152,24 +2084,20 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) {
ctrl.processRequestedAppOperation(app)
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
retryCount, _, _ := unstructured.NestedFloat64(receivedPatch, "status", "operationState", "retryCount")
assert.Equal(t, string(synccommon.OperationRunning), phase)
assert.Contains(t, message, "Failed to load application project: error getting app project \"invalid-project\": appproject.argoproj.io \"invalid-project\" not found. Retrying attempt #1")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Contains(t, message, "Retrying attempt #1")
retryCount, _, _ := unstructured.NestedFloat64(receivedPatch, "status", "operationState", "retryCount")
assert.InEpsilon(t, float64(1), retryCount, 0.0001)
}
func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
failedAttemptFinisedAt := time.Now().Add(-time.Minute * 5)
app := newFakeApp()
app.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
Retry: v1alpha1.RetryStrategy{Limit: 1},
}
app.Status.OperationState.Operation = *app.Operation
app.Status.OperationState.Phase = synccommon.OperationRunning
app.Status.OperationState.RetryCount = 1
app.Status.OperationState.FinishedAt = &metav1.Time{Time: failedAttemptFinisedAt}
app.Status.OperationState.SyncResult.Resources = []*v1alpha1.ResourceResult{{
Name: "guestbook",
Kind: "Deployment",
@@ -2199,58 +2127,7 @@ func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
ctrl.processRequestedAppOperation(app)
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
finishedAtStr, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "finishedAt")
finishedAt, err := time.Parse(time.RFC3339, finishedAtStr)
require.NoError(t, err)
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
assert.Equal(t, "successfully synced (no more tasks)", message)
assert.Truef(t, finishedAt.After(failedAttemptFinisedAt), "finishedAt was expected to be updated. The retry was not performed.")
}
// TestProcessRequestedAppOperation_RunningPreviouslyFailedBackoff verifies that a
// previously failed operation is NOT retried while the configured retry backoff
// has not yet elapsed: any status patch issued by the controller fails the test.
func TestProcessRequestedAppOperation_RunningPreviouslyFailedBackoff(t *testing.T) {
	// Fixed: identifier was previously misspelled "failedAttemptFinisedAt".
	failedAttemptFinishedAt := time.Now().Add(-time.Second)
	app := newFakeApp()
	app.Operation = &v1alpha1.Operation{
		Sync: &v1alpha1.SyncOperation{},
		Retry: v1alpha1.RetryStrategy{
			Limit: 1,
			// A one-hour backoff guarantees the retry window cannot have elapsed
			// a single second after the failed attempt finished.
			Backoff: &v1alpha1.Backoff{
				Duration:    "1h",
				Factor:      ptr.To(int64(100)),
				MaxDuration: "1h",
			},
		},
	}
	app.Status.OperationState.Operation = *app.Operation
	app.Status.OperationState.Phase = synccommon.OperationRunning
	app.Status.OperationState.Message = "pending retry"
	app.Status.OperationState.RetryCount = 1
	app.Status.OperationState.FinishedAt = &metav1.Time{Time: failedAttemptFinishedAt}
	app.Status.OperationState.SyncResult.Resources = []*v1alpha1.ResourceResult{{
		Name:   "guestbook",
		Kind:   "Deployment",
		Group:  "apps",
		Status: synccommon.ResultCodeSyncFailed,
	}}
	data := &fakeData{
		apps: []runtime.Object{app, &defaultProj},
		manifestResponse: &apiclient.ManifestResponse{
			Manifests: []string{},
			Namespace: test.FakeDestNamespace,
			Server:    test.FakeClusterURL,
			Revision:  "abc123",
		},
	}
	ctrl := newFakeController(data, nil)
	fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
	// Any patch before the backoff expires means the controller retried too early.
	fakeAppCs.PrependReactor("patch", "*", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
		require.FailNow(t, "A patch should not have been called if the backoff has not passed")
		return true, &v1alpha1.Application{}, nil
	})
	ctrl.processRequestedAppOperation(app)
}
func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
@@ -2259,7 +2136,6 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
Sync: &v1alpha1.SyncOperation{},
Retry: v1alpha1.RetryStrategy{Limit: 10},
}
app.Status.OperationState.Operation = *app.Operation
app.Status.OperationState.Phase = synccommon.OperationTerminating
data := &fakeData{
@@ -2284,9 +2160,7 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
ctrl.processRequestedAppOperation(app)
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationFailed), phase)
assert.Equal(t, "Operation terminated", message)
}
func TestProcessRequestedAppOperation_Successful(t *testing.T) {
@@ -2313,91 +2187,12 @@ func TestProcessRequestedAppOperation_Successful(t *testing.T) {
ctrl.processRequestedAppOperation(app)
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
assert.Equal(t, "successfully synced (no more tasks)", message)
ok, level := ctrl.isRefreshRequested(ctrl.toAppKey(app.Name))
assert.True(t, ok)
assert.Equal(t, CompareWithLatestForceResolve, level)
}
// TestProcessRequestedAppOperation_SyncTimeout exercises the controller's
// syncTimeout handling: a running operation inside the timeout completes
// normally, an operation past the timeout is terminated by the controller,
// and an already-terminating operation simply finishes as failed.
func TestProcessRequestedAppOperation_SyncTimeout(t *testing.T) {
	testCases := []struct {
		name            string
		startedSince    time.Duration
		syncTimeout     time.Duration
		retryAttempt    int
		currentPhase    synccommon.OperationPhase
		expectedPhase   synccommon.OperationPhase
		expectedMessage string
	}{
		{
			name:            "Continue when running operation has not exceeded timeout",
			syncTimeout:     time.Minute,
			startedSince:    30 * time.Second,
			currentPhase:    synccommon.OperationRunning,
			expectedPhase:   synccommon.OperationSucceeded,
			expectedMessage: "successfully synced (no more tasks)",
		},
		{
			name:            "Continue when terminating operation has exceeded timeout",
			syncTimeout:     time.Minute,
			startedSince:    2 * time.Minute,
			currentPhase:    synccommon.OperationTerminating,
			expectedPhase:   synccommon.OperationFailed,
			expectedMessage: "Operation terminated",
		},
		{
			name:            "Terminate when running operation exceeded timeout",
			syncTimeout:     time.Minute,
			startedSince:    2 * time.Minute,
			currentPhase:    synccommon.OperationRunning,
			expectedPhase:   synccommon.OperationFailed,
			expectedMessage: "Operation terminated, triggered by controller sync timeout",
		},
		{
			name:            "Terminate when retried operation exceeded timeout",
			syncTimeout:     time.Minute,
			startedSince:    15 * time.Minute,
			currentPhase:    synccommon.OperationRunning,
			retryAttempt:    1,
			expectedPhase:   synccommon.OperationFailed,
			expectedMessage: "Operation terminated, triggered by controller sync timeout (retried 1 times).",
		},
	}
	for i, tc := range testCases {
		t.Run(fmt.Sprintf("case %d: %s", i, tc.name), func(t *testing.T) {
			app := newFakeApp()
			app.Spec.Project = "default"
			app.Operation = &v1alpha1.Operation{
				Sync: &v1alpha1.SyncOperation{
					Revision: "HEAD",
				},
			}
			// The controller must be created before the operation state is
			// mutated so the fake clientset sees the app in its initial form.
			ctrl := newFakeController(&fakeData{
				apps: []runtime.Object{app, &defaultProj},
				manifestResponses: []*apiclient.ManifestResponse{{
					Manifests: []string{},
				}},
			}, nil)
			ctrl.syncTimeout = tc.syncTimeout
			// Simulate an operation that started tc.startedSince ago.
			opStartedAt := metav1.NewTime(time.Now().Add(-tc.startedSince))
			opState := &v1alpha1.OperationState{
				Operation: *app.Operation,
				Phase:     tc.currentPhase,
				StartedAt: opStartedAt,
			}
			if tc.retryAttempt > 0 {
				opState.FinishedAt = ptr.To(opStartedAt)
				opState.RetryCount = int64(tc.retryAttempt)
			}
			app.Status.OperationState = opState

			ctrl.processRequestedAppOperation(app)

			updated, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.ObjectMeta.Namespace).Get(t.Context(), app.Name, metav1.GetOptions{})
			require.NoError(t, err)
			assert.Equal(t, tc.expectedPhase, updated.Status.OperationState.Phase)
			assert.Equal(t, tc.expectedMessage, updated.Status.OperationState.Message)
		})
	}
}
func TestGetAppHosts(t *testing.T) {
app := newFakeApp()
data := &fakeData{
@@ -2664,71 +2459,35 @@ func TestAppStatusIsReplaced(t *testing.T) {
func TestAlreadyAttemptSync(t *testing.T) {
app := newFakeApp()
defaultRevision := app.Status.OperationState.SyncResult.Revision
t.Run("no operation state", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState = nil
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
assert.False(t, attempted)
})
t.Run("no sync result for running sync", func(t *testing.T) {
t.Run("no sync operation", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult = nil
app.Status.OperationState.Phase = synccommon.OperationRunning
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
app.Status.OperationState.Operation.Sync = nil
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
assert.False(t, attempted)
})
t.Run("no sync result for completed sync", func(t *testing.T) {
t.Run("no sync result", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult = nil
app.Status.OperationState.Phase = synccommon.OperationError
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
assert.True(t, attempted)
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
assert.False(t, attempted)
})
t.Run("single source", func(t *testing.T) {
t.Run("no revision", func(t *testing.T) {
attempted, _, _ := alreadyAttemptedSync(app, []string{}, true)
assert.False(t, attempted)
})
t.Run("empty revision", func(t *testing.T) {
attempted, _, _ := alreadyAttemptedSync(app, []string{""}, true)
assert.False(t, attempted)
})
t.Run("too many revision", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revision = "sha"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha", "sha2"}, true)
assert.False(t, attempted)
})
t.Run("same manifest, same SHA with changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revision = "sha"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, true)
t.Run("same manifest with sync result", func(t *testing.T) {
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, false)
assert.True(t, attempted)
})
t.Run("same manifest, different SHA with changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revision = "sha1"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, true)
assert.False(t, attempted)
})
t.Run("same manifest, different SHA without changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revision = "sha1"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, false)
assert.True(t, attempted)
})
t.Run("different manifest, same SHA with changes", func(t *testing.T) {
t.Run("same manifest with sync result different targetRevision, same SHA", func(t *testing.T) {
// This test represents the case where the user changed a source's target revision to a new branch, but it
// points to the same revision as the old branch. We currently do not consider this as having been "already
// attempted." In the future we may want to short-circuit the auto-sync in these cases.
@@ -2736,101 +2495,55 @@ func TestAlreadyAttemptSync(t *testing.T) {
app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{TargetRevision: "branch1"}
app.Spec.Source = &v1alpha1.ApplicationSource{TargetRevision: "branch2"}
app.Status.OperationState.SyncResult.Revision = "sha"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, true)
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, false)
assert.False(t, attempted)
})
t.Run("different manifest, different SHA with changes", func(t *testing.T) {
t.Run("different manifest with sync result, different SHA", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
app.Status.OperationState.SyncResult.Revision = "sha1"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, true)
attempted, _ := alreadyAttemptedSync(app, "sha2", []string{}, false, true)
assert.False(t, attempted)
})
t.Run("different manifest, different SHA without changes", func(t *testing.T) {
t.Run("different manifest with sync result, same SHA", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
app.Status.OperationState.SyncResult.Revision = "sha1"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, false)
assert.False(t, attempted)
})
t.Run("different manifest, same SHA without changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
app.Status.OperationState.SyncResult.Revision = "sha"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, false)
assert.False(t, attempted)
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, true)
assert.True(t, attempted)
})
})
t.Run("multi-source", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
t.Run("same manifest, same SHAs with changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b"}, true)
t.Run("same manifest with sync result", func(t *testing.T) {
attempted, _ := alreadyAttemptedSync(app, "", []string{"sha"}, true, false)
assert.True(t, attempted)
})
t.Run("same manifest, different SHAs with changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, true)
assert.False(t, attempted)
})
t.Run("same manifest, different SHA without changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, false)
assert.True(t, attempted)
})
t.Run("different manifest, same SHA with changes", func(t *testing.T) {
t.Run("same manifest with sync result, different targetRevision, same SHA", func(t *testing.T) {
// This test represents the case where the user changed a source's target revision to a new branch, but it
// points to the same revision as the old branch. We currently do not consider this as having been "already
// attempted." In the future we may want to short-circuit the auto-sync in these cases.
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}, {TargetRevision: "branch2"}}
app.Spec.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}, {TargetRevision: "branch3"}}
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_2", "sha_b_2"}
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, false)
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}}
app.Spec.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch2"}}
app.Status.OperationState.SyncResult.Revisions = []string{"sha"}
attempted, _ := alreadyAttemptedSync(app, "", []string{"sha"}, true, false)
assert.False(t, attempted)
})
t.Run("different manifest, different SHA with changes", func(t *testing.T) {
t.Run("different manifest with sync result, different SHAs", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b_2"}, true)
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
attempted, _ := alreadyAttemptedSync(app, "", []string{"sha_a_2", "sha_b_2"}, true, true)
assert.False(t, attempted)
})
t.Run("different manifest, different SHA without changes", func(t *testing.T) {
t.Run("different manifest with sync result, same SHAs", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b_2"}, false)
assert.False(t, attempted)
})
t.Run("different manifest, same SHA without changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b"}, false)
assert.False(t, attempted)
attempted, _ := alreadyAttemptedSync(app, "", []string{"sha_a", "sha_b"}, true, true)
assert.True(t, attempted)
})
})
}
@@ -2842,13 +2555,14 @@ func assertDurationAround(t *testing.T, expected time.Duration, actual time.Dura
assert.LessOrEqual(t, expected, actual+delta)
}
func TestSelfHealRemainingBackoff(t *testing.T) {
func TestSelfHealExponentialBackoff(t *testing.T) {
ctrl := newFakeController(&fakeData{}, nil)
ctrl.selfHealBackoff = &wait.Backoff{
ctrl.selfHealBackOff = &wait.Backoff{
Factor: 3,
Duration: 2 * time.Second,
Cap: 2 * time.Minute,
}
app := &v1alpha1.Application{
Status: v1alpha1.ApplicationStatus{
OperationState: &v1alpha1.OperationState{
@@ -2860,108 +2574,156 @@ func TestSelfHealRemainingBackoff(t *testing.T) {
}
testCases := []struct {
attempts int
attempts int64
expectedAttempts int64
finishedAt *metav1.Time
expectedDuration time.Duration
shouldSelfHeal bool
alreadyAttempted bool
syncStatus v1alpha1.SyncStatusCode
}{{
attempts: 0,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 0,
shouldSelfHeal: true,
alreadyAttempted: true,
expectedAttempts: 0,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 1,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 2 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 1,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 2,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 6 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 2,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 3,
finishedAt: nil,
expectedDuration: 18 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 3,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 4,
finishedAt: nil,
expectedDuration: 54 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 4,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 5,
finishedAt: nil,
expectedDuration: 120 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 5,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 6,
finishedAt: nil,
expectedDuration: 120 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 6,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 6,
finishedAt: nil,
expectedDuration: 0,
shouldSelfHeal: true,
alreadyAttempted: false,
expectedAttempts: 0,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, { // backoff will not reset as finished tme isn't >= cooldown
attempts: 6,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 120 * time.Second,
shouldSelfHeal: false,
}, {
alreadyAttempted: true,
expectedAttempts: 6,
syncStatus: v1alpha1.SyncStatusCodeSynced,
}, { // backoff will reset as finished time is >= cooldown
attempts: 40,
finishedAt: &metav1.Time{Time: time.Now().Add(-1 * time.Minute)},
expectedDuration: 60 * time.Second,
shouldSelfHeal: false,
finishedAt: &metav1.Time{Time: time.Now().Add(-(1 * time.Minute))},
expectedDuration: -60 * time.Second,
shouldSelfHeal: true,
alreadyAttempted: true,
expectedAttempts: 0,
syncStatus: v1alpha1.SyncStatusCodeSynced,
}}
for i := range testCases {
tc := testCases[i]
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = tc.attempts
app.Status.OperationState.FinishedAt = tc.finishedAt
duration := ctrl.selfHealRemainingBackoff(app, tc.attempts)
shouldSelfHeal := duration <= 0
require.Equal(t, tc.shouldSelfHeal, shouldSelfHeal)
app.Status.Sync.Status = tc.syncStatus
ok, duration := ctrl.shouldSelfHeal(app, tc.alreadyAttempted)
require.Equal(t, ok, tc.shouldSelfHeal)
require.Equal(t, tc.expectedAttempts, app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
assertDurationAround(t, tc.expectedDuration, duration)
})
}
}
func TestSelfHealBackoffCooldownElapsed(t *testing.T) {
cooldown := time.Second * 30
ctrl := newFakeController(&fakeData{}, nil)
ctrl.selfHealBackoffCooldown = cooldown
func TestSyncTimeout(t *testing.T) {
testCases := []struct {
delta time.Duration
expectedPhase synccommon.OperationPhase
expectedMessage string
}{{
delta: 2 * time.Minute,
expectedPhase: synccommon.OperationFailed,
expectedMessage: "Operation terminated",
}, {
delta: 30 * time.Second,
expectedPhase: synccommon.OperationSucceeded,
expectedMessage: "successfully synced (no more tasks)",
}}
for i := range testCases {
tc := testCases[i]
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
app := newFakeApp()
app.Spec.Project = "default"
app.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Revision: "HEAD",
},
}
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponses: []*apiclient.ManifestResponse{{
Manifests: []string{},
}},
}, nil)
app := &v1alpha1.Application{
Status: v1alpha1.ApplicationStatus{
OperationState: &v1alpha1.OperationState{
Phase: synccommon.OperationSucceeded,
},
},
ctrl.syncTimeout = time.Minute
app.Status.OperationState = &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Revision: "HEAD",
},
},
Phase: synccommon.OperationRunning,
StartedAt: metav1.NewTime(time.Now().Add(-tc.delta)),
}
ctrl.processRequestedAppOperation(app)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.ObjectMeta.Namespace).Get(t.Context(), app.Name, metav1.GetOptions{})
require.NoError(t, err)
require.Equal(t, tc.expectedPhase, app.Status.OperationState.Phase)
require.Equal(t, tc.expectedMessage, app.Status.OperationState.Message)
})
}
t.Run("operation not completed", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.FinishedAt = nil
elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
assert.True(t, elapsed)
})
t.Run("successful operation finised after cooldown", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now().Add(-cooldown)}
elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
assert.True(t, elapsed)
})
t.Run("unsuccessful operation finised after cooldown", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.Phase = synccommon.OperationFailed
app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now().Add(-cooldown)}
elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
assert.False(t, elapsed)
})
t.Run("successful operation finised before cooldown", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now()}
elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
assert.False(t, elapsed)
})
}

View File

@@ -137,6 +137,8 @@ type LiveStateCache interface {
IsNamespaced(server *appv1.Cluster, gk schema.GroupKind) (bool, error)
// Returns synced cluster cache
GetClusterCache(server *appv1.Cluster) (clustercache.ClusterCache, error)
// Executes give callback against resource specified by the key and all its children
IterateHierarchy(server *appv1.Cluster, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
// Executes give callback against resources specified by the keys and all its children
IterateHierarchyV2(server *appv1.Cluster, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
// Returns state of live nodes which correspond for target nodes of specified application.
@@ -667,6 +669,17 @@ func (c *liveStateCache) IsNamespaced(server *appv1.Cluster, gk schema.GroupKind
return clusterInfo.IsNamespaced(gk)
}
// IterateHierarchy runs the given callback against the resource identified by
// key and every resource below it in the synced cache for the given cluster.
// It returns an error only when the cluster cache cannot be obtained/synced.
func (c *liveStateCache) IterateHierarchy(server *appv1.Cluster, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error {
	cluster, err := c.getSyncedCluster(server)
	if err != nil {
		return err
	}
	// Adapt the cache-level visitor to the appv1.ResourceNode callback shape.
	visit := func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) bool {
		return action(asResourceNode(resource), getApp(resource, namespaceResources))
	}
	cluster.IterateHierarchy(key, visit)
	return nil
}
func (c *liveStateCache) IterateHierarchyV2(server *appv1.Cluster, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error {
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {

Some files were not shown because too many files have changed in this diff Show More