Compare commits: commit-ser...temp-cherr

1 commit: e45593764b

.github/workflows/ci-build.yaml (vendored, 156 changes)
@@ -1,5 +1,5 @@
|
||||
name: Integration tests
|
||||
on:
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
@@ -13,7 +13,7 @@ on:
|
||||
|
||||
env:
|
||||
# Golang version to use across CI steps
|
||||
GOLANG_VERSION: '1.22'
|
||||
GOLANG_VERSION: '1.21'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -23,37 +23,12 @@ permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
changes:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
backend: ${{ steps.filter.outputs.backend_any_changed }}
|
||||
frontend: ${{ steps.filter.outputs.frontend_any_changed }}
|
||||
steps:
|
||||
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- uses: tj-actions/changed-files@90a06d6ba9543371ab4df8eeca0be07ca6054959 # v42.0.2
|
||||
id: filter
|
||||
with:
|
||||
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
|
||||
files_yaml: |
|
||||
backend:
|
||||
- '!ui/**'
|
||||
- '!**.md'
|
||||
- '!**/*.md'
|
||||
- '!docs/**'
|
||||
frontend:
|
||||
- 'ui/**'
|
||||
- Dockerfile
|
||||
docs:
|
||||
- 'docs/**'
|
||||
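As a reading aid for the changed-files filters above, a sketch of how a few hypothetical paths would be classified (the paths are illustrative, not taken from this diff):

    server/main.go                   -> backend (not under ui/ or docs/, not markdown)
    ui/src/app/index.tsx             -> frontend only ('!ui/**' excludes it from backend)
    docs/operator-manual/install.md  -> docs only ('!docs/**' and '!**/*.md' exclude it from backend)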
check-go:
|
||||
name: Ensure Go modules synchronicity
|
||||
if: ${{ needs.changes.outputs.backend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs:
|
||||
- changes
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
|
||||
with:
|
||||
@@ -61,20 +36,17 @@ jobs:
|
||||
- name: Download all Go modules
|
||||
run: |
|
||||
go mod download
|
||||
- name: Check for tidiness of go.mod and go.sum
|
||||
- name: Check for tidyness of go.mod and go.sum
|
||||
run: |
|
||||
go mod tidy
|
||||
git diff --exit-code -- .
|
||||
|
||||
build-go:
|
||||
name: Build & cache Go code
|
||||
if: ${{ needs.changes.outputs.backend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs:
|
||||
- changes
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
|
||||
with:
|
||||
@@ -95,13 +67,10 @@ jobs:
|
||||
contents: read # for actions/checkout to fetch code
|
||||
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
|
||||
name: Lint Go code
|
||||
if: ${{ needs.changes.outputs.backend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs:
|
||||
- changes
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
|
||||
with:
|
||||
@@ -114,19 +83,17 @@ jobs:
|
||||
|
||||
test-go:
|
||||
name: Run unit tests for Go packages
|
||||
if: ${{ needs.changes.outputs.backend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs:
|
||||
- build-go
|
||||
- changes
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
steps:
|
||||
- name: Create checkout directory
|
||||
run: mkdir -p ~/go/src/github.com/argoproj
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Create symlink in GOPATH
|
||||
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
|
||||
- name: Setup Golang
|
||||
@@ -171,31 +138,29 @@ jobs:
|
||||
- name: Run all unit tests
|
||||
run: make test-local
|
||||
- name: Generate code coverage artifacts
|
||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
|
||||
with:
|
||||
name: code-coverage
|
||||
path: coverage.out
|
||||
- name: Generate test results artifacts
|
||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
|
||||
with:
|
||||
name: test-results
|
||||
path: test-results/
|
||||
|
||||
test-go-race:
|
||||
name: Run unit tests with -race for Go packages
|
||||
if: ${{ needs.changes.outputs.backend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs:
|
||||
- build-go
|
||||
- changes
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
steps:
|
||||
- name: Create checkout directory
|
||||
run: mkdir -p ~/go/src/github.com/argoproj
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Create symlink in GOPATH
|
||||
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
|
||||
- name: Setup Golang
|
||||
@@ -240,20 +205,17 @@ jobs:
|
||||
- name: Run all unit tests
|
||||
run: make test-race-local
|
||||
- name: Generate test results artifacts
|
||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
|
||||
with:
|
||||
name: race-results
|
||||
path: test-results/
|
||||
|
||||
codegen:
|
||||
name: Check changes to generated code
|
||||
if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.docs == 'true'}}
|
||||
runs-on: ubuntu-22.04
|
||||
needs:
|
||||
- changes
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
|
||||
with:
|
||||
@@ -298,17 +260,14 @@ jobs:
|
||||
|
||||
build-ui:
|
||||
name: Build, test & lint UI code
|
||||
if: ${{ needs.changes.outputs.frontend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs:
|
||||
- changes
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Setup NodeJS
|
||||
uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
|
||||
with:
|
||||
node-version: '21.6.1'
|
||||
node-version: '20.7.0'
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
|
||||
@@ -333,17 +292,15 @@ jobs:
|
||||
|
||||
analyze:
|
||||
name: Process & analyze test artifacts
|
||||
if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.frontend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs:
|
||||
- test-go
|
||||
- build-ui
|
||||
- changes
|
||||
env:
|
||||
sonar_secret: ${{ secrets.SONAR_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Restore node dependency cache
|
||||
@@ -358,12 +315,12 @@ jobs:
|
||||
- name: Create test-results directory
|
||||
run: |
|
||||
mkdir -p test-results
|
||||
- name: Get code coverage artifact
|
||||
uses: actions/download-artifact@8caf195ad4b1dee92908e23f56eeb0696f1dd42d # v4.1.5
|
||||
- name: Get code coverage artifiact
|
||||
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
|
||||
with:
|
||||
name: code-coverage
|
||||
- name: Get test result artifact
|
||||
uses: actions/download-artifact@8caf195ad4b1dee92908e23f56eeb0696f1dd42d # v4.1.5
|
||||
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
|
||||
with:
|
||||
name: test-results
|
||||
path: test-results
|
||||
@@ -379,37 +336,35 @@ jobs:
|
||||
SCANNER_PATH: /tmp/cache/scanner
|
||||
OS: linux
|
||||
run: |
|
||||
# We do not use the provided action, because it does contain an old
|
||||
# version of the scanner, and also takes time to build.
|
||||
set -e
|
||||
mkdir -p ${SCANNER_PATH}
|
||||
export SONAR_USER_HOME=${SCANNER_PATH}/.sonar
|
||||
if [[ ! -x "${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner" ]]; then
|
||||
curl -Ol https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip
|
||||
unzip -qq -o sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip -d ${SCANNER_PATH}
|
||||
fi
|
||||
|
||||
chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
|
||||
chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/jre/bin/java
|
||||
|
||||
# Explicitly set NODE_MODULES
|
||||
export NODE_MODULES=${PWD}/ui/node_modules
|
||||
export NODE_PATH=${PWD}/ui/node_modules
|
||||
|
||||
${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
|
||||
# We do not use the provided action, because it does contain an old
|
||||
# version of the scanner, and also takes time to build.
|
||||
set -e
|
||||
mkdir -p ${SCANNER_PATH}
|
||||
export SONAR_USER_HOME=${SCANNER_PATH}/.sonar
|
||||
if [[ ! -x "${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner" ]]; then
|
||||
curl -Ol https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip
|
||||
unzip -qq -o sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip -d ${SCANNER_PATH}
|
||||
fi
|
||||
|
||||
chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
|
||||
chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/jre/bin/java
|
||||
|
||||
# Explicitly set NODE_MODULES
|
||||
export NODE_MODULES=${PWD}/ui/node_modules
|
||||
export NODE_PATH=${PWD}/ui/node_modules
|
||||
|
||||
${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
|
||||
if: env.sonar_secret != ''
|
||||
|
||||
test-e2e:
|
||||
name: Run end-to-end tests
|
||||
if: ${{ needs.changes.outputs.backend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
k3s-version: [v1.29.1, v1.28.6, v1.27.10, v1.26.13, v1.25.16]
|
||||
needs:
|
||||
k3s-version: [v1.28.2, v1.27.6, v1.26.9, v1.25.14]
|
||||
needs:
|
||||
- build-go
|
||||
- changes
|
||||
env:
|
||||
GOPATH: /home/runner/go
|
||||
ARGOCD_FAKE_IN_CLUSTER: "true"
|
||||
@@ -422,10 +377,10 @@ jobs:
|
||||
ARGOCD_APPLICATION_NAMESPACES: "argocd-e2e-external,argocd-e2e-external-2"
|
||||
ARGOCD_SERVER: "127.0.0.1:8088"
|
||||
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
|
||||
with:
|
||||
@@ -472,7 +427,7 @@ jobs:
|
||||
git config --global user.email "john.doe@example.com"
|
||||
- name: Pull Docker image required for tests
|
||||
run: |
|
||||
docker pull ghcr.io/dexidp/dex:v2.38.0
|
||||
docker pull ghcr.io/dexidp/dex:v2.37.0
|
||||
docker pull argoproj/argo-cd-ci-builder:v1.0.0
|
||||
docker pull redis:7.0.15-alpine
|
||||
- name: Create target directory for binaries in the build-process
|
||||
@@ -502,31 +457,8 @@ jobs:
|
||||
set -x
|
||||
make test-e2e-local
|
||||
- name: Upload e2e-server logs
|
||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
|
||||
with:
|
||||
name: e2e-server-k8s${{ matrix.k3s-version }}.log
|
||||
path: /tmp/e2e-server.log
|
||||
if: ${{ failure() }}
|
||||
|
||||
# workaround for status checks -- check this one job instead of each individual E2E job in the matrix
|
||||
# this allows us to skip the entire matrix when it doesn't need to run while still having accurate status checks
|
||||
# see:
|
||||
# https://github.com/argoproj/argo-workflows/pull/12006
|
||||
# https://github.com/orgs/community/discussions/9141#discussioncomment-2296809
|
||||
# https://github.com/orgs/community/discussions/26822#discussioncomment-3305794
|
||||
test-e2e-composite-result:
|
||||
name: E2E Tests - Composite result
|
||||
if: ${{ always() }}
|
||||
needs:
|
||||
- test-e2e
|
||||
- changes
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- run: |
|
||||
result="${{ needs.test-e2e.result }}"
|
||||
# mark as successful even if skipped
|
||||
if [[ $result == "success" || $result == "skipped" ]]; then
|
||||
exit 0
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
.github/workflows/codeql.yml (vendored, 2 changes)
@@ -29,7 +29,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout repository
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0

# Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
- name: Setup Golang

.github/workflows/image-reuse.yaml (vendored, 14 changes)
@@ -58,14 +58,14 @@ jobs:
|
||||
image-digest: ${{ steps.image.outputs.digest }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
if: ${{ github.ref_type == 'tag'}}
|
||||
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
if: ${{ github.ref_type != 'tag'}}
|
||||
|
||||
- name: Setup Golang
|
||||
@@ -104,7 +104,7 @@ jobs:
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
|
||||
- name: Login to Quay.io
|
||||
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
|
||||
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.quay_username }}
|
||||
@@ -112,7 +112,7 @@ jobs:
|
||||
if: ${{ inputs.quay_image_name && inputs.push }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
|
||||
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ secrets.ghcr_username }}
|
||||
@@ -120,7 +120,7 @@ jobs:
|
||||
if: ${{ inputs.ghcr_image_name && inputs.push }}
|
||||
|
||||
- name: Login to dockerhub Container Registry
|
||||
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
|
||||
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
|
||||
with:
|
||||
username: ${{ secrets.docker_username }}
|
||||
password: ${{ secrets.docker_password }}
|
||||
@@ -134,7 +134,7 @@ jobs:
|
||||
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV
|
||||
|
||||
- name: Free Disk Space (Ubuntu)
|
||||
uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be
|
||||
uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
|
||||
with:
|
||||
large-packages: false
|
||||
docker-images: false
|
||||
@@ -143,7 +143,7 @@ jobs:
|
||||
|
||||
- name: Build and push container image
|
||||
id: image
|
||||
uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 #v5.3.0
|
||||
uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 #v5.1.0
|
||||
with:
|
||||
context: .
|
||||
platforms: ${{ inputs.platforms }}
|
||||
|
||||
.github/workflows/image.yaml (vendored, 10 changes)
@@ -25,7 +25,7 @@ jobs:
|
||||
image-tag: ${{ steps.image.outputs.tag}}
|
||||
platforms: ${{ steps.platforms.outputs.platforms }}
|
||||
steps:
|
||||
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
|
||||
- name: Set image tag for ghcr
|
||||
run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
|
||||
@@ -52,7 +52,7 @@ jobs:
|
||||
uses: ./.github/workflows/image-reuse.yaml
|
||||
with:
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
go-version: 1.22
|
||||
go-version: 1.21
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: false
|
||||
|
||||
@@ -68,7 +68,7 @@ jobs:
|
||||
quay_image_name: quay.io/argoproj/argocd:latest
|
||||
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
go-version: 1.22
|
||||
go-version: 1.21
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: true
|
||||
secrets:
|
||||
@@ -86,7 +86,7 @@ jobs:
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.10.0
|
||||
with:
|
||||
image: ghcr.io/argoproj/argo-cd/argocd
|
||||
digest: ${{ needs.build-and-publish.outputs.image-digest }}
|
||||
@@ -104,7 +104,7 @@ jobs:
|
||||
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
|
||||
env:
|
||||
TOKEN: ${{ secrets.TOKEN }}
|
||||
|
||||
.github/workflows/init-release.yaml (vendored, 4 changes)
@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -64,7 +64,7 @@ jobs:
git stash pop

- name: Create pull request
uses: peter-evans/create-pull-request@9153d834b60caba6d51c9b9510b087acf9f33f83 # v6.0.4
uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"

.github/workflows/pr-title-check.yml (vendored, 2 changes)
@@ -23,7 +23,7 @@ jobs:
name: Validate PR Title
runs-on: ubuntu-latest
steps:
- uses: thehanimo/pr-title-checker@1d8cd483a2b73118406a187f54dca8a9415f1375 # v1.4.2
- uses: thehanimo/pr-title-checker@0cf5902181e78341bb97bb06646396e5bd354b3f # v1.4.0
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
configuration_path: ".github/pr-title-checker-config.json"

.github/workflows/release.yaml (vendored, 20 changes)
@@ -10,7 +10,7 @@ on:
|
||||
permissions: {}
|
||||
|
||||
env:
|
||||
GOLANG_VERSION: '1.22' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.21' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
@@ -23,7 +23,7 @@ jobs:
|
||||
with:
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
go-version: 1.22
|
||||
go-version: 1.21
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
@@ -38,7 +38,7 @@ jobs:
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.10.0
|
||||
with:
|
||||
image: quay.io/argoproj/argocd
|
||||
digest: ${{ needs.argocd-image.outputs.image-digest }}
|
||||
@@ -59,7 +59,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -88,7 +88,7 @@ jobs:
|
||||
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV
|
||||
|
||||
- name: Free Disk Space (Ubuntu)
|
||||
uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be
|
||||
uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
|
||||
with:
|
||||
large-packages: false
|
||||
docker-images: false
|
||||
@@ -128,7 +128,7 @@ jobs:
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.10.0
|
||||
with:
|
||||
base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
|
||||
provenance-name: "argocd-cli.intoto.jsonl"
|
||||
@@ -147,7 +147,7 @@ jobs:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -212,7 +212,7 @@ jobs:
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.10.0
|
||||
with:
|
||||
base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
|
||||
provenance-name: "argocd-sbom.intoto.jsonl"
|
||||
@@ -230,7 +230,7 @@ jobs:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -295,7 +295,7 @@ jobs:
|
||||
if: ${{ env.UPDATE_VERSION == 'true' }}
|
||||
|
||||
- name: Create PR to update VERSION on master branch
|
||||
uses: peter-evans/create-pull-request@9153d834b60caba6d51c9b9510b087acf9f33f83 # v6.0.4
|
||||
uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2
|
||||
with:
|
||||
commit-message: Bump version in master
|
||||
title: "chore: Bump version in master"
|
||||
|
||||
.github/workflows/scorecard.yaml (vendored, 8 changes)
@@ -30,12 +30,12 @@ jobs:

steps:
- name: "Checkout code"
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
persist-credentials: false

- name: "Run analysis"
uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # v2.2.0
with:
results_file: results.sarif
results_format: sarif
@@ -54,7 +54,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: SARIF file
path: results.sarif
@@ -62,6 +62,6 @@ jobs:

# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@83a02f7883b12e0e4e1a146174f5e2292a01e601 # v2.16.4
uses: github/codeql-action/upload-sarif@3ebbd71c74ef574dbc558c82f70e52732c8b44fe # v2.2.1
with:
sarif_file: results.sarif

.github/workflows/update-snyk.yaml (vendored, 2 changes)
@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Build reports

.gitignore (vendored, 1 change)
@@ -19,7 +19,6 @@ node_modules/
./test/cmp/*.sock
.envrc.remote
.*.swp
rerunreport.txt

# ignore built binaries
cmd/argocd/argocd

@@ -2,10 +2,9 @@
** @argoproj/argocd-approvers

# Docs
/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/README.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/mkdocs.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/mkdocs.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-docs

# CI
/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci

Dockerfile (10 changes)
@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:0bced47fffa3361afa981854fca
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.22.1@sha256:0b55ab82ac2a54a6f8f85ec8b943b9e470c39e32c109b766bbc1b801f3fa8d3b AS builder
FROM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS builder

RUN echo 'deb http://archive.debian.org/debian buster-backports main' >> /etc/apt/sources.list

@@ -28,7 +28,7 @@ WORKDIR /tmp
COPY hack/install.sh hack/tool-versions.sh ./
COPY hack/installers installers

RUN ./install.sh helm && \
RUN ./install.sh helm-linux && \
INSTALL_PATH=/usr/local/bin ./install.sh kustomize

####################################################################################################
@@ -51,7 +51,7 @@ RUN groupadd -g $ARGOCD_USER_ID argocd && \
apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y \
git git-lfs tini gpg tzdata connect-proxy && \
git git-lfs tini gpg tzdata && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

@@ -83,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:21.6.2@sha256:65998e325b06014d4f1417a8a6afb1540d1ac66521cca76f2221a6953947f9ee AS argocd-ui
FROM --platform=$BUILDPLATFORM docker.io/library/node:20.6.1@sha256:14bd39208dbc0eb171cbfb26ccb9ac09fa1b2eba04ccd528ab5d12983fd9ee24 AS argocd-ui

WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.22.1@sha256:0b55ab82ac2a54a6f8f85ec8b943b9e470c39e32c109b766bbc1b801f3fa8d3b AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS argocd-build

WORKDIR /go/src/github.com/argoproj/argo-cd

Makefile (100 changes)
@@ -3,7 +3,6 @@ CURRENT_DIR=$(shell pwd)
|
||||
DIST_DIR=${CURRENT_DIR}/dist
|
||||
CLI_NAME=argocd
|
||||
BIN_NAME=argocd
|
||||
CGO_FLAG=0
|
||||
|
||||
GEN_RESOURCES_CLI_NAME=argocd-resources-gen
|
||||
|
||||
@@ -23,21 +22,14 @@ KUBECTL_VERSION=$(shell go list -m k8s.io/client-go | head -n 1 | rev | cut -d'
|
||||
GOPATH?=$(shell if test -x `which go`; then go env GOPATH; else echo "$(HOME)/go"; fi)
|
||||
GOCACHE?=$(HOME)/.cache/go-build
|
||||
|
||||
# Docker command to use
|
||||
DOCKER?=docker
|
||||
ifeq ($(DOCKER),podman)
|
||||
PODMAN_ARGS=--userns keep-id
|
||||
else
|
||||
PODMAN_ARGS=
|
||||
endif
|
||||
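The DOCKER variable introduced above lets the container-based targets run under podman instead of docker; as an illustration (assuming podman is installed and using targets defined later in this Makefile):

    make DOCKER=podman test-tools-image   # build the test-tools image with podman
    make DOCKER=podman start-e2e          # run the e2e server container; PODMAN_ARGS adds --userns keep-id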
|
||||
DOCKER_SRCDIR?=$(GOPATH)/src
|
||||
DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
|
||||
|
||||
ARGOCD_PROCFILE?=Procfile
|
||||
|
||||
# pointing to python 3.7 to match https://github.com/argoproj/argo-cd/blob/master/.readthedocs.yml
|
||||
MKDOCS_DOCKER_IMAGE?=python:3.7-alpine
|
||||
# Strict mode has been disabled in latest versions of mkdocs-material.
|
||||
# Thus pointing to the older image of mkdocs-material matching the version used by argo-cd.
|
||||
MKDOCS_DOCKER_IMAGE?=squidfunk/mkdocs-material:4.1.1
|
||||
MKDOCS_RUN_ARGS?=
|
||||
|
||||
# Configuration for building argocd-test-tools image
|
||||
@@ -84,7 +76,7 @@ SUDO?=
|
||||
# Runs any command in the argocd-test-utils container in server mode
|
||||
# Server mode container will start with uid 0 and drop privileges during runtime
|
||||
define run-in-test-server
|
||||
$(SUDO) $(DOCKER) run --rm -it \
|
||||
$(SUDO) docker run --rm -it \
|
||||
--name argocd-test-server \
|
||||
-u $(CONTAINER_UID):$(CONTAINER_GID) \
|
||||
-e USER_ID=$(CONTAINER_UID) \
|
||||
@@ -109,14 +101,13 @@ define run-in-test-server
|
||||
-p ${ARGOCD_E2E_APISERVER_PORT}:8080 \
|
||||
-p 4000:4000 \
|
||||
-p 5000:5000 \
|
||||
$(PODMAN_ARGS) \
|
||||
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
|
||||
bash -c "$(1)"
|
||||
endef
|
||||
|
||||
# Runs any command in the argocd-test-utils container in client mode
|
||||
define run-in-test-client
|
||||
$(SUDO) $(DOCKER) run --rm -it \
|
||||
$(SUDO) docker run --rm -it \
|
||||
--name argocd-test-client \
|
||||
-u $(CONTAINER_UID):$(CONTAINER_GID) \
|
||||
-e HOME=/home/user \
|
||||
@@ -131,14 +122,13 @@ define run-in-test-client
|
||||
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
|
||||
-v /tmp:/tmp${VOLUME_MOUNT} \
|
||||
-w ${DOCKER_WORKDIR} \
|
||||
$(PODMAN_ARGS) \
|
||||
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
|
||||
bash -c "$(1)"
|
||||
endef
|
||||
|
||||
#
|
||||
define exec-in-test-server
|
||||
$(SUDO) $(DOCKER) exec -it -u $(CONTAINER_UID):$(CONTAINER_GID) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
|
||||
$(SUDO) docker exec -it -u $(CONTAINER_UID):$(CONTAINER_GID) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
|
||||
endef
|
||||
|
||||
PATH:=$(PATH):$(PWD)/hack
|
||||
@@ -185,21 +175,29 @@ endif
|
||||
.PHONY: all
|
||||
all: cli image
|
||||
|
||||
# We have some legacy requirements for being checked out within $GOPATH.
|
||||
# The ensure-gopath target can be used as dependency to ensure we are running
|
||||
# within these boundaries.
|
||||
.PHONY: ensure-gopath
|
||||
ensure-gopath:
|
||||
ifneq ("$(PWD)","$(LEGACY_PATH)")
|
||||
@echo "Due to legacy requirements for codegen, repository needs to be checked out within \$$GOPATH"
|
||||
@echo "Location of this repo should be '$(LEGACY_PATH)' but is '$(PWD)'"
|
||||
@exit 1
|
||||
endif
|
||||
|
||||
.PHONY: gogen
|
||||
gogen:
|
||||
gogen: ensure-gopath
|
||||
export GO111MODULE=off
|
||||
go generate ./util/argo/...
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: mod-vendor-local protogen-fast
|
||||
|
||||
.PHONY: protogen-fast
|
||||
protogen-fast:
|
||||
protogen: ensure-gopath mod-vendor-local
|
||||
export GO111MODULE=off
|
||||
./hack/generate-proto.sh
|
||||
|
||||
.PHONY: openapigen
|
||||
openapigen:
|
||||
openapigen: ensure-gopath
|
||||
export GO111MODULE=off
|
||||
./hack/update-openapi.sh
|
||||
|
||||
@@ -214,22 +212,19 @@ notification-docs:
|
||||
|
||||
|
||||
.PHONY: clientgen
|
||||
clientgen:
|
||||
clientgen: ensure-gopath
|
||||
export GO111MODULE=off
|
||||
./hack/update-codegen.sh
|
||||
|
||||
.PHONY: clidocsgen
|
||||
clidocsgen:
|
||||
clidocsgen: ensure-gopath
|
||||
go run tools/cmd-docs/main.go
|
||||
|
||||
|
||||
.PHONY: codegen-local
|
||||
codegen-local: mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
|
||||
codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
|
||||
rm -rf vendor/
|
||||
|
||||
.PHONY: codegen-local-fast
|
||||
codegen-local-fast: gogen protogen-fast clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
|
||||
|
||||
.PHONY: codegen
|
||||
codegen: test-tools-image
|
||||
$(call run-in-test-client,make codegen-local)
|
||||
@@ -240,11 +235,11 @@ cli: test-tools-image
|
||||
|
||||
.PHONY: cli-local
|
||||
cli-local: clean-debug
|
||||
CGO_ENABLED=${CGO_FLAG} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
|
||||
.PHONY: gen-resources-cli-local
|
||||
gen-resources-cli-local: clean-debug
|
||||
CGO_ENABLED=${CGO_FLAG} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
|
||||
|
||||
.PHONY: release-cli
|
||||
release-cli: clean-debug build-ui
|
||||
@@ -259,8 +254,8 @@ release-cli: clean-debug build-ui
|
||||
.PHONY: test-tools-image
|
||||
test-tools-image:
|
||||
ifndef SKIP_TEST_TOOLS_IMAGE
|
||||
$(SUDO) $(DOCKER) build --build-arg UID=$(CONTAINER_UID) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
|
||||
$(SUDO) $(DOCKER) tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
|
||||
$(SUDO) docker build --build-arg UID=$(CONTAINER_UID) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
|
||||
$(SUDO) docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
|
||||
endif
|
||||
|
||||
.PHONY: manifests-local
|
||||
@@ -274,25 +269,25 @@ manifests: test-tools-image
|
||||
# consolidated binary for cli, util, server, repo-server, controller
|
||||
.PHONY: argocd-all
|
||||
argocd-all: clean-debug
|
||||
CGO_ENABLED=${CGO_FLAG} GOOS=${GOOS} GOARCH=${GOARCH} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
|
||||
.PHONY: server
|
||||
server: clean-debug
|
||||
CGO_ENABLED=${CGO_FLAG} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
|
||||
|
||||
.PHONY: repo-server
|
||||
repo-server:
|
||||
CGO_ENABLED=${CGO_FLAG} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
|
||||
|
||||
.PHONY: controller
|
||||
controller:
|
||||
CGO_ENABLED=${CGO_FLAG} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
|
||||
|
||||
.PHONY: build-ui
|
||||
build-ui:
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build -t argocd-ui --platform=$(TARGET_ARCH) --target argocd-ui .
|
||||
DOCKER_BUILDKIT=1 docker build -t argocd-ui --platform=$(TARGET_ARCH) --target argocd-ui .
|
||||
find ./ui/dist -type f -not -name gitkeep -delete
|
||||
$(DOCKER) run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
|
||||
.PHONY: image
|
||||
ifeq ($(DEV_IMAGE), true)
|
||||
@@ -301,29 +296,29 @@ ifeq ($(DEV_IMAGE), true)
|
||||
# the dist directory is under .dockerignore.
|
||||
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
|
||||
image: build-ui
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t argocd-base --target argocd-base .
|
||||
CGO_ENABLED=${CGO_FLAG} GOOS=linux GOARCH=amd64 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
DOCKER_BUILDKIT=1 docker build --platform=$(TARGET_ARCH) -t argocd-base --target argocd-base .
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex
|
||||
cp Dockerfile.dev dist
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
DOCKER_BUILDKIT=1 docker build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
else
|
||||
image:
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
|
||||
DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
|
||||
endif
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then $(DOCKER) push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
|
||||
|
||||
.PHONY: armimage
|
||||
armimage:
|
||||
$(DOCKER) build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm .
|
||||
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm .
|
||||
|
||||
.PHONY: builder-image
|
||||
builder-image:
|
||||
$(DOCKER) build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) --target builder .
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then $(DOCKER) push $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) ; fi
|
||||
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) --target builder .
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) ; fi
|
||||
|
||||
.PHONY: mod-download
|
||||
mod-download: test-tools-image
|
||||
@@ -434,7 +429,7 @@ debug-test-client: test-tools-image
|
||||
# Starts e2e server in a container
|
||||
.PHONY: start-e2e
|
||||
start-e2e: test-tools-image
|
||||
$(DOCKER) version
|
||||
docker version
|
||||
mkdir -p ${GOCACHE}
|
||||
$(call run-in-test-server,make ARGOCD_PROCFILE=test/container/Procfile start-e2e-local)
|
||||
|
||||
@@ -481,7 +476,7 @@ clean: clean-debug
|
||||
|
||||
.PHONY: start
|
||||
start: test-tools-image
|
||||
$(DOCKER) version
|
||||
docker version
|
||||
$(call run-in-test-server,make ARGOCD_PROCFILE=test/container/Procfile start-local ARGOCD_START=${ARGOCD_START})
|
||||
|
||||
# Starts a local instance of ArgoCD
|
||||
@@ -531,7 +526,7 @@ build-docs-local:
|
||||
|
||||
.PHONY: build-docs
|
||||
build-docs:
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build'
|
||||
docker run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build'
|
||||
|
||||
.PHONY: serve-docs-local
|
||||
serve-docs-local:
|
||||
@@ -539,7 +534,8 @@ serve-docs-local:
|
||||
|
||||
.PHONY: serve-docs
|
||||
serve-docs:
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
|
||||
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}/site:/site -w /site --entrypoint "" ${MKDOCS_DOCKER_IMAGE} python3 -m http.server --bind 0.0.0.0 8000
|
||||
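A usage sketch for the docs targets above; the port and image come from the recipes themselves, and podman can be substituted via the DOCKER variable:

    make build-docs                 # render the site with the pinned MkDocs image
    make serve-docs                 # serve the docs on port 8000 (http://localhost:8000)
    make DOCKER=podman serve-docs   # same, using podman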
|
||||
|
||||
# Verify that kubectl can connect to your K8s cluster from Docker
|
||||
.PHONY: verify-kube-connect
|
||||
@@ -562,7 +558,7 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal
|
||||
.PHONY: install-test-tools-local
|
||||
install-test-tools-local:
|
||||
./hack/install.sh kustomize
|
||||
./hack/install.sh helm
|
||||
./hack/install.sh helm-linux
|
||||
./hack/install.sh gotestsum
|
||||
|
||||
# Installs all tools required for running codegen (Linux packages)
|
||||
@@ -591,7 +587,7 @@ list:
|
||||
|
||||
.PHONY: applicationset-controller
|
||||
applicationset-controller:
|
||||
GODEBUG="tarinsecurepath=0,zipinsecurepath=0" CGO_ENABLED=${CGO_FLAG} go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
|
||||
GODEBUG="tarinsecurepath=0,zipinsecurepath=0" CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
|
||||
|
||||
.PHONY: checksums
|
||||
checksums:
|
||||
|
||||
@@ -13,7 +13,6 @@
**Social:**
[](https://twitter.com/argoproj)
[](https://argoproj.github.io/community/join-slack)
[](https://www.linkedin.com/company/argoproj/)

# Argo CD - Declarative Continuous Delivery for Kubernetes

@@ -86,5 +85,4 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
1. [How to create Argo CD Applications Automatically using ApplicationSet? "Automation of GitOps"](https://amralaayassen.medium.com/how-to-create-argocd-applications-automatically-using-applicationset-automation-of-the-gitops-59455eaf4f72)
1. [Progressive Delivery with Service Mesh – Argo Rollouts with Istio](https://www.cncf.io/blog/2022/12/16/progressive-delivery-with-service-mesh-argo-rollouts-with-istio/)

USERS.md (28 changes)
@@ -18,15 +18,12 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Albert Heijn](https://ah.nl/)
|
||||
1. [Alibaba Group](https://www.alibabagroup.com/)
|
||||
1. [Allianz Direct](https://www.allianzdirect.de/)
|
||||
1. [AlphaSense](https://www.alpha-sense.com/)
|
||||
1. [Amadeus IT Group](https://amadeus.com/)
|
||||
1. [Ambassador Labs](https://www.getambassador.io/)
|
||||
1. [Ancestry](https://www.ancestry.com/)
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [Ant Group](https://www.antgroup.com/)
|
||||
1. [AppDirect](https://www.appdirect.com)
|
||||
1. [Arctiq Inc.](https://www.arctiq.ca)
|
||||
2. [Arturia](https://www.arturia.com)
|
||||
1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/)
|
||||
1. [Autodesk](https://www.autodesk.com)
|
||||
1. [Axians ACSP](https://www.axians.fr)
|
||||
@@ -36,7 +33,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
|
||||
1. [Beat](https://thebeat.co/en/)
|
||||
1. [Beez Innovation Labs](https://www.beezlabs.com/)
|
||||
1. [Bedag Informatik AG](https://www.bedag.ch/)
|
||||
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
|
||||
1. [BigPanda](https://bigpanda.io)
|
||||
1. [BioBox Analytics](https://biobox.io)
|
||||
@@ -48,19 +44,18 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Camptocamp](https://camptocamp.com)
|
||||
1. [Candis](https://www.candis.io)
|
||||
1. [Capital One](https://www.capitalone.com)
|
||||
1. [CARFAX Europe](https://www.carfax.eu)
|
||||
1. [CARFAX](https://www.carfax.com)
|
||||
1. [CARFAX Europe](https://www.carfax.eu)
|
||||
1. [Carrefour Group](https://www.carrefour.com)
|
||||
1. [Casavo](https://casavo.com)
|
||||
1. [Celonis](https://www.celonis.com/)
|
||||
1. [CERN](https://home.cern/)
|
||||
1. [Chainnodes](https://chainnodes.org)
|
||||
1. [Chargetrip](https://chargetrip.com)
|
||||
1. [Chainnodes](https://chainnodes.org)
|
||||
1. [Chime](https://www.chime.com)
|
||||
1. [Cisco ET&I](https://eti.cisco.com/)
|
||||
1. [Cloud Posse](https://www.cloudposse.com/)
|
||||
1. [Cloud Scale](https://cloudscaleinc.com/)
|
||||
1. [CloudGeometry](https://www.cloudgeometry.io/)
|
||||
1. [Cloudmate](https://cloudmt.co.kr/)
|
||||
1. [Cloudogu](https://cloudogu.com/)
|
||||
1. [Cobalt](https://www.cobalt.io/)
|
||||
@@ -99,7 +94,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Fave](https://myfave.com)
|
||||
1. [Flexport](https://www.flexport.com/)
|
||||
1. [Flip](https://flip.id)
|
||||
1. [Fly Security](https://www.flysecurity.com.br/)
|
||||
1. [Fonoa](https://www.fonoa.com/)
|
||||
1. [Fortra](https://www.fortra.com)
|
||||
1. [freee](https://corp.freee.co.jp/en/company/)
|
||||
@@ -118,8 +112,8 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [GlueOps](https://glueops.dev)
|
||||
1. [GMETRI](https://gmetri.com/)
|
||||
1. [Gojek](https://www.gojek.io/)
|
||||
1. [GoTo Financial](https://gotofinancial.com/)
|
||||
1. [GoTo](https://www.goto.com/)
|
||||
1. [GoTo Financial](https://gotofinancial.com/)
|
||||
1. [Greenpass](https://www.greenpass.com.br/)
|
||||
1. [Gridfuse](https://gridfuse.com/)
|
||||
1. [Groww](https://groww.in)
|
||||
@@ -132,12 +126,9 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Hiya](https://hiya.com)
|
||||
1. [Honestbank](https://honestbank.com)
|
||||
1. [Hostinger](https://www.hostinger.com)
|
||||
1. [IABAI](https://www.iab.ai)
|
||||
1. [IBM](https://www.ibm.com/)
|
||||
1. [Ibotta](https://home.ibotta.com)
|
||||
1. [IFS](https://www.ifs.com)
|
||||
1. [IITS-Consulting](https://iits-consulting.de)
|
||||
1. [IllumiDesk](https://www.illumidesk.com)
|
||||
1. [imaware](https://imaware.health)
|
||||
1. [Indeed](https://indeed.com)
|
||||
1. [Index Exchange](https://www.indexexchange.com/)
|
||||
@@ -183,7 +174,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Meilleurs Agents](https://www.meilleursagents.com/)
|
||||
1. [Mercedes-Benz Tech Innovation](https://www.mercedes-benz-techinnovation.com/)
|
||||
1. [Mercedes-Benz.io](https://www.mercedes-benz.io/)
|
||||
1. [Metacore Games](https://metacoregames.com/)
|
||||
1. [Metanet](http://www.metanet.co.kr/en/)
|
||||
1. [MindSpore](https://mindspore.cn)
|
||||
1. [Mirantis](https://mirantis.com/)
|
||||
@@ -196,7 +186,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Natura &Co](https://naturaeco.com/)
|
||||
1. [Nethopper](https://nethopper.io)
|
||||
1. [New Relic](https://newrelic.com/)
|
||||
1. [Nextbasket](https://nextbasket.com)
|
||||
1. [Nextdoor](https://nextdoor.com/)
|
||||
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
|
||||
1. [Nitro](https://gonitro.com)
|
||||
@@ -204,11 +193,9 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Objective](https://www.objective.com.br/)
|
||||
1. [OCCMundial](https://occ.com.mx)
|
||||
1. [Octadesk](https://octadesk.com)
|
||||
1. [Octopus Deploy](https://octopus.com)
|
||||
1. [Olfeo](https://www.olfeo.com/)
|
||||
1. [omegaUp](https://omegaUp.com)
|
||||
1. [Omni](https://omni.se/)
|
||||
1. [Oncourse Home Solutions](https://oncoursehome.com/)
|
||||
1. [openEuler](https://openeuler.org)
|
||||
1. [openGauss](https://opengauss.org/)
|
||||
1. [OpenGov](https://opengov.com)
|
||||
@@ -225,16 +212,13 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [PagerDuty](https://www.pagerduty.com/)
|
||||
1. [Pandosearch](https://www.pandosearch.com/en/home)
|
||||
1. [Patreon](https://www.patreon.com/)
|
||||
1. [PayIt](https://payitgov.com/)
|
||||
1. [PayPay](https://paypay.ne.jp/)
|
||||
1. [Peloton Interactive](https://www.onepeloton.com/)
|
||||
1. [Percona](https://percona.com/)
|
||||
1. [PGS](https://www.pgs.com)
|
||||
1. [Pigment](https://www.gopigment.com/)
|
||||
1. [Pipedrive](https://www.pipedrive.com/)
|
||||
1. [Pipefy](https://www.pipefy.com/)
|
||||
1. [Pismo](https://pismo.io/)
|
||||
1. [PITS Globale Datenrettungsdienste](https://www.pitsdatenrettung.de/)
|
||||
1. [Platform9 Systems](https://platform9.com/)
|
||||
1. [Polarpoint.io](https://polarpoint.io)
|
||||
1. [PostFinance](https://github.com/postfinance)
|
||||
@@ -251,16 +235,15 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Quipper](https://www.quipper.com/)
|
||||
1. [RapidAPI](https://www.rapidapi.com/)
|
||||
1. [rebuy](https://www.rebuy.de/)
|
||||
1. [Recreation.gov](https://www.recreation.gov/)
|
||||
1. [Red Hat](https://www.redhat.com/)
|
||||
1. [Redpill Linpro](https://www.redpill-linpro.com/)
|
||||
1. [Reenigne Cloud](https://reenigne.ca)
|
||||
1. [reev.com](https://www.reev.com/)
|
||||
1. [RightRev](https://rightrev.com/)
|
||||
1. [Rijkswaterstaat](https://www.rijkswaterstaat.nl/en)
|
||||
1. [Rise](https://www.risecard.eu/)
|
||||
1. [Riskified](https://www.riskified.com/)
|
||||
1. [Robotinfra](https://www.robotinfra.com)
|
||||
1. [Rocket.Chat](https://rocket.chat)
|
||||
1. [Rubin Observatory](https://www.lsst.org)
|
||||
1. [Saildrone](https://www.saildrone.com/)
|
||||
1. [Salad Technologies](https://salad.com/)
|
||||
@@ -271,7 +254,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [SCRM Lidl International Hub](https://scrm.lidl)
|
||||
1. [SEEK](https://seek.com.au)
|
||||
1. [Semgrep](https://semgrep.com)
|
||||
1. [Shield](https://shield.com)
|
||||
1. [SI Analytics](https://si-analytics.ai)
|
||||
1. [Skit](https://skit.ai/)
|
||||
1. [Skyscanner](https://www.skyscanner.net/)
|
||||
@@ -287,7 +269,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Splunk](https://splunk.com/)
|
||||
1. [Spores Labs](https://spores.app)
|
||||
1. [Statsig](https://statsig.com)
|
||||
1. [SternumIOT](https://sternumiot.com)
|
||||
1. [StreamNative](https://streamnative.io)
|
||||
1. [Stuart](https://stuart.com/)
|
||||
1. [Sumo Logic](https://sumologic.com/)
|
||||
@@ -301,7 +282,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Tamkeen Technologies](https://tamkeentech.sa/)
|
||||
1. [Techcombank](https://www.techcombank.com.vn/trang-chu)
|
||||
1. [Technacy](https://www.technacy.it/)
|
||||
1. [Telavita](https://www.telavita.com.br/)
|
||||
1. [Tesla](https://tesla.com/)
|
||||
1. [The Scale Factory](https://www.scalefactory.com/)
|
||||
1. [ThousandEyes](https://www.thousandeyes.com/)
|
||||
|
||||
@@ -17,10 +17,9 @@ package controllers
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
log "github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -126,20 +125,18 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
// Log a warning if there are unrecognized generators
|
||||
_ = utils.CheckInvalidGenerators(&applicationSetInfo)
|
||||
// desiredApplications is the main list of all expected Applications from all generators in this appset.
|
||||
desiredApplications, applicationSetReason, generatorsErr := r.generateApplications(logCtx, applicationSetInfo)
|
||||
if generatorsErr != nil {
|
||||
desiredApplications, applicationSetReason, err := r.generateApplications(logCtx, applicationSetInfo)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: generatorsErr.Error(),
|
||||
Message: err.Error(),
|
||||
Reason: string(applicationSetReason),
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
if len(desiredApplications) < 1 {
|
||||
return ctrl.Result{}, generatorsErr
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
parametersGenerated = true
|
||||
@@ -167,16 +164,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
|
||||
}
|
||||
|
||||
currentApplications, err := r.getCurrentApplications(ctx, applicationSetInfo)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
|
||||
}
|
||||
|
||||
err = r.updateResourcesStatus(ctx, logCtx, &applicationSetInfo, currentApplications)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get update resources status for application set: %w", err)
|
||||
}
|
||||
|
||||
// appMap is a name->app collection of Applications in this ApplicationSet.
|
||||
appMap := map[string]argov1alpha1.Application{}
|
||||
// appSyncMap tracks which apps will be synced during this reconciliation.
|
||||
@@ -193,11 +180,16 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
}
|
||||
} else if applicationSetInfo.Spec.Strategy != nil {
|
||||
// appset uses progressive sync
|
||||
for _, app := range currentApplications {
|
||||
applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
|
||||
}
|
||||
|
||||
for _, app := range applications {
|
||||
appMap[app.Name] = app
|
||||
}
|
||||
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, desiredApplications, appMap)
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, applications, desiredApplications, appMap)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
|
||||
}
|
||||
@@ -318,7 +310,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
|
||||
requeueAfter := r.getMinRequeueAfter(&applicationSetInfo)
|
||||
|
||||
if len(validateErrors) == 0 && generatorsErr == nil {
|
||||
if len(validateErrors) == 0 {
|
||||
if err := r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
@@ -1358,86 +1350,6 @@ func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplica
|
||||
return -1
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, logCtx *log.Entry, appset *argov1alpha1.ApplicationSet, apps []argov1alpha1.Application) error {
|
||||
statusMap := getResourceStatusMap(appset)
|
||||
statusMap = buildResourceStatus(statusMap, apps)
|
||||
|
||||
statuses := []argov1alpha1.ResourceStatus{}
|
||||
for _, status := range statusMap {
|
||||
statuses = append(statuses, status)
|
||||
}
|
||||
appset.Status.Resources = statuses
|
||||
|
||||
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
|
||||
err := r.Client.Status().Update(ctx, appset)
|
||||
if err != nil {
|
||||
|
||||
logCtx.Errorf("unable to set application set status: %v", err)
|
||||
return fmt.Errorf("unable to set application set status: %v", err)
|
||||
}
|
||||
|
||||
if err := r.Get(ctx, namespacedName, appset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildResourceStatus(statusMap map[string]argov1alpha1.ResourceStatus, apps []argov1alpha1.Application) map[string]argov1alpha1.ResourceStatus {
|
||||
appMap := map[string]argov1alpha1.Application{}
|
||||
for _, app := range apps {
|
||||
appCopy := app
|
||||
appMap[app.Name] = app
|
||||
|
||||
gvk := app.GroupVersionKind()
|
||||
// Create status if it does not exist
|
||||
status, ok := statusMap[app.Name]
|
||||
if !ok {
|
||||
status = argov1alpha1.ResourceStatus{
|
||||
Group: gvk.Group,
|
||||
Version: gvk.Version,
|
||||
Kind: gvk.Kind,
|
||||
Name: app.Name,
|
||||
Namespace: app.Namespace,
|
||||
Status: app.Status.Sync.Status,
|
||||
Health: &appCopy.Status.Health,
|
||||
}
|
||||
}
|
||||
|
||||
status.Group = gvk.Group
|
||||
status.Version = gvk.Version
|
||||
status.Kind = gvk.Kind
|
||||
status.Name = app.Name
|
||||
status.Namespace = app.Namespace
|
||||
status.Status = app.Status.Sync.Status
|
||||
status.Health = &appCopy.Status.Health
|
||||
|
||||
statusMap[app.Name] = status
|
||||
}
|
||||
cleanupDeletedApplicationStatuses(statusMap, appMap)
|
||||
|
||||
return statusMap
|
||||
}
|
||||
|
||||
func getResourceStatusMap(appset *argov1alpha1.ApplicationSet) map[string]argov1alpha1.ResourceStatus {
|
||||
statusMap := map[string]argov1alpha1.ResourceStatus{}
|
||||
for _, status := range appset.Status.Resources {
|
||||
statusMap[status.Name] = status
|
||||
}
|
||||
return statusMap
|
||||
}
|
||||
|
||||
func cleanupDeletedApplicationStatuses(statusMap map[string]argov1alpha1.ResourceStatus, apps map[string]argov1alpha1.Application) {
|
||||
for name := range statusMap {
|
||||
if _, ok := apps[name]; !ok {
|
||||
delete(statusMap, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setApplicationSetApplicationStatus updates the ApplicationSet's status field
|
||||
// with any new/changed Application statuses.
|
||||
func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
|
||||
@@ -1615,14 +1527,10 @@ func shouldRequeueApplicationSet(appOld *argov1alpha1.Application, appNew *argov
|
||||
}
|
||||
|
||||
// the applicationset controller owns the application spec, labels, annotations, and finalizers on the applications
|
||||
// reflect.DeepEqual considers nil slices/maps not equal to empty slices/maps
|
||||
// https://pkg.go.dev/reflect#DeepEqual
|
||||
// ApplicationDestination has an unexported field so we can just use == for comparison
|
||||
if !cmp.Equal(appOld.Spec, appNew.Spec, cmpopts.EquateEmpty(), cmpopts.EquateComparable(argov1alpha1.ApplicationDestination{})) ||
|
||||
!cmp.Equal(appOld.ObjectMeta.GetAnnotations(), appNew.ObjectMeta.GetAnnotations(), cmpopts.EquateEmpty()) ||
|
||||
!cmp.Equal(appOld.ObjectMeta.GetLabels(), appNew.ObjectMeta.GetLabels(), cmpopts.EquateEmpty()) ||
|
||||
!cmp.Equal(appOld.ObjectMeta.GetFinalizers(), appNew.ObjectMeta.GetFinalizers(), cmpopts.EquateEmpty()) {
|
||||
|
||||
if !reflect.DeepEqual(appOld.Spec, appNew.Spec) ||
|
||||
!reflect.DeepEqual(appOld.ObjectMeta.GetAnnotations(), appNew.ObjectMeta.GetAnnotations()) ||
|
||||
!reflect.DeepEqual(appOld.ObjectMeta.GetLabels(), appNew.ObjectMeta.GetLabels()) ||
|
||||
!reflect.DeepEqual(appOld.ObjectMeta.GetFinalizers(), appNew.ObjectMeta.GetFinalizers()) {
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
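To make the nil-versus-empty distinction called out in the comments above concrete, here is a minimal standalone Go sketch (not part of this change) using the reflect and go-cmp calls referenced in the hunk:

package main

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	var nilLabels map[string]string    // nil map, e.g. labels never set
	emptyLabels := map[string]string{} // empty but non-nil map

	// reflect.DeepEqual reports nil and empty maps as different values.
	fmt.Println(reflect.DeepEqual(nilLabels, emptyLabels)) // false

	// cmp.Equal with cmpopts.EquateEmpty reports them as equal,
	// which is exactly the distinction the comments above describe.
	fmt.Println(cmp.Equal(nilLabels, emptyLabels, cmpopts.EquateEmpty())) // true
}

Whichever direction the hunk is read, the practical difference is whether a label, annotation, or finalizer set flipping between nil and empty counts as a change worth requeuing the ApplicationSet for.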
@@ -2423,91 +2423,6 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestReconcilerCreateAppsRecoveringRenderError(t *testing.T) {
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
err = v1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
project := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
}
|
||||
appSet := v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"name": "very-good-app"}`),
|
||||
}, {
|
||||
Raw: []byte(`{"name": "bad-app"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
Name: "{{ index (splitList \"-\" .name ) 2 }}",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{RepoURL: "https://github.com/argoproj/argocd-example-apps", Path: "guestbook"},
|
||||
Project: "default",
|
||||
Destination: v1alpha1.ApplicationDestination{Server: "https://kubernetes.default.svc"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset()
|
||||
argoDBMock := dbmocks.ArgoDB{}
|
||||
argoObjs := []runtime.Object{&project}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
},
|
||||
ArgoDB: &argoDBMock,
|
||||
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
|
||||
KubeClientset: kubeclientset,
|
||||
Policy: v1alpha1.ApplicationsSyncPolicySync,
|
||||
ArgoCDNamespace: "argocd",
|
||||
}
|
||||
|
||||
req := ctrl.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Namespace: "argocd",
|
||||
Name: "name",
|
||||
},
|
||||
}
|
||||
|
||||
// Verify that on generatorsError, no error is returned, but the object is requeued
|
||||
res, err := r.Reconcile(context.Background(), req)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, res.RequeueAfter == ReconcileRequeueOnValidationError)
|
||||
|
||||
var app v1alpha1.Application
|
||||
|
||||
// make sure good app got created
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "app"}, &app)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, app.Name, "app")
|
||||
}
|
||||
|
||||
func TestSetApplicationSetStatusCondition(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
@@ -6067,219 +5982,6 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateResourceStatus(t *testing.T) {
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = v1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, cc := range []struct {
|
||||
name string
|
||||
appSet v1alpha1.ApplicationSet
|
||||
apps []v1alpha1.Application
|
||||
expectedResources []v1alpha1.ResourceStatus
|
||||
}{
|
||||
{
|
||||
name: "handles an empty application list",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Resources: []v1alpha1.ResourceStatus{},
|
||||
},
|
||||
},
|
||||
apps: []v1alpha1.Application{},
|
||||
expectedResources: nil,
|
||||
},
|
||||
{
|
||||
name: "adds status if no existing statuses",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{},
|
||||
},
|
||||
},
|
||||
apps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Status: v1alpha1.ApplicationStatus{
|
||||
Sync: v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
},
|
||||
Health: v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusHealthy,
|
||||
Message: "OK",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: []v1alpha1.ResourceStatus{
|
||||
{
|
||||
Name: "app1",
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
Health: &v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusHealthy,
|
||||
Message: "OK",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "handles an applicationset with existing and up-to-date status",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Resources: []v1alpha1.ResourceStatus{
|
||||
{
|
||||
Name: "app1",
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
Health: &v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusHealthy,
|
||||
Message: "OK",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
apps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Status: v1alpha1.ApplicationStatus{
|
||||
Sync: v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
},
|
||||
Health: v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusHealthy,
|
||||
Message: "OK",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: []v1alpha1.ResourceStatus{
|
||||
{
|
||||
Name: "app1",
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
Health: &v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusHealthy,
|
||||
Message: "OK",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "updates an applicationset with existing and out of date status",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Resources: []v1alpha1.ResourceStatus{
|
||||
{
|
||||
Name: "app1",
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Health: &v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusProgressing,
|
||||
Message: "Progressing",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
apps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Status: v1alpha1.ApplicationStatus{
|
||||
Sync: v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
},
|
||||
Health: v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusHealthy,
|
||||
Message: "OK",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: []v1alpha1.ResourceStatus{
|
||||
{
|
||||
Name: "app1",
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
Health: &v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusHealthy,
|
||||
Message: "OK",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deletes an applicationset status if the application no longer exists",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Resources: []v1alpha1.ResourceStatus{
|
||||
{
|
||||
Name: "app1",
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
Health: &v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusHealthy,
|
||||
Message: "OK",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
apps: []v1alpha1.Application{},
|
||||
expectedResources: nil,
|
||||
},
|
||||
} {
|
||||
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
argoDBMock := dbmocks.ArgoDB{}
|
||||
argoObjs := []runtime.Object{}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).Build()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{},
|
||||
ArgoDB: &argoDBMock,
|
||||
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
|
||||
KubeClientset: kubeclientset,
|
||||
}
|
||||
|
||||
err := r.updateResourcesStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps)
|
||||
|
||||
assert.Equal(t, err, nil, "expected no errors, but errors occured")
|
||||
assert.Equal(t, cc.expectedResources, cc.appSet.Status.Resources, "expected resources did not match actual")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOwnsHandler(t *testing.T) {
|
||||
// progressive syncs do not affect create, delete, or generic
|
||||
ownsHandler := getOwnsHandlerPredicates(true)
|
||||
@@ -6385,70 +6087,14 @@ func TestOwnsHandler(t *testing.T) {
|
||||
ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}},
|
||||
ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"bar": "foo"}}},
|
||||
}}, want: true},
|
||||
{name: "DifferentApplicationLabelsNil", args: args{e: event.UpdateEvent{
|
||||
ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}},
|
||||
ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: nil}},
|
||||
}}, want: false},
|
||||
{name: "DifferentApplicationAnnotations", args: args{e: event.UpdateEvent{
|
||||
ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"foo": "bar"}}},
|
||||
ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"bar": "foo"}}},
|
||||
}}, want: true},
|
||||
{name: "DifferentApplicationAnnotationsNil", args: args{e: event.UpdateEvent{
|
||||
ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}},
|
||||
ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: nil}},
|
||||
}}, want: false},
|
||||
{name: "DifferentApplicationFinalizers", args: args{e: event.UpdateEvent{
|
||||
ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Finalizers: []string{"argo"}}},
|
||||
ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Finalizers: []string{"none"}}},
|
||||
}}, want: true},
|
||||
{name: "DifferentApplicationFinalizersNil", args: args{e: event.UpdateEvent{
|
||||
ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Finalizers: []string{}}},
|
||||
ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Finalizers: nil}},
|
||||
}}, want: false},
|
||||
{name: "ApplicationDestinationSame", args: args{e: event.UpdateEvent{
|
||||
ObjectOld: &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "server",
|
||||
Namespace: "ns",
|
||||
Name: "name",
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectNew: &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "server",
|
||||
Namespace: "ns",
|
||||
Name: "name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enableProgressiveSyncs: true,
|
||||
}, want: false},
|
||||
{name: "ApplicationDestinationDiff", args: args{e: event.UpdateEvent{
|
||||
ObjectOld: &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "server",
|
||||
Namespace: "ns",
|
||||
Name: "name",
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectNew: &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "notSameServer",
|
||||
Namespace: "ns",
|
||||
Name: "name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enableProgressiveSyncs: true,
|
||||
}, want: true},
|
||||
{name: "NotAnAppOld", args: args{e: event.UpdateEvent{
|
||||
ObjectOld: &v1alpha1.AppProject{},
|
||||
ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"bar": "foo"}}},
|
||||
(binary image changed: before 27 KiB, after 26 KiB)
@@ -975,25 +975,6 @@
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"name": "sourcePositions",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"name": "revisions",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -2030,43 +2011,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/{name}/resource-tree": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationSetService"
|
||||
],
|
||||
"summary": "ResourceTree returns resource tree",
|
||||
"operationId": "ApplicationSetService_ResourceTree",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "The application set namespace. Default empty is argocd control plane namespace.",
|
||||
"name": "appsetNamespace",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetTree"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
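For orientation, a minimal Go sketch of calling the resource-tree endpoint described above; the base URL, ApplicationSet name, and token are assumed placeholders, not values taken from this spec:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Placeholders: adjust for your Argo CD API server and credentials.
	base := "https://argocd.example.com"
	name := "guestbook-appset"
	token := os.Getenv("ARGOCD_TOKEN")

	// GET /api/v1/applicationsets/{name}/resource-tree with the optional
	// appsetNamespace query parameter from the spec above.
	endpoint := fmt.Sprintf("%s/api/v1/applicationsets/%s/resource-tree?appsetNamespace=%s",
		base, url.PathEscape(name), url.QueryEscape("argocd"))

	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// On success the body is a v1alpha1ApplicationSetTree JSON document.
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}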
|
||||
"/api/v1/certificates": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -5720,10 +5664,6 @@
|
||||
"type": "string",
|
||||
"title": "ClusterName contains AWS cluster name"
|
||||
},
|
||||
"profile": {
|
||||
"description": "Profile contains optional role ARN. If set then AWS IAM Authenticator uses the profile to perform cluster operations instead of the default AWS credential provider chain.",
|
||||
"type": "string"
|
||||
},
|
||||
"roleARN": {
|
||||
"description": "RoleARN contains optional role ARN. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.",
|
||||
"type": "string"
|
||||
@@ -6225,13 +6165,6 @@
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetCondition"
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"description": "Resources is a list of Applications resources managed by this application set.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ResourceStatus"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -6303,19 +6236,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetTree": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSetTree holds nodes which belongs to the application\nUsed to build a tree of an ApplicationSet and its children",
|
||||
"properties": {
|
||||
"nodes": {
|
||||
"type": "array",
|
||||
"title": "Nodes contains list of nodes which are directly managed by the applicationset",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ResourceNode"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSource": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSource contains all required information about the source of an application",
|
||||
@@ -6500,10 +6420,6 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"labelWithoutSelector": {
|
||||
"type": "boolean",
|
||||
"title": "LabelWithoutSelector specifies whether to apply common labels to resource selectors or not"
|
||||
},
|
||||
"namePrefix": {
|
||||
"type": "string",
|
||||
"title": "NamePrefix is a prefix appended to resources for Kustomize apps"
|
||||
@@ -8584,9 +8500,6 @@
|
||||
"format": "int64",
|
||||
"title": "ID is an auto incrementing identifier of the RevisionHistory"
|
||||
},
|
||||
"initiatedBy": {
|
||||
"$ref": "#/definitions/v1alpha1OperationInitiator"
|
||||
},
|
||||
"revision": {
|
||||
"type": "string",
|
||||
"title": "Revision holds the revision the sync was performed against"
|
||||
|
||||
@@ -233,10 +233,8 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
|
||||
command.Flags().BoolVar(&serverSideDiff, "server-side-diff-enabled", env.ParseBoolFromEnv(common.EnvServerSideDiff, false), "Feature flag to enable ServerSide diff. Default (\"false\")")
|
||||
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout-seconds", env.ParseDurationFromEnv("ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT", 0*time.Second, 0, math.MaxInt64), "Set ignore normalizer JQ execution timeout")
|
||||
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, cacheutil.Options{
|
||||
OnClientCreated: func(client *redis.Client) {
|
||||
redisClient = client
|
||||
},
|
||||
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
return &command
|
||||
}
|
||||
|
||||
@@ -37,14 +37,13 @@ func newAWSCommand() *cobra.Command {
|
||||
var (
|
||||
clusterName string
|
||||
roleARN string
|
||||
profile string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "aws",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
presignedURLString, err := getSignedRequestWithRetry(ctx, time.Minute, 5*time.Second, clusterName, roleARN, profile, getSignedRequest)
|
||||
presignedURLString, err := getSignedRequestWithRetry(ctx, time.Minute, 5*time.Second, clusterName, roleARN, getSignedRequest)
|
||||
errors.CheckError(err)
|
||||
token := v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(presignedURLString))
|
||||
// Set token expiration to 1 minute before the presigned URL expires for some cushion
|
||||
@@ -54,17 +53,16 @@ func newAWSCommand() *cobra.Command {
|
||||
}
|
||||
command.Flags().StringVar(&clusterName, "cluster-name", "", "AWS Cluster name")
|
||||
command.Flags().StringVar(&roleARN, "role-arn", "", "AWS Role ARN")
|
||||
command.Flags().StringVar(&profile, "profile", "", "AWS Profile")
|
||||
return command
|
||||
}
|
||||
|
||||
type getSignedRequestFunc func(clusterName, roleARN string, profile string) (string, error)
|
||||
type getSignedRequestFunc func(clusterName, roleARN string) (string, error)
|
||||
|
||||
func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, profile string, fn getSignedRequestFunc) (string, error) {
|
||||
func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, fn getSignedRequestFunc) (string, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
for {
|
||||
signed, err := fn(clusterName, roleARN, profile)
|
||||
signed, err := fn(clusterName, roleARN)
|
||||
if err == nil {
|
||||
return signed, nil
|
||||
}
|
||||
@@ -76,10 +74,8 @@ func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Durat
|
||||
}
|
||||
}
|
||||
|
||||
func getSignedRequest(clusterName, roleARN string, profile string) (string, error) {
|
||||
sess, err := session.NewSessionWithOptions(session.Options{
|
||||
Profile: profile,
|
||||
})
|
||||
func getSignedRequest(clusterName, roleARN string) (string, error) {
|
||||
sess, err := session.NewSession()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error creating new AWS session: %s", err)
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ func TestGetSignedRequestWithRetry(t *testing.T) {
|
||||
}
|
||||
|
||||
// when
|
||||
signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)
|
||||
signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
@@ -41,7 +41,7 @@ func TestGetSignedRequestWithRetry(t *testing.T) {
|
||||
}
|
||||
|
||||
// when
|
||||
signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)
|
||||
signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
@@ -57,7 +57,7 @@ func TestGetSignedRequestWithRetry(t *testing.T) {
|
||||
}
|
||||
|
||||
// when
|
||||
signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)
|
||||
signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
@@ -70,7 +70,7 @@ type signedRequestMock struct {
|
||||
returnFunc func(m *signedRequestMock) (string, error)
|
||||
}
|
||||
|
||||
func (m *signedRequestMock) getSignedRequestMock(clusterName, roleARN string, profile string) (string, error) {
|
||||
func (m *signedRequestMock) getSignedRequestMock(clusterName, roleARN string) (string, error) {
|
||||
m.getSignedRequestCalls++
|
||||
return m.returnFunc(m)
|
||||
}
|
||||
|
||||
@@ -216,10 +216,8 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&helmRegistryMaxIndexSize, "helm-registry-max-index-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_INDEX_SIZE", "1G"), "Maximum size of registry index file")
|
||||
command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
|
||||
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, cacheutil.Options{
|
||||
OnClientCreated: func(client *redis.Client) {
|
||||
redisClient = client
|
||||
},
|
||||
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
return &command
|
||||
}
|
||||
|
||||
@@ -19,10 +19,8 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache"
|
||||
"github.com/argoproj/argo-cd/v2/server"
|
||||
servercache "github.com/argoproj/argo-cd/v2/server/cache"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/dex"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
@@ -68,7 +66,6 @@ func NewCommand() *cobra.Command {
|
||||
enableGZip bool
|
||||
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
|
||||
cacheSrc func() (*servercache.Cache, error)
|
||||
repoServerCacheSrc func() (*reposervercache.Cache, error)
|
||||
frameOptions string
|
||||
contentSecurityPolicy string
|
||||
repoServerPlaintext bool
|
||||
@@ -110,8 +107,6 @@ func NewCommand() *cobra.Command {
|
||||
errors.CheckError(err)
|
||||
cache, err := cacheSrc()
|
||||
errors.CheckError(err)
|
||||
repoServerCache, err := repoServerCacheSrc()
|
||||
errors.CheckError(err)
|
||||
|
||||
kubeclientset := kubernetes.NewForConfigOrDie(config)
|
||||
|
||||
@@ -196,7 +191,6 @@ func NewCommand() *cobra.Command {
|
||||
EnableGZip: enableGZip,
|
||||
TLSConfigCustomizer: tlsConfigCustomizer,
|
||||
Cache: cache,
|
||||
RepoServerCache: repoServerCache,
|
||||
XFrameOptions: frameOptions,
|
||||
ContentSecurityPolicy: contentSecurityPolicy,
|
||||
RedisClient: redisClient,
|
||||
@@ -269,11 +263,8 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
|
||||
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
|
||||
cacheSrc = servercache.AddCacheFlagsToCmd(command, cacheutil.Options{
|
||||
OnClientCreated: func(client *redis.Client) {
|
||||
redisClient = client
|
||||
},
|
||||
cacheSrc = servercache.AddCacheFlagsToCmd(command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
repoServerCacheSrc = reposervercache.AddCacheFlagsToCmd(command, cacheutil.Options{FlagPrefix: "repo-server-"})
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -48,9 +48,84 @@ func NewAdminCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
c.HelpFunc()(c, args)
|
||||
},
|
||||
Example: `# Access the Argo CD web UI
|
||||
Example: `# List all clusters
|
||||
$ argocd admin cluster list
|
||||
|
||||
# Add a new cluster
|
||||
$ argocd admin cluster add my-cluster --name my-cluster --in-cluster-context
|
||||
|
||||
# Remove a cluster
|
||||
$ argocd admin cluster remove my-cluster
|
||||
|
||||
# List all projects
|
||||
$ argocd admin project list
|
||||
|
||||
# Create a new project
|
||||
$ argocd admin project create my-project --src-namespace my-source-namespace --dest-namespace my-dest-namespace
|
||||
|
||||
# Update a project
|
||||
$ argocd admin project update my-project --src-namespace my-updated-source-namespace --dest-namespace my-updated-dest-namespace
|
||||
|
||||
# Delete a project
|
||||
$ argocd admin project delete my-project
|
||||
|
||||
# List all settings
|
||||
$ argocd admin settings list
|
||||
|
||||
# Get the current settings
|
||||
$ argocd admin settings get
|
||||
|
||||
# Update settings
|
||||
$ argocd admin settings update --repository.resync --value 15
|
||||
|
||||
# List all applications
|
||||
$ argocd admin app list
|
||||
|
||||
# Get application details
|
||||
$ argocd admin app get my-app
|
||||
|
||||
# Sync an application
|
||||
$ argocd admin app sync my-app
|
||||
|
||||
# Pause an application
|
||||
$ argocd admin app pause my-app
|
||||
|
||||
# Resume an application
|
||||
$ argocd admin app resume my-app
|
||||
|
||||
# List all repositories
|
||||
$ argocd admin repo list
|
||||
|
||||
# Add a repository
|
||||
$ argocd admin repo add https://github.com/argoproj/my-repo.git
|
||||
|
||||
# Remove a repository
|
||||
$ argocd admin repo remove https://github.com/argoproj/my-repo.git
|
||||
|
||||
# Import an application from a YAML file
|
||||
$ argocd admin app import -f my-app.yaml
|
||||
|
||||
# Export an application to a YAML file
|
||||
$ argocd admin app export my-app -o my-exported-app.yaml
|
||||
|
||||
# Access the Argo CD web UI
|
||||
$ argocd admin dashboard
|
||||
|
||||
# List notifications
|
||||
$ argocd admin notification list
|
||||
|
||||
# Get notification details
|
||||
$ argocd admin notification get my-notification
|
||||
|
||||
# Create a new notification
|
||||
$ argocd admin notification create my-notification -f notification-config.yaml
|
||||
|
||||
# Update a notification
|
||||
$ argocd admin notification update my-notification -f updated-notification-config.yaml
|
||||
|
||||
# Delete a notification
|
||||
$ argocd admin notification delete my-notification
|
||||
|
||||
# Reset the initial admin password
|
||||
$ argocd admin initial-password reset
|
||||
`,
|
||||
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/controller"
|
||||
"github.com/argoproj/argo-cd/v2/controller/cache"
|
||||
"github.com/argoproj/argo-cd/v2/controller/metrics"
|
||||
"github.com/argoproj/argo-cd/v2/controller/sharding"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
@@ -272,26 +271,18 @@ func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
|
||||
var result []appReconcileResult
|
||||
if refresh {
|
||||
appClientset := appclientset.NewForConfigOrDie(cfg)
|
||||
kubeClientset := kubernetes.NewForConfigOrDie(cfg)
|
||||
if repoServerAddress == "" {
|
||||
printLine("Repo server is not provided, trying to port-forward to argocd-repo-server pod.")
|
||||
overrides := clientcmd.ConfigOverrides{}
|
||||
repoServerName := clientOpts.RepoServerName
|
||||
repoServerServiceLabelSelector := common.LabelKeyComponentRepoServer + "=" + common.LabelValueComponentRepoServer
|
||||
repoServerServices, err := kubeClientset.CoreV1().Services(namespace).List(context.Background(), v1.ListOptions{LabelSelector: repoServerServiceLabelSelector})
|
||||
errors.CheckError(err)
|
||||
if len(repoServerServices.Items) > 0 {
|
||||
if repoServerServicelabel, ok := repoServerServices.Items[0].Labels[common.LabelKeyAppName]; ok && repoServerServicelabel != "" {
|
||||
repoServerName = repoServerServicelabel
|
||||
}
|
||||
}
|
||||
repoServerPodLabelSelector := common.LabelKeyAppName + "=" + repoServerName
|
||||
repoServerPodLabelSelector := common.LabelKeyAppName + "=" + clientOpts.RepoServerName
|
||||
repoServerPort, err := kubeutil.PortForward(8081, namespace, &overrides, repoServerPodLabelSelector)
|
||||
errors.CheckError(err)
|
||||
repoServerAddress = fmt.Sprintf("localhost:%d", repoServerPort)
|
||||
}
|
||||
repoServerClient := reposerverclient.NewRepoServerClientset(repoServerAddress, 60, reposerverclient.TLSConfiguration{DisableTLS: false, StrictValidation: false})
|
||||
|
||||
appClientset := appclientset.NewForConfigOrDie(cfg)
|
||||
kubeClientset := kubernetes.NewForConfigOrDie(cfg)
|
||||
result, err = reconcileApplications(ctx, kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache, serverSideDiff, ignoreNormalizerOpts)
|
||||
errors.CheckError(err)
|
||||
} else {
|
||||
@@ -449,5 +440,5 @@ func reconcileApplications(
|
||||
}
|
||||
|
||||
func newLiveStateCache(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache {
|
||||
return cache.NewLiveStateCache(argoDB, appInformer, settingsMgr, kubeutil.NewKubectl(), server, func(managedByApp map[string]bool, ref apiv1.ObjectReference) {}, &sharding.ClusterSharding{}, argo.NewResourceTracking())
|
||||
return cache.NewLiveStateCache(argoDB, appInformer, settingsMgr, kubeutil.NewKubectl(), server, func(managedByApp map[string]bool, ref apiv1.ObjectReference) {}, nil, argo.NewResourceTracking())
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/controller/sharding"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
@@ -71,7 +72,7 @@ argocd admin cluster namespaces my-cluster `,
|
||||
}
|
||||
|
||||
type ClusterWithInfo struct {
|
||||
v1alpha1.Cluster
|
||||
argoappv1.Cluster
|
||||
// Shard holds controller shard number that handles the cluster
|
||||
Shard int
|
||||
// Namespaces holds list of namespaces managed by Argo CD in the cluster
|
||||
@@ -86,12 +87,8 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
appItems, err := appClient.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clusterShardingCache := sharding.NewClusterSharding(argoDB, shard, replicas, shardingAlgorithm)
|
||||
clusterShardingCache.Init(clustersList, appItems)
|
||||
clusterShardingCache.Init(clustersList)
|
||||
clusterShards := clusterShardingCache.GetDistribution()
|
||||
|
||||
var cache *appstatecache.Cache
|
||||
@@ -117,6 +114,10 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
|
||||
}
|
||||
}
|
||||
|
||||
appItems, err := appClient.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
apps := appItems.Items
|
||||
for i, app := range apps {
|
||||
err := argo.ValidateDestination(ctx, &app.Spec.Destination, argoDB)
|
||||
@@ -129,6 +130,12 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
|
||||
|
||||
batchSize := 10
|
||||
batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize)))
|
||||
clusterSharding := &sharding.ClusterSharding{
|
||||
Shard: shard,
|
||||
Replicas: replicas,
|
||||
Shards: make(map[string]int),
|
||||
Clusters: make(map[string]*v1alpha1.Cluster),
|
||||
}
|
||||
for batchNum := 0; batchNum < batchesCount; batchNum++ {
|
||||
batchStart := batchSize * batchNum
|
||||
batchEnd := batchSize * (batchNum + 1)
|
||||
@@ -140,7 +147,9 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
|
||||
clusterShard := 0
|
||||
cluster := batch[i]
|
||||
if replicas > 0 {
|
||||
clusterShard = clusterShards[cluster.Server]
|
||||
distributionFunction := sharding.GetDistributionFunction(clusterSharding.GetClusterAccessor(), common.DefaultShardingAlgorithm, replicas)
|
||||
distributionFunction(&cluster)
|
||||
clusterShard := clusterShards[cluster.Server]
|
||||
cluster.Shard = pointer.Int64(int64(clusterShard))
|
||||
log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
|
||||
}
|
||||
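As a worked example of the batching arithmetic above (numbers are illustrative): with 25 clusters and batchSize = 10, batchesCount = ceil(25 / 10) = 3, and the computed batch bounds are [0, 10), [10, 20), and [20, 30); the final end index presumably gets clamped to the 25 available clusters before the slice is taken.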
@@ -617,16 +626,15 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
|
||||
errors.CheckError(err)
|
||||
kubeClientset := fake.NewSimpleClientset()
|
||||
|
||||
var awsAuthConf *v1alpha1.AWSAuthConfig
|
||||
var execProviderConf *v1alpha1.ExecProviderConfig
|
||||
var awsAuthConf *argoappv1.AWSAuthConfig
|
||||
var execProviderConf *argoappv1.ExecProviderConfig
|
||||
if clusterOpts.AwsClusterName != "" {
|
||||
awsAuthConf = &v1alpha1.AWSAuthConfig{
|
||||
awsAuthConf = &argoappv1.AWSAuthConfig{
|
||||
ClusterName: clusterOpts.AwsClusterName,
|
||||
RoleARN: clusterOpts.AwsRoleArn,
|
||||
Profile: clusterOpts.AwsProfile,
|
||||
}
|
||||
} else if clusterOpts.ExecProviderCommand != "" {
|
||||
execProviderConf = &v1alpha1.ExecProviderConfig{
|
||||
execProviderConf = &argoappv1.ExecProviderConfig{
|
||||
Command: clusterOpts.ExecProviderCommand,
|
||||
Args: clusterOpts.ExecProviderArgs,
|
||||
Env: clusterOpts.ExecProviderEnv,
|
||||
@@ -650,7 +658,7 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
|
||||
|
||||
clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, clusterOpts.ClusterResources, conf, bearerToken, awsAuthConf, execProviderConf, labelsMap, annotationsMap)
|
||||
if clusterOpts.InClusterEndpoint() {
|
||||
clst.Server = v1alpha1.KubernetesInternalAPIServerAddr
|
||||
clst.Server = argoappv1.KubernetesInternalAPIServerAddr
|
||||
}
|
||||
if clusterOpts.ClusterEndpoint == string(cmdutil.KubePublicEndpoint) {
|
||||
// Ignore `kube-public` cluster endpoints, since this command is intended to run without invoking any network connections.
|
||||
|
||||
@@ -1,43 +1,23 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
accountpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/account"
|
||||
applicationpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
|
||||
applicationsetpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/applicationset"
|
||||
certificatepkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/certificate"
|
||||
clusterpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster"
|
||||
gpgkeypkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/gpgkey"
|
||||
notificationpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/notification"
|
||||
projectpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/project"
|
||||
repocredspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/repocreds"
|
||||
repositorypkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/repository"
|
||||
sessionpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/session"
|
||||
settingspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/settings"
|
||||
versionpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/version"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/oauth2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
k8swatch "k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
func Test_getInfos(t *testing.T) {
|
||||
@@ -422,8 +402,8 @@ func TestFormatSyncPolicy(t *testing.T) {
|
||||
|
||||
policy := formatSyncPolicy(app)
|
||||
|
||||
if policy != "Manual" {
|
||||
t.Fatalf("Incorrect policy %q, should be Manual", policy)
|
||||
if policy != "<none>" {
|
||||
t.Fatalf("Incorrect policy %q, should be <none>", policy)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -557,21 +537,18 @@ func TestPrintApplicationHistoryTable(t *testing.T) {
|
||||
ID: 1,
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
TargetRevision: "1",
|
||||
RepoURL: "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
TargetRevision: "2",
|
||||
RepoURL: "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
TargetRevision: "3",
|
||||
RepoURL: "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -581,86 +558,7 @@ func TestPrintApplicationHistoryTable(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
|
||||
expectation := "SOURCE test\nID DATE REVISION\n1 0001-01-01 00:00:00 +0000 UTC 1\n2 0001-01-01 00:00:00 +0000 UTC 2\n3 0001-01-01 00:00:00 +0000 UTC 3\n"
|
||||
|
||||
if output != expectation {
|
||||
t.Fatalf("Incorrect print operation output %q, should be %q", output, expectation)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrintApplicationHistoryTableWithMultipleSources(t *testing.T) {
|
||||
histories := []v1alpha1.RevisionHistory{
|
||||
{
|
||||
ID: 0,
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
TargetRevision: "0",
|
||||
RepoURL: "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
Revisions: []string{
|
||||
"1a",
|
||||
"1b",
|
||||
},
|
||||
// added Source just for testing the function
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
TargetRevision: "-1",
|
||||
RepoURL: "ignore",
|
||||
},
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
v1alpha1.ApplicationSource{
|
||||
RepoURL: "test-1",
|
||||
TargetRevision: "1a",
|
||||
},
|
||||
v1alpha1.ApplicationSource{
|
||||
RepoURL: "test-2",
|
||||
TargetRevision: "1b",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Revisions: []string{
|
||||
"2a",
|
||||
"2b",
|
||||
},
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
v1alpha1.ApplicationSource{
|
||||
RepoURL: "test-1",
|
||||
TargetRevision: "2a",
|
||||
},
|
||||
v1alpha1.ApplicationSource{
|
||||
RepoURL: "test-2",
|
||||
TargetRevision: "2b",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Revisions: []string{
|
||||
"3a",
|
||||
"3b",
|
||||
},
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
v1alpha1.ApplicationSource{
|
||||
RepoURL: "test-1",
|
||||
TargetRevision: "3a",
|
||||
},
|
||||
v1alpha1.ApplicationSource{
|
||||
RepoURL: "test-2",
|
||||
TargetRevision: "3b",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
output, _ := captureOutput(func() error {
|
||||
printApplicationHistoryTable(histories)
|
||||
return nil
|
||||
})
|
||||
|
||||
expectation := "SOURCE test\nID DATE REVISION\n0 0001-01-01 00:00:00 +0000 UTC 0\n\nSOURCE test-1\nID DATE REVISION\n1 0001-01-01 00:00:00 +0000 UTC 1a\n2 0001-01-01 00:00:00 +0000 UTC 2a\n3 0001-01-01 00:00:00 +0000 UTC 3a\n\nSOURCE test-2\nID DATE REVISION\n1 0001-01-01 00:00:00 +0000 UTC 1b\n2 0001-01-01 00:00:00 +0000 UTC 2b\n3 0001-01-01 00:00:00 +0000 UTC 3b\n"
|
||||
expectation := "ID DATE REVISION\n1 0001-01-01 00:00:00 +0000 UTC 1\n2 0001-01-01 00:00:00 +0000 UTC 2\n3 0001-01-01 00:00:00 +0000 UTC 3\n"
|
||||
|
||||
if output != expectation {
|
||||
t.Fatalf("Incorrect print operation output %q, should be %q", output, expectation)
|
||||
@@ -741,110 +639,11 @@ Project: default
|
||||
Server: local
|
||||
Namespace: argocd
|
||||
URL: url
|
||||
Source:
|
||||
- Repo: test
|
||||
Target: master
|
||||
Path: /test
|
||||
Helm Values: path1,path2
|
||||
Name Prefix: prefix
|
||||
SyncWindow: Sync Denied
|
||||
Assigned Windows: allow:0 0 * * *:24h,deny:0 0 * * *:24h,allow:0 0 * * *:24h
|
||||
Sync Policy: Automated (Prune)
|
||||
Sync Status: OutOfSync from master
|
||||
Health Status: Progressing (health-message)
|
||||
`
|
||||
assert.Equalf(t, expectation, output, "Incorrect print app summary output %q, should be %q", output, expectation)
|
||||
}
|
||||
|
||||
func TestPrintAppSummaryTable_MultipleSources(t *testing.T) {
|
||||
output, _ := captureOutput(func() error {
|
||||
app := &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
SyncPolicy: &v1alpha1.SyncPolicy{
|
||||
Automated: &v1alpha1.SyncPolicyAutomated{
|
||||
Prune: true,
|
||||
},
|
||||
},
|
||||
Project: "default",
|
||||
Destination: v1alpha1.ApplicationDestination{Server: "local", Namespace: "argocd"},
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "test",
|
||||
TargetRevision: "master",
|
||||
Path: "/test",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
ValueFiles: []string{"path1", "path2"},
|
||||
},
|
||||
Kustomize: &v1alpha1.ApplicationSourceKustomize{NamePrefix: "prefix"},
|
||||
}, {
|
||||
RepoURL: "test2",
|
||||
TargetRevision: "master2",
|
||||
Path: "/test2",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: v1alpha1.ApplicationStatus{
|
||||
Sync: v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
},
|
||||
Health: v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusProgressing,
|
||||
Message: "health-message",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
windows := &v1alpha1.SyncWindows{
|
||||
{
|
||||
Kind: "allow",
|
||||
Schedule: "0 0 * * *",
|
||||
Duration: "24h",
|
||||
Applications: []string{
|
||||
"*-prod",
|
||||
},
|
||||
ManualSync: true,
|
||||
},
|
||||
{
|
||||
Kind: "deny",
|
||||
Schedule: "0 0 * * *",
|
||||
Duration: "24h",
|
||||
Namespaces: []string{
|
||||
"default",
|
||||
},
|
||||
},
|
||||
{
|
||||
Kind: "allow",
|
||||
Schedule: "0 0 * * *",
|
||||
Duration: "24h",
|
||||
Clusters: []string{
|
||||
"in-cluster",
|
||||
"cluster1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
printAppSummaryTable(app, "url", windows)
|
||||
return nil
|
||||
})
|
||||
|
||||
expectation := `Name: argocd/test
|
||||
Project: default
|
||||
Server: local
|
||||
Namespace: argocd
|
||||
URL: url
|
||||
Sources:
|
||||
- Repo: test
|
||||
Target: master
|
||||
Path: /test
|
||||
Helm Values: path1,path2
|
||||
Name Prefix: prefix
|
||||
- Repo: test2
|
||||
Target: master2
|
||||
Path: /test2
|
||||
Repo: test
|
||||
Target: master
|
||||
Path: /test
|
||||
Helm Values: path1,path2
|
||||
Name Prefix: prefix
|
||||
SyncWindow: Sync Denied
|
||||
Assigned Windows: allow:0 0 * * *:24h,deny:0 0 * * *:24h,allow:0 0 * * *:24h
|
||||
Sync Policy: Automated (Prune)
|
||||
@@ -1007,14 +806,6 @@ func TestTargetObjects_invalid(t *testing.T) {
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCheckForDeleteEvent(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
fakeClient := new(fakeAcdClient)
|
||||
|
||||
checkForDeleteEvent(ctx, fakeClient, "testApp")
|
||||
}
|
||||
|
||||
func TestPrintApplicationNames(t *testing.T) {
|
||||
output, _ := captureOutput(func() error {
|
||||
app := &v1alpha1.Application{
|
||||
@@ -1510,7 +1301,7 @@ func TestPrintApplicationTableNotWide(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
expectation := "NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS\napp-name http://localhost:8080 default prj OutOfSync Healthy Manual <none>\napp-name http://localhost:8080 default prj OutOfSync Healthy Manual <none>\n"
|
||||
expectation := "NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS\napp-name http://localhost:8080 default prj OutOfSync Healthy <none> <none>\napp-name http://localhost:8080 default prj OutOfSync Healthy <none> <none>\n"
|
||||
assert.Equal(t, output, expectation)
|
||||
}
|
||||
|
||||
@@ -1546,7 +1337,7 @@ func TestPrintApplicationTableWide(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
expectation := "NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS REPO PATH TARGET\napp-name http://localhost:8080 default prj OutOfSync Healthy Manual <none> https://github.com/argoproj/argocd-example-apps guestbook 123\napp-name http://localhost:8080 default prj OutOfSync Healthy Manual <none> https://github.com/argoproj/argocd-example-apps guestbook 123\n"
|
||||
expectation := "NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS REPO PATH TARGET\napp-name http://localhost:8080 default prj OutOfSync Healthy <none> <none> https://github.com/argoproj/argocd-example-apps guestbook 123\napp-name http://localhost:8080 default prj OutOfSync Healthy <none> <none> https://github.com/argoproj/argocd-example-apps guestbook 123\n"
|
||||
assert.Equal(t, output, expectation)
|
||||
}
|
||||
|
||||
@@ -1808,104 +1599,3 @@ func testApp(name, project string, labels map[string]string, annotations map[str
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type fakeAcdClient struct{}
|
||||
|
||||
func (c *fakeAcdClient) ClientOptions() argocdclient.ClientOptions {
|
||||
return argocdclient.ClientOptions{}
|
||||
}
|
||||
func (c *fakeAcdClient) HTTPClient() (*http.Client, error) { return nil, nil }
|
||||
func (c *fakeAcdClient) OIDCConfig(context.Context, *settingspkg.Settings) (*oauth2.Config, *oidc.Provider, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewRepoClient() (io.Closer, repositorypkg.RepositoryServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewRepoClientOrDie() (io.Closer, repositorypkg.RepositoryServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewRepoCredsClient() (io.Closer, repocredspkg.RepoCredsServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewRepoCredsClientOrDie() (io.Closer, repocredspkg.RepoCredsServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewCertClient() (io.Closer, certificatepkg.CertificateServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewCertClientOrDie() (io.Closer, certificatepkg.CertificateServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewClusterClient() (io.Closer, clusterpkg.ClusterServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewClusterClientOrDie() (io.Closer, clusterpkg.ClusterServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewGPGKeyClient() (io.Closer, gpgkeypkg.GPGKeyServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewGPGKeyClientOrDie() (io.Closer, gpgkeypkg.GPGKeyServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewApplicationClient() (io.Closer, applicationpkg.ApplicationServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewApplicationSetClient() (io.Closer, applicationsetpkg.ApplicationSetServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewApplicationClientOrDie() (io.Closer, applicationpkg.ApplicationServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewApplicationSetClientOrDie() (io.Closer, applicationsetpkg.ApplicationSetServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewNotificationClient() (io.Closer, notificationpkg.NotificationServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewNotificationClientOrDie() (io.Closer, notificationpkg.NotificationServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewSessionClient() (io.Closer, sessionpkg.SessionServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewSessionClientOrDie() (io.Closer, sessionpkg.SessionServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewSettingsClient() (io.Closer, settingspkg.SettingsServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewSettingsClientOrDie() (io.Closer, settingspkg.SettingsServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewVersionClient() (io.Closer, versionpkg.VersionServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewVersionClientOrDie() (io.Closer, versionpkg.VersionServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewProjectClient() (io.Closer, projectpkg.ProjectServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewProjectClientOrDie() (io.Closer, projectpkg.ProjectServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewAccountClient() (io.Closer, accountpkg.AccountServiceClient, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) NewAccountClientOrDie() (io.Closer, accountpkg.AccountServiceClient) {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *fakeAcdClient) WatchApplicationWithRetry(ctx context.Context, appName string, revision string) chan *v1alpha1.ApplicationWatchEvent {
appEventsCh := make(chan *v1alpha1.ApplicationWatchEvent)

go func() {
modifiedEvent := new(v1alpha1.ApplicationWatchEvent)
modifiedEvent.Type = k8swatch.Modified
appEventsCh <- modifiedEvent
deletedEvent := new(v1alpha1.ApplicationWatchEvent)
deletedEvent.Type = k8swatch.Deleted
appEventsCh <- deletedEvent
}()
return appEventsCh
}
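A minimal sketch, assuming the same test package and the k8swatch alias used above, of how a test might consume this fake watch:

func waitForDeletion(ctx context.Context, c *fakeAcdClient, appName string) {
	// Hypothetical consumer: drain events until the fake client reports the
	// application deleted (the fake sends Modified, then Deleted).
	for ev := range c.WatchApplicationWithRetry(ctx, appName, "") {
		if ev.Type == k8swatch.Deleted {
			return // the channel is never closed, so stop on the Deleted event
		}
	}
}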
@@ -350,11 +350,9 @@ func printAppSetSummaryTable(appSet *arogappsetv1.ApplicationSet) {
fmt.Printf(printOpFmtStr, "Project:", appSet.Spec.Template.Spec.GetProject())
fmt.Printf(printOpFmtStr, "Server:", getServerForAppSet(appSet))
fmt.Printf(printOpFmtStr, "Namespace:", appSet.Spec.Template.Spec.Destination.Namespace)
if !appSet.Spec.Template.Spec.HasMultipleSources() {
fmt.Println("Source:")
} else {
fmt.Println("Sources:")
}
fmt.Printf(printOpFmtStr, "Repo:", source.RepoURL)
fmt.Printf(printOpFmtStr, "Target:", source.TargetRevision)
fmt.Printf(printOpFmtStr, "Path:", source.Path)
printAppSourceDetails(&source)

var (
|
||||
|
||||
@@ -180,9 +180,9 @@ func TestPrintAppSetSummaryTable(t *testing.T) {
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Source:
|
||||
- Repo:
|
||||
Target:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: <none>
|
||||
`,
|
||||
},
|
||||
@@ -193,9 +193,9 @@ SyncPolicy: <none>
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Source:
|
||||
- Repo:
|
||||
Target:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: Automated
|
||||
`,
|
||||
},
|
||||
@@ -206,9 +206,9 @@ SyncPolicy: Automated
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Source:
|
||||
- Repo:
|
||||
Target:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: Automated
|
||||
`,
|
||||
},
|
||||
|
||||
@@ -111,7 +111,6 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
|
||||
awsAuthConf = &argoappv1.AWSAuthConfig{
|
||||
ClusterName: clusterOpts.AwsClusterName,
|
||||
RoleARN: clusterOpts.AwsRoleArn,
|
||||
Profile: clusterOpts.AwsProfile,
|
||||
}
|
||||
} else if clusterOpts.ExecProviderCommand != "" {
|
||||
execProviderConf = &argoappv1.ExecProviderConfig{
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
cache2 "k8s.io/client-go/tools/cache"
|
||||
@@ -79,12 +78,6 @@ func (c *forwardCacheClient) Set(item *cache.Item) error {
|
||||
})
|
||||
}
|
||||
|
||||
func (c *forwardCacheClient) Rename(oldKey string, newKey string, expiration time.Duration) error {
|
||||
return c.doLazy(func(client cache.CacheClient) error {
|
||||
return client.Rename(oldKey, newKey, expiration)
|
||||
})
|
||||
}
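A minimal sketch, assuming the doLazy helper and the cache.CacheClient interface used above, of how another cache operation would be wrapped in the same lazy, port-forwarded fashion:

func (c *forwardCacheClient) Delete(key string) error {
	// Hypothetical wrapper: the real client is dialed on first use and the
	// call is forwarded to it, mirroring Set/Rename/Get above.
	return c.doLazy(func(client cache.CacheClient) error {
		return client.Delete(key)
	})
}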
func (c *forwardCacheClient) Get(key string, obj interface{}) error {
|
||||
return c.doLazy(func(client cache.CacheClient) error {
|
||||
return client.Get(key, obj)
|
||||
@@ -116,7 +109,6 @@ type forwardRepoClientset struct {
|
||||
repoClientset repoapiclient.Clientset
|
||||
err error
|
||||
repoServerName string
|
||||
kubeClientset kubernetes.Interface
|
||||
}
|
||||
|
||||
func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.RepoServerServiceClient, error) {
|
||||
@@ -124,19 +116,7 @@ func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.R
|
||||
overrides := clientcmd.ConfigOverrides{
|
||||
CurrentContext: c.context,
|
||||
}
|
||||
repoServerName := c.repoServerName
|
||||
repoServererviceLabelSelector := common.LabelKeyComponentRepoServer + "=" + common.LabelValueComponentRepoServer
|
||||
repoServerServices, err := c.kubeClientset.CoreV1().Services(c.namespace).List(context.Background(), v1.ListOptions{LabelSelector: repoServererviceLabelSelector})
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return
|
||||
}
|
||||
if len(repoServerServices.Items) > 0 {
|
||||
if repoServerServicelabel, ok := repoServerServices.Items[0].Labels[common.LabelKeyAppName]; ok && repoServerServicelabel != "" {
|
||||
repoServerName = repoServerServicelabel
|
||||
}
|
||||
}
|
||||
repoServerPodLabelSelector := common.LabelKeyAppName + "=" + repoServerName
|
||||
repoServerPodLabelSelector := common.LabelKeyAppName + "=" + c.repoServerName
|
||||
repoServerPort, err := kubeutil.PortForward(8081, c.namespace, &overrides, repoServerPodLabelSelector)
|
||||
if err != nil {
|
||||
c.err = err
|
||||
@@ -251,7 +231,7 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
|
||||
KubeClientset: kubeClientset,
|
||||
Insecure: true,
|
||||
ListenHost: *address,
|
||||
RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr, repoServerName: clientOpts.RepoServerName, kubeClientset: kubeClientset},
|
||||
RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr, repoServerName: clientOpts.RepoServerName},
|
||||
EnableProxyExtension: false,
|
||||
})
|
||||
srv.Init(ctx)
|
||||
|
||||
@@ -78,8 +78,6 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command.AddCommand(NewProjectWindowsCommand(clientOpts))
|
||||
command.AddCommand(NewProjectAddOrphanedIgnoreCommand(clientOpts))
|
||||
command.AddCommand(NewProjectRemoveOrphanedIgnoreCommand(clientOpts))
|
||||
command.AddCommand(NewProjectAddSourceNamespace(clientOpts))
|
||||
command.AddCommand(NewProjectRemoveSourceNamespace(clientOpts))
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -511,88 +509,6 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
return command
|
||||
}
|
||||
|
||||
// NewProjectAddSourceNamespace returns a new instance of an `argocd proj add-source-namespace` command
|
||||
func NewProjectAddSourceNamespace(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "add-source-namespace PROJECT NAMESPACE",
|
||||
Short: "Add source namespace to the AppProject",
|
||||
Example: templates.Examples(`
|
||||
# Add Kubernetes namespace as source namespace to the AppProject where application resources are allowed to be created in.
|
||||
argocd proj add-source-namespace PROJECT NAMESPACE
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) != 2 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
projName := args[0]
|
||||
srcNamespace := args[1]
|
||||
conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
|
||||
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName})
|
||||
errors.CheckError(err)
|
||||
|
||||
for _, item := range proj.Spec.SourceNamespaces {
|
||||
if item == "*" || item == srcNamespace {
|
||||
fmt.Printf("Source namespace '*' already allowed in project\n")
|
||||
return
|
||||
}
|
||||
}
|
||||
proj.Spec.SourceNamespaces = append(proj.Spec.SourceNamespaces, srcNamespace)
|
||||
_, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj})
|
||||
errors.CheckError(err)
|
||||
},
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
// NewProjectRemoveSourceNamespace returns a new instance of an `argocd proj remove-source-namespace` command
|
||||
func NewProjectRemoveSourceNamespace(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "remove-source-namespace PROJECT NAMESPACE",
|
||||
Short: "Removes the source namespace from the AppProject",
|
||||
Example: templates.Examples(`
|
||||
# Remove source NAMESPACE in PROJECT
|
||||
argocd proj remove-source-namespace PROJECT NAMESPACE
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) != 2 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
projName := args[0]
|
||||
srcNamespace := args[1]
|
||||
conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
|
||||
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName})
|
||||
errors.CheckError(err)
|
||||
|
||||
index := -1
|
||||
for i, item := range proj.Spec.SourceNamespaces {
|
||||
if item == srcNamespace && item != "*" {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if index == -1 {
|
||||
fmt.Printf("Source namespace '%s' does not exist in project or cannot be removed\n", srcNamespace)
|
||||
} else {
|
||||
proj.Spec.SourceNamespaces = append(proj.Spec.SourceNamespaces[:index], proj.Spec.SourceNamespaces[index+1:]...)
|
||||
_, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj})
|
||||
errors.CheckError(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func modifyResourcesList(list *[]metav1.GroupKind, add bool, listDesc string, group string, kind string) bool {
|
||||
if add {
|
||||
for _, item := range *list {
|
||||
|
||||
@@ -64,12 +64,6 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
# Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here
|
||||
argocd repo add ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa
|
||||
|
||||
# Add a Git repository via SSH using socks5 proxy with no proxy credentials
|
||||
argocd repo add ssh://git@github.com/argoproj/argocd-example-apps --ssh-private-key-path ~/id_rsa --proxy socks5://your.proxy.server.ip:1080
|
||||
|
||||
# Add a Git repository via SSH using socks5 proxy with proxy credentials
|
||||
argocd repo add ssh://git@github.com/argoproj/argocd-example-apps --ssh-private-key-path ~/id_rsa --proxy socks5://username:password@your.proxy.server.ip:1080
|
||||
|
||||
# Add a private Git repository via HTTPS using username/password and TLS client certificates:
|
||||
argocd repo add https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key
|
||||
|
||||
|
||||
@@ -75,11 +75,11 @@ func NewCommand() *cobra.Command {
|
||||
command.PersistentFlags().StringVar(&clientOpts.GRPCWebRootPath, "grpc-web-root-path", config.GetFlag("grpc-web-root-path", ""), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2. Set web root.")
|
||||
command.PersistentFlags().StringVar(&cmdutil.LogFormat, "logformat", config.GetFlag("logformat", "text"), "Set the logging format. One of: text|json")
|
||||
command.PersistentFlags().StringVar(&cmdutil.LogLevel, "loglevel", config.GetFlag("loglevel", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.PersistentFlags().StringSliceVarP(&clientOpts.Headers, "header", "H", config.GetStringSliceFlag("header", []string{}), "Sets additional header to all requests made by Argo CD CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers)")
|
||||
command.PersistentFlags().StringSliceVarP(&clientOpts.Headers, "header", "H", []string{}, "Sets additional header to all requests made by Argo CD CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers)")
|
||||
command.PersistentFlags().BoolVar(&clientOpts.PortForward, "port-forward", config.GetBoolFlag("port-forward"), "Connect to a random argocd-server port using port forwarding")
|
||||
command.PersistentFlags().StringVar(&clientOpts.PortForwardNamespace, "port-forward-namespace", config.GetFlag("port-forward-namespace", ""), "Namespace name which should be used for port forwarding")
|
||||
command.PersistentFlags().IntVar(&clientOpts.HttpRetryMax, "http-retry-max", config.GetIntFlag("http-retry-max", 0), "Maximum number of retries to establish http connection to Argo CD server")
|
||||
command.PersistentFlags().BoolVar(&clientOpts.Core, "core", config.GetBoolFlag("core"), "If set to true then CLI talks directly to Kubernetes instead of talking to Argo CD API server")
|
||||
command.PersistentFlags().IntVar(&clientOpts.HttpRetryMax, "http-retry-max", 0, "Maximum number of retries to establish http connection to Argo CD server")
|
||||
command.PersistentFlags().BoolVar(&clientOpts.Core, "core", false, "If set to true then CLI talks directly to Kubernetes instead of talking to Argo CD API server")
|
||||
command.PersistentFlags().StringVar(&clientOpts.ServerName, "server-name", env.StringFromEnv(common.EnvServerName, common.DefaultServerName), fmt.Sprintf("Name of the Argo CD API server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvServerName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.AppControllerName, "controller-name", env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName), fmt.Sprintf("Name of the Argo CD Application controller; set this or the %s environment variable when the controller's name label differs from the default, for example when installing via the Helm chart", common.EnvAppControllerName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.RedisHaProxyName, "redis-haproxy-name", env.StringFromEnv(common.EnvRedisHaProxyName, common.DefaultRedisHaProxyName), fmt.Sprintf("Name of the Redis HA Proxy; set this or the %s environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisHaProxyName))
|
||||
|
||||
255
cmd/util/app.go
@@ -68,7 +68,6 @@ type AppOptions struct {
|
||||
kustomizeVersion string
|
||||
kustomizeCommonLabels []string
|
||||
kustomizeCommonAnnotations []string
|
||||
kustomizeLabelWithoutSelector bool
|
||||
kustomizeForceCommonLabels bool
|
||||
kustomizeForceCommonAnnotations bool
|
||||
kustomizeNamespace string
|
||||
@@ -80,7 +79,6 @@ type AppOptions struct {
|
||||
retryBackoffDuration time.Duration
|
||||
retryBackoffMaxDuration time.Duration
|
||||
retryBackoffFactor int64
|
||||
ref string
|
||||
}
|
||||
|
||||
func AddAppFlags(command *cobra.Command, opts *AppOptions) {
|
||||
@@ -105,7 +103,7 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
|
||||
command.Flags().StringArrayVar(&opts.helmSetFiles, "helm-set-file", []string{}, "Helm set values from respective files specified via the command line (can be repeated to set several values: --helm-set-file key1=path1 --helm-set-file key2=path2)")
|
||||
command.Flags().BoolVar(&opts.helmSkipCrds, "helm-skip-crds", false, "Skip helm crd installation step")
|
||||
command.Flags().StringVar(&opts.project, "project", "", "Application project name")
|
||||
command.Flags().StringVar(&opts.syncPolicy, "sync-policy", "", "Set the sync policy (one of: manual (aliases of manual: none), automated (aliases of automated: auto, automatic))")
|
||||
command.Flags().StringVar(&opts.syncPolicy, "sync-policy", "", "Set the sync policy (one of: none, automated (aliases of automated: auto, automatic))")
|
||||
command.Flags().StringArrayVar(&opts.syncOptions, "sync-option", []string{}, "Add or remove a sync option, e.g add `Prune=false`. Remove using `!` prefix, e.g. `!Prune=false`")
|
||||
command.Flags().BoolVar(&opts.autoPrune, "auto-prune", false, "Set automatic pruning when sync is automated")
|
||||
command.Flags().BoolVar(&opts.selfHeal, "self-heal", false, "Set self healing when sync is automated")
|
||||
@@ -126,7 +124,6 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
|
||||
command.Flags().BoolVar(&opts.Validate, "validate", true, "Validation of repo and cluster")
|
||||
command.Flags().StringArrayVar(&opts.kustomizeCommonLabels, "kustomize-common-label", []string{}, "Set common labels in Kustomize")
|
||||
command.Flags().StringArrayVar(&opts.kustomizeCommonAnnotations, "kustomize-common-annotation", []string{}, "Set common labels in Kustomize")
|
||||
command.Flags().BoolVar(&opts.kustomizeLabelWithoutSelector, "kustomize-label-without-selector", false, "Do not apply common label to selectors or templates")
|
||||
command.Flags().BoolVar(&opts.kustomizeForceCommonLabels, "kustomize-force-common-label", false, "Force common labels in Kustomize")
|
||||
command.Flags().BoolVar(&opts.kustomizeForceCommonAnnotations, "kustomize-force-common-annotation", false, "Force common annotations in Kustomize")
|
||||
command.Flags().StringVar(&opts.kustomizeNamespace, "kustomize-namespace", "", "Kustomize namespace")
|
||||
@@ -136,37 +133,81 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
|
||||
command.Flags().DurationVar(&opts.retryBackoffDuration, "sync-retry-backoff-duration", argoappv1.DefaultSyncRetryDuration, "Sync retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)")
|
||||
command.Flags().DurationVar(&opts.retryBackoffMaxDuration, "sync-retry-backoff-max-duration", argoappv1.DefaultSyncRetryMaxDuration, "Max sync retry backoff duration. Input needs to be a duration (e.g. 2m, 1h)")
|
||||
command.Flags().Int64Var(&opts.retryBackoffFactor, "sync-retry-backoff-factor", argoappv1.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed sync retry")
|
||||
command.Flags().StringVar(&opts.ref, "ref", "", "Ref is reference to another source within sources field")
|
||||
}
|
||||
|
||||
func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, appOpts *AppOptions, index int) int {
|
||||
func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, appOpts *AppOptions) int {
|
||||
visited := 0
|
||||
if flags == nil {
|
||||
return visited
|
||||
}
|
||||
source := spec.GetSourcePtr(index)
|
||||
if source == nil {
|
||||
source = &argoappv1.ApplicationSource{}
|
||||
}
|
||||
source, visited = ConstructSource(source, *appOpts, flags)
|
||||
if spec.HasMultipleSources() {
|
||||
if index == 0 {
|
||||
spec.Sources[index] = *source
|
||||
} else if index > 0 {
|
||||
spec.Sources[index-1] = *source
|
||||
} else {
|
||||
spec.Sources = append(spec.Sources, *source)
|
||||
}
|
||||
} else {
|
||||
spec.Source = source
|
||||
}
|
||||
flags.Visit(func(f *pflag.Flag) {
|
||||
visited++
|
||||
|
||||
source := spec.GetSourcePtr()
|
||||
if source == nil {
|
||||
source = &argoappv1.ApplicationSource{}
|
||||
}
|
||||
switch f.Name {
|
||||
case "repo":
|
||||
source.RepoURL = appOpts.repoURL
|
||||
case "path":
|
||||
source.Path = appOpts.appPath
|
||||
case "helm-chart":
|
||||
source.Chart = appOpts.chart
|
||||
case "revision":
|
||||
source.TargetRevision = appOpts.revision
|
||||
case "revision-history-limit":
|
||||
i := int64(appOpts.revisionHistoryLimit)
|
||||
spec.RevisionHistoryLimit = &i
|
||||
case "values":
|
||||
setHelmOpt(source, helmOpts{valueFiles: appOpts.valuesFiles})
|
||||
case "ignore-missing-value-files":
|
||||
setHelmOpt(source, helmOpts{ignoreMissingValueFiles: appOpts.ignoreMissingValueFiles})
|
||||
case "values-literal-file":
|
||||
var data []byte
|
||||
|
||||
// read uri
|
||||
parsedURL, err := url.ParseRequestURI(appOpts.values)
|
||||
if err != nil || !(parsedURL.Scheme == "http" || parsedURL.Scheme == "https") {
|
||||
data, err = os.ReadFile(appOpts.values)
|
||||
} else {
|
||||
data, err = config.ReadRemoteFile(appOpts.values)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
setHelmOpt(source, helmOpts{values: string(data)})
|
||||
case "release-name":
|
||||
setHelmOpt(source, helmOpts{releaseName: appOpts.releaseName})
|
||||
case "helm-version":
|
||||
setHelmOpt(source, helmOpts{version: appOpts.helmVersion})
|
||||
case "helm-pass-credentials":
|
||||
setHelmOpt(source, helmOpts{passCredentials: appOpts.helmPassCredentials})
|
||||
case "helm-set":
|
||||
setHelmOpt(source, helmOpts{helmSets: appOpts.helmSets})
|
||||
case "helm-set-string":
|
||||
setHelmOpt(source, helmOpts{helmSetStrings: appOpts.helmSetStrings})
|
||||
case "helm-set-file":
|
||||
setHelmOpt(source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
|
||||
case "helm-skip-crds":
|
||||
setHelmOpt(source, helmOpts{skipCrds: appOpts.helmSkipCrds})
|
||||
case "directory-recurse":
|
||||
if source.Directory != nil {
|
||||
source.Directory.Recurse = appOpts.directoryRecurse
|
||||
} else {
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse}
|
||||
}
|
||||
case "directory-exclude":
|
||||
if source.Directory != nil {
|
||||
source.Directory.Exclude = appOpts.directoryExclude
|
||||
} else {
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Exclude: appOpts.directoryExclude}
|
||||
}
|
||||
case "directory-include":
|
||||
if source.Directory != nil {
|
||||
source.Directory.Include = appOpts.directoryInclude
|
||||
} else {
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Include: appOpts.directoryInclude}
|
||||
}
|
||||
case "config-management-plugin":
|
||||
source.Plugin = &argoappv1.ApplicationSourcePlugin{Name: appOpts.configManagementPlugin}
|
||||
case "dest-name":
|
||||
spec.Destination.Name = appOpts.destName
|
||||
case "dest-server":
|
||||
@@ -175,9 +216,45 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
spec.Destination.Namespace = appOpts.destNamespace
|
||||
case "project":
|
||||
spec.Project = appOpts.project
|
||||
case "nameprefix":
|
||||
setKustomizeOpt(source, kustomizeOpts{namePrefix: appOpts.namePrefix})
|
||||
case "namesuffix":
|
||||
setKustomizeOpt(source, kustomizeOpts{nameSuffix: appOpts.nameSuffix})
|
||||
case "kustomize-image":
|
||||
setKustomizeOpt(source, kustomizeOpts{images: appOpts.kustomizeImages})
|
||||
case "kustomize-replica":
|
||||
setKustomizeOpt(source, kustomizeOpts{replicas: appOpts.kustomizeReplicas})
|
||||
case "kustomize-version":
|
||||
setKustomizeOpt(source, kustomizeOpts{version: appOpts.kustomizeVersion})
|
||||
case "kustomize-namespace":
|
||||
setKustomizeOpt(source, kustomizeOpts{namespace: appOpts.kustomizeNamespace})
|
||||
case "kustomize-common-label":
|
||||
parsedLabels, err := label.Parse(appOpts.kustomizeCommonLabels)
|
||||
errors.CheckError(err)
|
||||
setKustomizeOpt(source, kustomizeOpts{commonLabels: parsedLabels})
|
||||
case "kustomize-common-annotation":
|
||||
parsedAnnotations, err := label.Parse(appOpts.kustomizeCommonAnnotations)
|
||||
errors.CheckError(err)
|
||||
setKustomizeOpt(source, kustomizeOpts{commonAnnotations: parsedAnnotations})
|
||||
case "kustomize-force-common-label":
|
||||
setKustomizeOpt(source, kustomizeOpts{forceCommonLabels: appOpts.kustomizeForceCommonLabels})
|
||||
case "kustomize-force-common-annotation":
|
||||
setKustomizeOpt(source, kustomizeOpts{forceCommonAnnotations: appOpts.kustomizeForceCommonAnnotations})
|
||||
case "jsonnet-tla-str":
|
||||
setJsonnetOpt(source, appOpts.jsonnetTlaStr, false)
|
||||
case "jsonnet-tla-code":
|
||||
setJsonnetOpt(source, appOpts.jsonnetTlaCode, true)
|
||||
case "jsonnet-ext-var-str":
|
||||
setJsonnetOptExtVar(source, appOpts.jsonnetExtVarStr, false)
|
||||
case "jsonnet-ext-var-code":
|
||||
setJsonnetOptExtVar(source, appOpts.jsonnetExtVarCode, true)
|
||||
case "jsonnet-libs":
|
||||
setJsonnetOptLibs(source, appOpts.jsonnetLibs)
|
||||
case "plugin-env":
|
||||
setPluginOptEnvs(source, appOpts.pluginEnvs)
|
||||
case "sync-policy":
|
||||
switch appOpts.syncPolicy {
|
||||
case "none", "manual":
|
||||
case "none":
|
||||
if spec.SyncPolicy != nil {
|
||||
spec.SyncPolicy.Automated = nil
|
||||
}
|
||||
@@ -231,6 +308,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
log.Fatalf("Invalid sync-retry-limit [%d]", appOpts.retryLimit)
|
||||
}
|
||||
}
|
||||
spec.Source = source
|
||||
})
|
||||
if flags.Changed("auto-prune") {
|
||||
if spec.SyncPolicy == nil || spec.SyncPolicy.Automated == nil {
|
||||
@@ -262,7 +340,6 @@ type kustomizeOpts struct {
|
||||
version string
|
||||
commonLabels map[string]string
|
||||
commonAnnotations map[string]string
|
||||
labelWithoutSelector bool
|
||||
forceCommonLabels bool
|
||||
forceCommonAnnotations bool
|
||||
namespace string
|
||||
@@ -290,9 +367,6 @@ func setKustomizeOpt(src *argoappv1.ApplicationSource, opts kustomizeOpts) {
|
||||
if opts.commonAnnotations != nil {
|
||||
src.Kustomize.CommonAnnotations = opts.commonAnnotations
|
||||
}
|
||||
if opts.labelWithoutSelector {
|
||||
src.Kustomize.LabelWithoutSelector = opts.labelWithoutSelector
|
||||
}
|
||||
if opts.forceCommonLabels {
|
||||
src.Kustomize.ForceCommonLabels = opts.forceCommonLabels
|
||||
}
|
||||
@@ -424,11 +498,11 @@ func setJsonnetOptLibs(src *argoappv1.ApplicationSource, libs []string) {
|
||||
// SetParameterOverrides updates an existing or appends a new parameter override in the application
|
||||
// The app is assumed to be a helm app and is expected to be in the form:
|
||||
// param=value
|
||||
func SetParameterOverrides(app *argoappv1.Application, parameters []string, index int) {
|
||||
func SetParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
if len(parameters) == 0 {
|
||||
return
|
||||
}
|
||||
source := app.Spec.GetSourcePtr(index)
|
||||
source := app.Spec.GetSource()
|
||||
var sourceType argoappv1.ApplicationSourceType
|
||||
if st, _ := source.ExplicitType(); st != nil {
|
||||
sourceType = *st
|
||||
@@ -540,8 +614,8 @@ func constructAppsBaseOnName(appName string, labels, annotations, args []string,
|
||||
Source: &argoappv1.ApplicationSource{},
|
||||
},
|
||||
}
|
||||
SetAppSpecOptions(flags, &app.Spec, &appOpts, 0)
|
||||
SetParameterOverrides(app, appOpts.Parameters, 0)
|
||||
SetAppSpecOptions(flags, &app.Spec, &appOpts)
|
||||
SetParameterOverrides(app, appOpts.Parameters)
|
||||
mergeLabels(app, labels)
|
||||
setAnnotations(app, annotations)
|
||||
return []*argoappv1.Application{
|
||||
@@ -566,15 +640,10 @@ func constructAppsFromFileUrl(fileURL, appName string, labels, annotations, args
|
||||
if app.Name == "" {
|
||||
return nil, fmt.Errorf("app.Name is empty. --name argument can be used to provide app.Name")
|
||||
}
|
||||
|
||||
SetAppSpecOptions(flags, &app.Spec, &appOpts)
|
||||
SetParameterOverrides(app, appOpts.Parameters)
|
||||
mergeLabels(app, labels)
|
||||
setAnnotations(app, annotations)
|
||||
|
||||
// do not allow overrides for applications with multiple sources
|
||||
if !app.Spec.HasMultipleSources() {
|
||||
SetAppSpecOptions(flags, &app.Spec, &appOpts, 0)
|
||||
SetParameterOverrides(app, appOpts.Parameters, 0)
|
||||
}
|
||||
}
|
||||
return apps, nil
|
||||
}
|
||||
@@ -585,117 +654,9 @@ func ConstructApps(fileURL, appName string, labels, annotations, args []string,
|
||||
} else if fileURL != "" {
|
||||
return constructAppsFromFileUrl(fileURL, appName, labels, annotations, args, appOpts, flags)
|
||||
}
|
||||
|
||||
return constructAppsBaseOnName(appName, labels, annotations, args, appOpts, flags)
|
||||
}
|
||||
|
||||
func ConstructSource(source *argoappv1.ApplicationSource, appOpts AppOptions, flags *pflag.FlagSet) (*argoappv1.ApplicationSource, int) {
|
||||
visited := 0
|
||||
flags.Visit(func(f *pflag.Flag) {
|
||||
visited++
|
||||
switch f.Name {
|
||||
case "repo":
|
||||
source.RepoURL = appOpts.repoURL
|
||||
case "path":
|
||||
source.Path = appOpts.appPath
|
||||
case "helm-chart":
|
||||
source.Chart = appOpts.chart
|
||||
case "revision":
|
||||
source.TargetRevision = appOpts.revision
|
||||
case "values":
|
||||
setHelmOpt(source, helmOpts{valueFiles: appOpts.valuesFiles})
|
||||
case "ignore-missing-value-files":
|
||||
setHelmOpt(source, helmOpts{ignoreMissingValueFiles: appOpts.ignoreMissingValueFiles})
|
||||
case "values-literal-file":
|
||||
var data []byte
|
||||
// read uri
|
||||
parsedURL, err := url.ParseRequestURI(appOpts.values)
|
||||
if err != nil || !(parsedURL.Scheme == "http" || parsedURL.Scheme == "https") {
|
||||
data, err = os.ReadFile(appOpts.values)
|
||||
} else {
|
||||
data, err = config.ReadRemoteFile(appOpts.values)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
setHelmOpt(source, helmOpts{values: string(data)})
|
||||
case "release-name":
|
||||
setHelmOpt(source, helmOpts{releaseName: appOpts.releaseName})
|
||||
case "helm-version":
|
||||
setHelmOpt(source, helmOpts{version: appOpts.helmVersion})
|
||||
case "helm-pass-credentials":
|
||||
setHelmOpt(source, helmOpts{passCredentials: appOpts.helmPassCredentials})
|
||||
case "helm-set":
|
||||
setHelmOpt(source, helmOpts{helmSets: appOpts.helmSets})
|
||||
case "helm-set-string":
|
||||
setHelmOpt(source, helmOpts{helmSetStrings: appOpts.helmSetStrings})
|
||||
case "helm-set-file":
|
||||
setHelmOpt(source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
|
||||
case "helm-skip-crds":
|
||||
setHelmOpt(source, helmOpts{skipCrds: appOpts.helmSkipCrds})
|
||||
case "directory-recurse":
|
||||
if source.Directory != nil {
|
||||
source.Directory.Recurse = appOpts.directoryRecurse
|
||||
} else {
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse}
|
||||
}
|
||||
case "directory-exclude":
|
||||
if source.Directory != nil {
|
||||
source.Directory.Exclude = appOpts.directoryExclude
|
||||
} else {
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Exclude: appOpts.directoryExclude}
|
||||
}
|
||||
case "directory-include":
|
||||
if source.Directory != nil {
|
||||
source.Directory.Include = appOpts.directoryInclude
|
||||
} else {
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Include: appOpts.directoryInclude}
|
||||
}
|
||||
case "config-management-plugin":
|
||||
source.Plugin = &argoappv1.ApplicationSourcePlugin{Name: appOpts.configManagementPlugin}
|
||||
case "nameprefix":
|
||||
setKustomizeOpt(source, kustomizeOpts{namePrefix: appOpts.namePrefix})
|
||||
case "namesuffix":
|
||||
setKustomizeOpt(source, kustomizeOpts{nameSuffix: appOpts.nameSuffix})
|
||||
case "kustomize-image":
|
||||
setKustomizeOpt(source, kustomizeOpts{images: appOpts.kustomizeImages})
|
||||
case "kustomize-replica":
|
||||
setKustomizeOpt(source, kustomizeOpts{replicas: appOpts.kustomizeReplicas})
|
||||
case "kustomize-version":
|
||||
setKustomizeOpt(source, kustomizeOpts{version: appOpts.kustomizeVersion})
|
||||
case "kustomize-namespace":
|
||||
setKustomizeOpt(source, kustomizeOpts{namespace: appOpts.kustomizeNamespace})
|
||||
case "kustomize-common-label":
|
||||
parsedLabels, err := label.Parse(appOpts.kustomizeCommonLabels)
|
||||
errors.CheckError(err)
|
||||
setKustomizeOpt(source, kustomizeOpts{commonLabels: parsedLabels})
|
||||
case "kustomize-common-annotation":
|
||||
parsedAnnotations, err := label.Parse(appOpts.kustomizeCommonAnnotations)
|
||||
errors.CheckError(err)
|
||||
setKustomizeOpt(source, kustomizeOpts{commonAnnotations: parsedAnnotations})
|
||||
case "kustomize-label-without-selector":
|
||||
setKustomizeOpt(source, kustomizeOpts{labelWithoutSelector: appOpts.kustomizeLabelWithoutSelector})
|
||||
case "kustomize-force-common-label":
|
||||
setKustomizeOpt(source, kustomizeOpts{forceCommonLabels: appOpts.kustomizeForceCommonLabels})
|
||||
case "kustomize-force-common-annotation":
|
||||
setKustomizeOpt(source, kustomizeOpts{forceCommonAnnotations: appOpts.kustomizeForceCommonAnnotations})
|
||||
case "jsonnet-tla-str":
|
||||
setJsonnetOpt(source, appOpts.jsonnetTlaStr, false)
|
||||
case "jsonnet-tla-code":
|
||||
setJsonnetOpt(source, appOpts.jsonnetTlaCode, true)
|
||||
case "jsonnet-ext-var-str":
|
||||
setJsonnetOptExtVar(source, appOpts.jsonnetExtVarStr, false)
|
||||
case "jsonnet-ext-var-code":
|
||||
setJsonnetOptExtVar(source, appOpts.jsonnetExtVarCode, true)
|
||||
case "jsonnet-libs":
|
||||
setJsonnetOptLibs(source, appOpts.jsonnetLibs)
|
||||
case "plugin-env":
|
||||
setPluginOptEnvs(source, appOpts.pluginEnvs)
|
||||
case "ref":
source.Ref = appOpts.ref
}
})
return source, visited
}
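A minimal sketch, assuming the sourcePosition-aware signature of SetAppSpecOptions shown above, of how a command handler might target one source of a multi-source app:

func applyFlagsToSource(cmd *cobra.Command, app *argoappv1.Application, appOpts *AppOptions, sourcePosition int) int {
	// Hypothetical call site: position N (1-based) edits spec.Sources[N-1];
	// position 0 falls back to spec.Source for single-source apps.
	return SetAppSpecOptions(cmd.Flags(), &app.Spec, appOpts, sourcePosition)
}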
func mergeLabels(app *argoappv1.Application, labels []string) {
|
||||
mapLabels, err := label.Parse(labels)
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -123,11 +123,6 @@ func Test_setKustomizeOpt(t *testing.T) {
|
||||
setKustomizeOpt(&src, kustomizeOpts{commonAnnotations: map[string]string{"foo1": "bar1", "foo2": "bar2"}})
|
||||
assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{CommonAnnotations: map[string]string{"foo1": "bar1", "foo2": "bar2"}}, src.Kustomize)
|
||||
})
|
||||
t.Run("Label Without Selector", func(t *testing.T) {
|
||||
src := v1alpha1.ApplicationSource{}
|
||||
setKustomizeOpt(&src, kustomizeOpts{commonLabels: map[string]string{"foo1": "bar1", "foo2": "bar2"}, labelWithoutSelector: true})
|
||||
assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{CommonLabels: map[string]string{"foo1": "bar1", "foo2": "bar2"}, LabelWithoutSelector: true}, src.Kustomize)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_setJsonnetOpt(t *testing.T) {
|
||||
@@ -170,16 +165,7 @@ func (f *appOptionsFixture) SetFlag(key, value string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = SetAppSpecOptions(f.command.Flags(), f.spec, f.options, 0)
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *appOptionsFixture) SetFlagWithSourcePosition(key, value string, sourcePosition int) error {
|
||||
err := f.command.Flags().Set(key, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = SetAppSpecOptions(f.command.Flags(), f.spec, f.options, sourcePosition)
|
||||
_ = SetAppSpecOptions(f.command.Flags(), f.spec, f.options)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -234,54 +220,6 @@ func Test_setAppSpecOptions(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func newMultiSourceAppOptionsFixture() *appOptionsFixture {
|
||||
fixture := &appOptionsFixture{
|
||||
spec: &v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
v1alpha1.ApplicationSource{},
|
||||
v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
command: &cobra.Command{},
|
||||
options: &AppOptions{},
|
||||
}
|
||||
AddAppFlags(fixture.command, fixture.options)
|
||||
return fixture
|
||||
}
|
||||
|
||||
func Test_setAppSpecOptionsMultiSourceApp(t *testing.T) {
|
||||
f := newMultiSourceAppOptionsFixture()
|
||||
sourcePosition := 0
|
||||
sourcePosition1 := 1
|
||||
sourcePosition2 := 2
|
||||
t.Run("SyncPolicy", func(t *testing.T) {
|
||||
assert.NoError(t, f.SetFlagWithSourcePosition("sync-policy", "automated", sourcePosition1))
|
||||
assert.NotNil(t, f.spec.SyncPolicy.Automated)
|
||||
|
||||
f.spec.SyncPolicy = nil
|
||||
assert.NoError(t, f.SetFlagWithSourcePosition("sync-policy", "automatic", sourcePosition1))
|
||||
assert.NotNil(t, f.spec.SyncPolicy.Automated)
|
||||
})
|
||||
t.Run("Helm - SourcePosition 0", func(t *testing.T) {
|
||||
assert.NoError(t, f.SetFlagWithSourcePosition("helm-version", "v2", sourcePosition))
|
||||
assert.Equal(t, len(f.spec.GetSources()), 2)
|
||||
assert.Equal(t, f.spec.GetSources()[sourcePosition].Helm.Version, "v2")
|
||||
})
|
||||
t.Run("Kustomize", func(t *testing.T) {
|
||||
assert.NoError(t, f.SetFlagWithSourcePosition("kustomize-replica", "my-deployment=2", sourcePosition1))
|
||||
assert.Equal(t, f.spec.Sources[sourcePosition1-1].Kustomize.Replicas, v1alpha1.KustomizeReplicas{{Name: "my-deployment", Count: intstr.FromInt(2)}})
|
||||
assert.NoError(t, f.SetFlagWithSourcePosition("kustomize-replica", "my-deployment=4", sourcePosition2))
|
||||
assert.Equal(t, f.spec.Sources[sourcePosition2-1].Kustomize.Replicas, v1alpha1.KustomizeReplicas{{Name: "my-deployment", Count: intstr.FromInt(4)}})
|
||||
})
|
||||
t.Run("Helm", func(t *testing.T) {
|
||||
assert.NoError(t, f.SetFlagWithSourcePosition("helm-version", "v2", sourcePosition1))
|
||||
assert.NoError(t, f.SetFlagWithSourcePosition("helm-version", "v3", sourcePosition2))
|
||||
assert.Equal(t, len(f.spec.GetSources()), 2)
|
||||
assert.Equal(t, f.spec.GetSources()[sourcePosition1-1].Helm.Version, "v2")
|
||||
assert.Equal(t, f.spec.GetSources()[sourcePosition2-1].Helm.Version, "v3")
|
||||
})
|
||||
}
|
||||
|
||||
func Test_setAnnotations(t *testing.T) {
|
||||
t.Run("Annotations", func(t *testing.T) {
|
||||
app := v1alpha1.Application{}
|
||||
|
||||
@@ -144,7 +144,6 @@ type ClusterOptions struct {
|
||||
Upsert bool
|
||||
ServiceAccount string
|
||||
AwsRoleArn string
|
||||
AwsProfile string
|
||||
AwsClusterName string
|
||||
SystemNamespace string
|
||||
Namespaces []string
|
||||
@@ -170,7 +169,6 @@ func AddClusterFlags(command *cobra.Command, opts *ClusterOptions) {
|
||||
command.Flags().BoolVar(&opts.InCluster, "in-cluster", false, "Indicates Argo CD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
|
||||
command.Flags().StringVar(&opts.AwsClusterName, "aws-cluster-name", "", "AWS Cluster name if set then aws cli eks token command will be used to access cluster")
|
||||
command.Flags().StringVar(&opts.AwsRoleArn, "aws-role-arn", "", "Optional AWS role arn. If set then AWS IAM Authenticator assumes a role to perform cluster operations instead of the default AWS credential provider chain.")
|
||||
command.Flags().StringVar(&opts.AwsProfile, "aws-profile", "", "Optional AWS profile. If set then AWS IAM Authenticator uses this profile to perform cluster operations instead of the default AWS credential provider chain.")
|
||||
command.Flags().StringArrayVar(&opts.Namespaces, "namespace", nil, "List of namespaces which are allowed to manage")
|
||||
command.Flags().BoolVar(&opts.ClusterResources, "cluster-resources", false, "Indicates if cluster level resources should be managed. The setting is used only if list of managed namespaces is not empty.")
|
||||
command.Flags().StringVar(&opts.Name, "name", "", "Overwrite the cluster name")
|
||||
|
||||
@@ -2,9 +2,6 @@ package apiclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
@@ -17,9 +14,9 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/io"
|
||||
)
var (
const (
// MaxGRPCMessageSize contains max grpc message size
MaxGRPCMessageSize = env.ParseNumFromEnv(common.EnvGRPCMaxSizeMB, 100, 0, math.MaxInt32) * 1024 * 1024
MaxGRPCMessageSize = 100 * 1024 * 1024
)
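A minimal sketch, assuming the util/env helper used above, of how the env-driven variant computes the limit in bytes:

func grpcMaxSizeBytes() int {
	// Hypothetical helper: read ARGOCD_GRPC_MAX_SIZE_MB (default 100, clamped
	// to [0, MaxInt32]) and convert megabytes to bytes.
	sizeMB := env.ParseNumFromEnv(common.EnvGRPCMaxSizeMB, 100, 0, math.MaxInt32)
	return sizeMB * 1024 * 1024
}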
// Clientset represents config management plugin server api clients
|
||||
|
||||
@@ -369,7 +369,7 @@ func TestRunCommandEmptyCommand(t *testing.T) {
|
||||
assert.ErrorContains(t, err, "Command is empty")
|
||||
}
|
||||
|
||||
// TestRunCommandContextTimeoutWithCleanup makes sure that the process is given enough time to cleanup before sending SIGKILL.
|
||||
// TestRunCommandContextTimeoutWithGracefulTermination makes sure that the process is given enough time to cleanup before sending SIGKILL.
|
||||
func TestRunCommandContextTimeoutWithCleanup(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
@@ -149,14 +149,10 @@ const (
|
||||
LabelKeyAppInstance = "app.kubernetes.io/instance"
|
||||
// LabelKeyAppName is the label key to use to uniquely identify the name of the Kubernetes application
|
||||
LabelKeyAppName = "app.kubernetes.io/name"
|
||||
// LabelKeyAutoLabelClusterInfo if set to true will automatically add extra labels from the cluster info (currently it only adds a k8s version label)
|
||||
LabelKeyAutoLabelClusterInfo = "argocd.argoproj.io/auto-label-cluster-info"
|
||||
// LabelKeyLegacyApplicationName is the legacy label (v0.10 and below) and is superseded by 'app.kubernetes.io/instance'
|
||||
LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name"
|
||||
// LabelKeySecretType contains the type of argocd secret (currently: 'cluster', 'repository', 'repo-config' or 'repo-creds')
|
||||
LabelKeySecretType = "argocd.argoproj.io/secret-type"
|
||||
// LabelKeyClusterKubernetesVersion contains the kubernetes version of the cluster secret if it has been enabled
|
||||
LabelKeyClusterKubernetesVersion = "argocd.argoproj.io/kubernetes-version"
|
||||
// LabelValueSecretTypeCluster indicates a secret type of cluster
|
||||
LabelValueSecretTypeCluster = "cluster"
|
||||
// LabelValueSecretTypeRepository indicates a secret type of repository
|
||||
@@ -188,10 +184,6 @@ const (
|
||||
// AnnotationKeyAppSkipReconcile tells the Application to skip the Application controller reconcile.
|
||||
// Skip reconcile when the value is "true" or any other string values that can be strconv.ParseBool() to be true.
|
||||
AnnotationKeyAppSkipReconcile = "argocd.argoproj.io/skip-reconcile"
|
||||
// LabelKeyComponentRepoServer is the label key to identify the component as repo-server
|
||||
LabelKeyComponentRepoServer = "app.kubernetes.io/component"
|
||||
// LabelValueComponentRepoServer is the label value for the repo-server component
|
||||
LabelValueComponentRepoServer = "repo-server"
|
||||
)
|
||||
|
||||
// Environment variables for tuning and debugging Argo CD
|
||||
@@ -246,8 +238,6 @@ const (
|
||||
EnvLogFormat = "ARGOCD_LOG_FORMAT"
|
||||
// EnvLogLevel log level that is defined by `--loglevel` option
|
||||
EnvLogLevel = "ARGOCD_LOG_LEVEL"
|
||||
// EnvLogFormatEnableFullTimestamp enables the FullTimestamp option in logs
|
||||
EnvLogFormatEnableFullTimestamp = "ARGOCD_LOG_FORMAT_ENABLE_FULL_TIMESTAMP"
|
||||
// EnvMaxCookieNumber max number of chunks a cookie can be broken into
|
||||
EnvMaxCookieNumber = "ARGOCD_MAX_COOKIE_NUMBER"
|
||||
// EnvPluginSockFilePath allows to override the pluginSockFilePath for repo server and cmp server
|
||||
@@ -273,8 +263,6 @@ const (
|
||||
// EnvServerSideDiff defines the env var used to enable ServerSide Diff feature.
|
||||
// If defined, value must be "true" or "false".
|
||||
EnvServerSideDiff = "ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF"
|
||||
// EnvGRPCMaxSizeMB is the environment variable to look for a max GRPC message size
|
||||
EnvGRPCMaxSizeMB = "ARGOCD_GRPC_MAX_SIZE_MB"
|
||||
)
|
||||
|
||||
// Config Management Plugin related constants
|
||||
|
||||
@@ -48,6 +48,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/controller/sharding"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argov1alpha "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions/application/v1alpha1"
|
||||
applisters "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
@@ -514,13 +515,13 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal live state of managed resources: %w", err)
|
||||
}
|
||||
var target = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(managedResource.TargetState), &target)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal target state of managed resources: %w", err)
|
||||
}
|
||||
|
||||
if live == nil {
|
||||
var target = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(managedResource.TargetState), &target)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal target state of managed resources: %w", err)
|
||||
}
|
||||
nodes = append(nodes, appv1.ResourceNode{
|
||||
ResourceRef: appv1.ResourceRef{
|
||||
Version: target.GroupVersionKind().Version,
|
||||
@@ -800,13 +801,7 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
|
||||
if err != nil {
|
||||
log.Warnf("Cannot init sharding. Error while querying clusters list from database: %v", err)
|
||||
} else {
|
||||
appItems, err := ctrl.getAppList(metav1.ListOptions{})
|
||||
|
||||
if err != nil {
|
||||
log.Warnf("Cannot init sharding. Error while querying application list from database: %v", err)
|
||||
} else {
|
||||
ctrl.clusterSharding.Init(clusters, appItems)
|
||||
}
|
||||
ctrl.clusterSharding.Init(clusters)
|
||||
}
|
||||
|
||||
errors.CheckError(ctrl.stateCache.Init())
|
||||
@@ -1060,7 +1055,7 @@ func (ctrl *ApplicationController) getPermittedAppLiveObjects(app *appv1.Applica
|
||||
return objsMap, nil
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) isValidDestination(app *appv1.Application) (bool, *appv1.Cluster) {
|
||||
func (ctrl *ApplicationController) isValidDestination(app *appv1.Application) (bool, *argov1alpha.Cluster) {
|
||||
// Validate the cluster using the Application destination's `name` field, if applicable,
|
||||
// and set the Server field, if needed.
|
||||
if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil {
|
||||
@@ -2116,10 +2111,6 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
ctrl.appRefreshQueue.AddRateLimited(key)
|
||||
ctrl.appOperationQueue.AddRateLimited(key)
|
||||
}
|
||||
newApp, newOK := obj.(*appv1.Application)
|
||||
if err == nil && newOK {
|
||||
ctrl.clusterSharding.AddApp(newApp)
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
if !ctrl.canProcessApp(new) {
|
||||
@@ -2150,7 +2141,6 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
|
||||
ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, delay)
|
||||
ctrl.appOperationQueue.AddRateLimited(key)
|
||||
ctrl.clusterSharding.UpdateApp(newApp)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
if !ctrl.canProcessApp(obj) {
|
||||
@@ -2163,10 +2153,6 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
// for deletes, we immediately add to the refresh queue
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
}
|
||||
delApp, delOK := obj.(*appv1.Application)
|
||||
if err == nil && delOK {
|
||||
ctrl.clusterSharding.DeleteApp(delApp)
|
||||
}
|
||||
},
|
||||
},
|
||||
)
|
||||
@@ -2242,26 +2228,4 @@ func (ctrl *ApplicationController) toAppQualifiedName(appName, appNamespace stri
|
||||
return fmt.Sprintf("%s/%s", appNamespace, appName)
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) getAppList(options metav1.ListOptions) (*appv1.ApplicationList, error) {
watchNamespace := ctrl.namespace
// If we have at least one additional namespace configured, we need to
// watch on them all.
if len(ctrl.applicationNamespaces) > 0 {
watchNamespace = ""
}

appList, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(watchNamespace).List(context.TODO(), options)
if err != nil {
return nil, err
}
newItems := []appv1.Application{}
for _, app := range appList.Items {
if ctrl.isAppNamespaceAllowed(&app) {
newItems = append(newItems, app)
}
}
appList.Items = newItems
return appList, nil
}
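A minimal sketch, assuming the controller's database handle exposes ListClusters, of how Run wires both lists into the sharding cache:

func (ctrl *ApplicationController) initSharding(ctx context.Context) {
	// Hypothetical helper mirroring the Run() snippet above: fetch the cluster
	// list and the namespace-filtered application list, then seed the shard cache.
	clusters, err := ctrl.db.ListClusters(ctx)
	if err != nil {
		log.Warnf("Cannot init sharding. Error while querying clusters list from database: %v", err)
		return
	}
	appItems, err := ctrl.getAppList(metav1.ListOptions{})
	if err != nil {
		log.Warnf("Cannot init sharding. Error while querying application list from database: %v", err)
		return
	}
	ctrl.clusterSharding.Init(clusters, appItems)
}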
type ClusterFilterFunction func(c *appv1.Cluster, distributionFunction sharding.DistributionFunction) bool
|
||||
type ClusterFilterFunction func(c *argov1alpha.Cluster, distributionFunction sharding.DistributionFunction) bool
|
||||
|
||||
@@ -54,15 +54,14 @@ type namespacedResource struct {
|
||||
}
|
||||
|
||||
type fakeData struct {
|
||||
apps []runtime.Object
|
||||
manifestResponse *apiclient.ManifestResponse
|
||||
manifestResponses []*apiclient.ManifestResponse
|
||||
managedLiveObjs map[kube.ResourceKey]*unstructured.Unstructured
|
||||
namespacedResources map[kube.ResourceKey]namespacedResource
|
||||
configMapData map[string]string
|
||||
metricsCacheExpiration time.Duration
|
||||
applicationNamespaces []string
|
||||
updateRevisionForPathsResponse *apiclient.UpdateRevisionForPathsResponse
|
||||
apps []runtime.Object
|
||||
manifestResponse *apiclient.ManifestResponse
|
||||
manifestResponses []*apiclient.ManifestResponse
|
||||
managedLiveObjs map[kube.ResourceKey]*unstructured.Unstructured
|
||||
namespacedResources map[kube.ResourceKey]namespacedResource
|
||||
configMapData map[string]string
|
||||
metricsCacheExpiration time.Duration
|
||||
applicationNamespaces []string
|
||||
}
|
||||
|
||||
type MockKubectl struct {
|
||||
@@ -108,8 +107,6 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
|
||||
}
|
||||
}
|
||||
|
||||
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(data.updateRevisionForPathsResponse, nil)
|
||||
|
||||
mockRepoClientset := mockrepoclient.Clientset{RepoServerServiceClient: &mockRepoClient}
|
||||
|
||||
secret := corev1.Secret{
|
||||
|
||||
9
controller/cache/cache.go
vendored
@@ -374,14 +374,9 @@ func isRetryableError(err error) bool {
isResourceQuotaConflictErr(err) ||
isTransientNetworkErr(err) ||
isExceededQuotaErr(err) ||
isHTTP2GoawayErr(err) ||
errors.Is(err, syscall.ECONNRESET)
}

func isHTTP2GoawayErr(err error) bool {
return strings.Contains(err.Error(), "http2: server sent GOAWAY and closed the connection")
}
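A minimal sketch, assuming the isRetryableError predicate above, of a retry loop that treats GOAWAY and connection resets as transient:

func withRetry(attempts int, op func() error) error {
	// Hypothetical retry wrapper: back off and retry only while the error is
	// one of the transient conditions recognised by isRetryableError.
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil || !isRetryableError(err) {
			return err
		}
		time.Sleep(time.Second << i) // simple exponential backoff
	}
	return err
}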
func isExceededQuotaErr(err error) bool {
|
||||
return kerrors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota")
|
||||
}
|
||||
@@ -439,10 +434,6 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
|
||||
return nil, fmt.Errorf("error getting cluster: %w", err)
|
||||
}
|
||||
|
||||
if c.clusterSharding == nil {
|
||||
return nil, fmt.Errorf("unable to handle cluster %s: cluster sharding is not configured", cluster.Server)
|
||||
}
|
||||
|
||||
if !c.canHandleCluster(cluster) {
|
||||
return nil, fmt.Errorf("controller is configured to ignore cluster %s", cluster.Server)
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package controller
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
@@ -102,11 +101,8 @@ func (c *clusterInfoUpdater) updateClusters() {
|
||||
}
|
||||
_ = kube.RunAllAsync(len(clustersFiltered), func(i int) error {
|
||||
cluster := clustersFiltered[i]
|
||||
clusterInfo := infoByServer[cluster.Server]
|
||||
if err := c.updateClusterInfo(ctx, cluster, clusterInfo); err != nil {
|
||||
log.Warnf("Failed to save cluster info: %v", err)
|
||||
} else if err := updateClusterLabels(ctx, clusterInfo, cluster, c.db.UpdateCluster); err != nil {
|
||||
log.Warnf("Failed to update cluster labels: %v", err)
|
||||
if err := c.updateClusterInfo(ctx, cluster, infoByServer[cluster.Server]); err != nil {
|
||||
log.Warnf("Failed to save clusters info: %v", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -118,12 +114,6 @@ func (c *clusterInfoUpdater) updateClusterInfo(ctx context.Context, cluster appv
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while fetching the apps list: %w", err)
|
||||
}
|
||||
|
||||
updated := c.getUpdatedClusterInfo(ctx, apps, cluster, info, metav1.Now())
|
||||
return c.cache.SetClusterInfo(cluster.Server, &updated)
|
||||
}
|
||||
|
||||
func (c *clusterInfoUpdater) getUpdatedClusterInfo(ctx context.Context, apps []*appv1.Application, cluster appv1.Cluster, info *cache.ClusterInfo, now metav1.Time) appv1.ClusterInfo {
|
||||
var appCount int64
|
||||
for _, a := range apps {
|
||||
if c.projGetter != nil {
|
||||
@@ -139,6 +129,7 @@ func (c *clusterInfoUpdater) getUpdatedClusterInfo(ctx context.Context, apps []*
|
||||
appCount += 1
|
||||
}
|
||||
}
|
||||
now := metav1.Now()
|
||||
clusterInfo := appv1.ClusterInfo{
|
||||
ConnectionState: appv1.ConnectionState{ModifiedAt: &now},
|
||||
ApplicationsCount: appCount,
|
||||
@@ -165,15 +156,5 @@ func (c *clusterInfoUpdater) getUpdatedClusterInfo(ctx context.Context, apps []*
|
||||
}
|
||||
}
|
||||
|
||||
return clusterInfo
|
||||
}
|
||||
|
||||
func updateClusterLabels(ctx context.Context, clusterInfo *cache.ClusterInfo, cluster appv1.Cluster, updateCluster func(context.Context, *appv1.Cluster) (*appv1.Cluster, error)) error {
|
||||
if clusterInfo != nil && cluster.Labels[common.LabelKeyAutoLabelClusterInfo] == "true" && cluster.Labels[common.LabelKeyClusterKubernetesVersion] != clusterInfo.K8SVersion {
|
||||
cluster.Labels[common.LabelKeyClusterKubernetesVersion] = clusterInfo.K8SVersion
|
||||
_, err := updateCluster(ctx, &cluster)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return c.cache.SetClusterInfo(cluster.Server, &clusterInfo)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -99,92 +98,3 @@ func TestClusterSecretUpdater(t *testing.T) {
|
||||
assert.Equal(t, test.ExpectedStatus, clusterInfo.ConnectionState.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateClusterLabels(t *testing.T) {
|
||||
shouldNotBeInvoked := func(ctx context.Context, cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
|
||||
shouldNotHappen := errors.New("if an error happens here, something's wrong")
|
||||
assert.NoError(t, shouldNotHappen)
|
||||
return nil, shouldNotHappen
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
clusterInfo *clustercache.ClusterInfo
|
||||
cluster v1alpha1.Cluster
|
||||
updateCluster func(context.Context, *v1alpha1.Cluster) (*v1alpha1.Cluster, error)
|
||||
wantErr assert.ErrorAssertionFunc
|
||||
}{
|
||||
{
|
||||
"enableClusterInfoLabels = false",
|
||||
&clustercache.ClusterInfo{
|
||||
Server: "kubernetes.svc.local",
|
||||
K8SVersion: "1.28",
|
||||
},
|
||||
v1alpha1.Cluster{
|
||||
Server: "kubernetes.svc.local",
|
||||
Labels: nil,
|
||||
},
|
||||
shouldNotBeInvoked,
|
||||
assert.NoError,
|
||||
},
|
||||
{
|
||||
"clusterInfo = nil",
|
||||
nil,
|
||||
v1alpha1.Cluster{
|
||||
Server: "kubernetes.svc.local",
|
||||
Labels: map[string]string{"argocd.argoproj.io/auto-label-cluster-info": "true"},
|
||||
},
|
||||
shouldNotBeInvoked,
|
||||
assert.NoError,
|
||||
},
|
||||
{
|
||||
"clusterInfo.k8sversion == cluster k8s label",
|
||||
&clustercache.ClusterInfo{
|
||||
Server: "kubernetes.svc.local",
|
||||
K8SVersion: "1.28",
|
||||
},
|
||||
v1alpha1.Cluster{
|
||||
Server: "kubernetes.svc.local",
|
||||
Labels: map[string]string{"argocd.argoproj.io/kubernetes-version": "1.28", "argocd.argoproj.io/auto-label-cluster-info": "true"},
|
||||
},
|
||||
shouldNotBeInvoked,
|
||||
assert.NoError,
|
||||
},
|
||||
{
|
||||
"clusterInfo.k8sversion != cluster k8s label, no error",
|
||||
&clustercache.ClusterInfo{
|
||||
Server: "kubernetes.svc.local",
|
||||
K8SVersion: "1.28",
|
||||
},
|
||||
v1alpha1.Cluster{
|
||||
Server: "kubernetes.svc.local",
|
||||
Labels: map[string]string{"argocd.argoproj.io/kubernetes-version": "1.27", "argocd.argoproj.io/auto-label-cluster-info": "true"},
|
||||
},
|
||||
func(ctx context.Context, cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
|
||||
assert.Equal(t, cluster.Labels["argocd.argoproj.io/kubernetes-version"], "1.28")
|
||||
return nil, nil
|
||||
},
|
||||
assert.NoError,
|
||||
},
|
||||
{
|
||||
"clusterInfo.k8sversion != cluster k8s label, some error",
|
||||
&clustercache.ClusterInfo{
|
||||
Server: "kubernetes.svc.local",
|
||||
K8SVersion: "1.28",
|
||||
},
|
||||
v1alpha1.Cluster{
|
||||
Server: "kubernetes.svc.local",
|
||||
Labels: map[string]string{"argocd.argoproj.io/kubernetes-version": "1.27", "argocd.argoproj.io/auto-label-cluster-info": "true"},
|
||||
},
|
||||
func(ctx context.Context, cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
|
||||
assert.Equal(t, cluster.Labels["argocd.argoproj.io/kubernetes-version"], "1.28")
|
||||
return nil, errors.New("some error happened while saving")
|
||||
},
|
||||
assert.Error,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.wantErr(t, updateClusterLabels(context.Background(), tt.clusterInfo, tt.cluster, tt.updateCluster), fmt.Sprintf("updateClusterLabels(%v, %v, %v)", context.Background(), tt.clusterInfo, tt.cluster))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -374,7 +374,7 @@ func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
|
||||
}
|
||||
}
|
||||
|
||||
// assertMetricsNotPrinted
|
||||
// assertMetricNotPrinted
|
||||
func assertMetricsNotPrinted(t *testing.T, expectedLines, body string) {
|
||||
for _, line := range strings.Split(expectedLines, "\n") {
|
||||
if line == "" {
|
||||
|
||||
@@ -9,16 +9,12 @@ import (
)

type ClusterShardingCache interface {
Init(clusters *v1alpha1.ClusterList, apps *v1alpha1.ApplicationList)
Init(clusters *v1alpha1.ClusterList)
Add(c *v1alpha1.Cluster)
Delete(clusterServer string)
Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster)
AddApp(a *v1alpha1.Application)
DeleteApp(a *v1alpha1.Application)
UpdateApp(a *v1alpha1.Application)
IsManagedCluster(c *v1alpha1.Cluster) bool
GetDistribution() map[string]int
GetAppDistribution() map[string]int
}

type ClusterSharding struct {
@@ -26,7 +22,6 @@ type ClusterSharding struct {
Replicas int
Shards map[string]int
Clusters map[string]*v1alpha1.Cluster
Apps map[string]*v1alpha1.Application
lock sync.RWMutex
getClusterShard DistributionFunction
}
@@ -38,12 +33,11 @@ func NewClusterSharding(_ db.ArgoDB, shard, replicas int, shardingAlgorithm stri
|
||||
Replicas: replicas,
|
||||
Shards: make(map[string]int),
|
||||
Clusters: make(map[string]*v1alpha1.Cluster),
|
||||
Apps: make(map[string]*v1alpha1.Application),
|
||||
}
|
||||
distributionFunction := NoShardingDistributionFunction()
|
||||
if replicas > 1 {
|
||||
log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
|
||||
distributionFunction = GetDistributionFunction(clusterSharding.getClusterAccessor(), clusterSharding.getAppAccessor(), shardingAlgorithm, replicas)
|
||||
distributionFunction = GetDistributionFunction(clusterSharding.GetClusterAccessor(), shardingAlgorithm, replicas)
|
||||
} else {
|
||||
log.Info("Processing all cluster shards")
|
||||
}
|
||||
@@ -68,7 +62,7 @@ func (s *ClusterSharding) IsManagedCluster(c *v1alpha1.Cluster) bool {
|
||||
return clusterShard == s.Shard
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList, apps *v1alpha1.ApplicationList) {
|
||||
func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList) {
|
||||
sharding.lock.Lock()
|
||||
defer sharding.lock.Unlock()
|
||||
newClusters := make(map[string]*v1alpha1.Cluster, len(clusters.Items))
|
||||
@@ -77,13 +71,6 @@ func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList, apps *v1al
|
||||
newClusters[c.Server] = &cluster
|
||||
}
|
||||
sharding.Clusters = newClusters
|
||||
|
||||
newApps := make(map[string]*v1alpha1.Application, len(apps.Items))
|
||||
for i := range apps.Items {
|
||||
app := apps.Items[i]
|
||||
newApps[app.Name] = &app
|
||||
}
|
||||
sharding.Apps = newApps
|
||||
sharding.updateDistribution()
|
||||
}
|
||||
|
||||
@@ -186,8 +173,7 @@ func hasShardingUpdates(old, new *v1alpha1.Cluster) bool {
|
||||
return old.Shard == nil || new.Shard == nil || int64(*old.Shard) != int64(*new.Shard)
|
||||
}
|
||||
|
||||
// A read lock should be acquired before calling getClusterAccessor.
|
||||
func (d *ClusterSharding) getClusterAccessor() clusterAccessor {
|
||||
func (d *ClusterSharding) GetClusterAccessor() clusterAccessor {
|
||||
return func() []*v1alpha1.Cluster {
|
||||
// no need to lock, as this is only called from the updateDistribution function
|
||||
clusters := make([]*v1alpha1.Cluster, 0, len(d.Clusters))
|
||||
@@ -197,68 +183,3 @@ func (d *ClusterSharding) getClusterAccessor() clusterAccessor {
|
||||
return clusters
|
||||
}
|
||||
}
|
||||
|
||||
// A read lock should be acquired before calling getAppAccessor.
|
||||
func (d *ClusterSharding) getAppAccessor() appAccessor {
|
||||
return func() []*v1alpha1.Application {
|
||||
apps := make([]*v1alpha1.Application, 0, len(d.Apps))
|
||||
for _, a := range d.Apps {
|
||||
apps = append(apps, a)
|
||||
}
|
||||
return apps
|
||||
}
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) AddApp(a *v1alpha1.Application) {
|
||||
sharding.lock.Lock()
|
||||
defer sharding.lock.Unlock()
|
||||
|
||||
_, ok := sharding.Apps[a.Name]
|
||||
sharding.Apps[a.Name] = a
|
||||
if !ok {
|
||||
sharding.updateDistribution()
|
||||
} else {
|
||||
log.Debugf("Skipping sharding distribution update. App already added")
|
||||
}
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) DeleteApp(a *v1alpha1.Application) {
|
||||
sharding.lock.Lock()
|
||||
defer sharding.lock.Unlock()
|
||||
if _, ok := sharding.Apps[a.Name]; ok {
|
||||
delete(sharding.Apps, a.Name)
|
||||
sharding.updateDistribution()
|
||||
}
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) UpdateApp(a *v1alpha1.Application) {
|
||||
sharding.lock.Lock()
|
||||
defer sharding.lock.Unlock()
|
||||
|
||||
_, ok := sharding.Apps[a.Name]
|
||||
sharding.Apps[a.Name] = a
|
||||
if !ok {
|
||||
sharding.updateDistribution()
|
||||
} else {
|
||||
log.Debugf("Skipping sharding distribution update. No relevant changes")
|
||||
}
|
||||
}
|
||||
|
||||
// GetAppDistribution should be not be called from a DestributionFunction because
|
||||
// it could cause a deadlock when updateDistribution is called.
|
||||
func (sharding *ClusterSharding) GetAppDistribution() map[string]int {
|
||||
sharding.lock.RLock()
|
||||
clusters := sharding.Clusters
|
||||
apps := sharding.Apps
|
||||
sharding.lock.RUnlock()
|
||||
|
||||
appDistribution := make(map[string]int, len(clusters))
|
||||
|
||||
for _, a := range apps {
|
||||
if _, ok := appDistribution[a.Spec.Destination.Server]; !ok {
|
||||
appDistribution[a.Spec.Destination.Server] = 0
|
||||
}
|
||||
appDistribution[a.Spec.Destination.Server]++
|
||||
}
|
||||
return appDistribution
|
||||
}
|
||||
|
||||
@@ -139,12 +139,6 @@ func TestClusterSharding_Delete(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1alpha1.ApplicationList{
|
||||
Items: []v1alpha1.Application{
|
||||
createApp("app2", "https://127.0.0.1:6443"),
|
||||
createApp("app1", "https://kubernetes.default.svc"),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
sharding.Delete("https://kubernetes.default.svc")
|
||||
@@ -170,12 +164,6 @@ func TestClusterSharding_Update(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1alpha1.ApplicationList{
|
||||
Items: []v1alpha1.Application{
|
||||
createApp("app2", "https://127.0.0.1:6443"),
|
||||
createApp("app1", "https://kubernetes.default.svc"),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
distributionBefore := sharding.GetDistribution()
|
||||
@@ -219,12 +207,6 @@ func TestClusterSharding_UpdateServerName(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1alpha1.ApplicationList{
|
||||
Items: []v1alpha1.Application{
|
||||
createApp("app2", "https://127.0.0.1:6443"),
|
||||
createApp("app1", "https://kubernetes.default.svc"),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
distributionBefore := sharding.GetDistribution()
|
||||
@@ -269,12 +251,6 @@ func TestClusterSharding_IsManagedCluster(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1alpha1.ApplicationList{
|
||||
Items: []v1alpha1.Application{
|
||||
createApp("app2", "https://127.0.0.1:6443"),
|
||||
createApp("app1", "https://kubernetes.default.svc"),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
assert.True(t, sharding0.IsManagedCluster(&v1alpha1.Cluster{
|
||||
@@ -302,12 +278,6 @@ func TestClusterSharding_IsManagedCluster(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1alpha1.ApplicationList{
|
||||
Items: []v1alpha1.Application{
|
||||
createApp("app2", "https://127.0.0.1:6443"),
|
||||
createApp("app1", "https://kubernetes.default.svc"),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
assert.False(t, sharding1.IsManagedCluster(&v1alpha1.Cluster{
|
||||
@@ -357,12 +327,6 @@ func TestClusterSharding_ClusterShardOfResourceShouldNotBeChanged(t *testing.T)
|
||||
*clusterWithToBigValue,
|
||||
},
|
||||
},
|
||||
&v1alpha1.ApplicationList{
|
||||
Items: []v1alpha1.Application{
|
||||
createApp("app2", "https://127.0.0.1:6443"),
|
||||
createApp("app1", "https://kubernetes.default.svc"),
|
||||
},
|
||||
},
|
||||
)
|
||||
distribution := sharding.GetDistribution()
|
||||
assert.Equal(t, 3, len(distribution))
|
||||
|
||||
@@ -43,7 +43,6 @@ const ShardControllerMappingKey = "shardControllerMapping"
|
||||
type DistributionFunction func(c *v1alpha1.Cluster) int
|
||||
type ClusterFilterFunction func(c *v1alpha1.Cluster) bool
|
||||
type clusterAccessor func() []*v1alpha1.Cluster
|
||||
type appAccessor func() []*v1alpha1.Application
|
||||
|
||||
// shardApplicationControllerMapping stores the mapping of Shard Number to Application Controller in ConfigMap.
|
||||
// It also stores the heartbeat of last synced time of the application controller.
|
||||
@@ -76,7 +75,7 @@ func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, r
|
||||
|
||||
// GetDistributionFunction returns which DistributionFunction should be used based on the passed algorithm and
|
||||
// the current datas.
|
||||
func GetDistributionFunction(clusters clusterAccessor, apps appAccessor, shardingAlgorithm string, replicasCount int) DistributionFunction {
|
||||
func GetDistributionFunction(clusters clusterAccessor, shardingAlgorithm string, replicasCount int) DistributionFunction {
|
||||
log.Debugf("Using filter function: %s", shardingAlgorithm)
|
||||
distributionFunction := LegacyDistributionFunction(replicasCount)
|
||||
switch shardingAlgorithm {
|
||||
@@ -375,13 +374,13 @@ func GetClusterSharding(kubeClient kubernetes.Interface, settingsMgr *settings.S
|
||||
|
||||
// if app controller deployment is not found when dynamic cluster distribution is enabled error out
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("(dynamic cluster distribution) failed to get app controller deployment: %v", err)
|
||||
return nil, fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment: %v", err)
|
||||
}
|
||||
|
||||
if appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
|
||||
replicasCount = int(*appControllerDeployment.Spec.Replicas)
|
||||
} else {
|
||||
return nil, fmt.Errorf("(dynamic cluster distribution) failed to get app controller deployment replica count")
|
||||
return nil, fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment replica count")
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
func TestGetShardByID_NotEmptyID(t *testing.T) {
|
||||
@@ -102,14 +101,13 @@ func TestGetClusterFilterLegacy(t *testing.T) {
|
||||
|
||||
func TestGetClusterFilterUnknown(t *testing.T) {
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
appAccessor, _, _, _, _, _ := createTestApps()
|
||||
// Test with replicas set to 0
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
os.Unsetenv(common.EnvControllerShardingAlgorithm)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, "unknown")
|
||||
replicasCount := 2
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
distributionFunction := GetDistributionFunction(clusterAccessor, appAccessor, "unknown", replicasCount)
|
||||
distributionFunction := GetDistributionFunction(clusterAccessor, "unknown", replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
@@ -121,10 +119,9 @@ func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "5")
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
appAccessor, _, _, _, _, _ := createTestApps()
|
||||
replicasCount := 5
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
filter := GetDistributionFunction(clusterAccessor, appAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
filter := GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, 0, filter(nil))
|
||||
assert.Equal(t, 4, filter(&cluster1))
|
||||
assert.Equal(t, 1, filter(&cluster2))
|
||||
@@ -134,13 +131,13 @@ func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
var fixedShard int64 = 4
|
||||
cluster5 := &v1alpha1.Cluster{ID: "5", Shard: &fixedShard}
|
||||
clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
|
||||
filter = GetDistributionFunction(clusterAccessor, appAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(cluster5))
|
||||
|
||||
fixedShard = 1
|
||||
cluster5.Shard = &fixedShard
|
||||
clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
|
||||
filter = GetDistributionFunction(clusterAccessor, appAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
@@ -148,11 +145,10 @@ func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "4")
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
appAccessor, _, _, _, _, _ := createTestApps()
|
||||
replicasCount := 4
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
|
||||
filter := GetDistributionFunction(clusterAccessor, appAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
filter := GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, filter(nil), 0)
|
||||
assert.Equal(t, filter(&cluster1), 0)
|
||||
assert.Equal(t, filter(&cluster2), 1)
|
||||
@@ -165,14 +161,14 @@ func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
cluster5 := v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
|
||||
clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
|
||||
clusterAccessor = getClusterAccessor(clusters)
|
||||
filter = GetDistributionFunction(clusterAccessor, appAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(&cluster5))
|
||||
|
||||
fixedShard = 1
|
||||
cluster5 = v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
|
||||
clusters = []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
|
||||
clusterAccessor = getClusterAccessor(clusters)
|
||||
filter = GetDistributionFunction(clusterAccessor, appAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
@@ -846,7 +842,7 @@ func TestGetClusterSharding(t *testing.T) {
|
||||
useDynamicSharding: true,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 1,
|
||||
expectedErr: fmt.Errorf("(dynamic cluster distribution) failed to get app controller deployment: deployments.apps \"missing-deployment\" not found"),
|
||||
expectedErr: fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment: deployments.apps \"missing-deployment\" not found"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -874,81 +870,3 @@ func TestGetClusterSharding(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppAwareCache(t *testing.T) {
|
||||
_, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
_, app1, app2, app3, app4, app5 := createTestApps()
|
||||
|
||||
clusterSharding := NewClusterSharding(db, 0, 1, "legacy")
|
||||
|
||||
clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}}
|
||||
appList := &v1alpha1.ApplicationList{Items: []v1alpha1.Application{app1, app2, app3, app4, app5}}
|
||||
clusterSharding.Init(clusterList, appList)
|
||||
|
||||
appDistribution := clusterSharding.GetAppDistribution()
|
||||
|
||||
assert.Equal(t, 2, appDistribution["cluster1"])
|
||||
assert.Equal(t, 2, appDistribution["cluster2"])
|
||||
assert.Equal(t, 1, appDistribution["cluster3"])
|
||||
|
||||
app6 := createApp("app6", "cluster4")
|
||||
clusterSharding.AddApp(&app6)
|
||||
|
||||
app1Update := createApp("app1", "cluster2")
|
||||
clusterSharding.UpdateApp(&app1Update)
|
||||
|
||||
clusterSharding.DeleteApp(&app3)
|
||||
|
||||
appDistribution = clusterSharding.GetAppDistribution()
|
||||
|
||||
assert.Equal(t, 1, appDistribution["cluster1"])
|
||||
assert.Equal(t, 2, appDistribution["cluster2"])
|
||||
assert.Equal(t, 1, appDistribution["cluster3"])
|
||||
assert.Equal(t, 1, appDistribution["cluster4"])
|
||||
}
|
||||
|
||||
func createTestApps() (appAccessor, v1alpha1.Application, v1alpha1.Application, v1alpha1.Application, v1alpha1.Application, v1alpha1.Application) {
|
||||
app1 := createApp("app1", "cluster1")
|
||||
app2 := createApp("app2", "cluster1")
|
||||
app3 := createApp("app3", "cluster2")
|
||||
app4 := createApp("app4", "cluster2")
|
||||
app5 := createApp("app5", "cluster3")
|
||||
|
||||
apps := []v1alpha1.Application{app1, app2, app3, app4, app5}
|
||||
|
||||
return getAppAccessor(apps), app1, app2, app3, app4, app5
|
||||
}
|
||||
|
||||
func getAppAccessor(apps []v1alpha1.Application) appAccessor {
|
||||
// Convert the array to a slice of pointers
|
||||
appPointers := getAppPointers(apps)
|
||||
appAccessor := func() []*v1alpha1.Application { return appPointers }
|
||||
return appAccessor
|
||||
}
|
||||
|
||||
func getAppPointers(apps []v1alpha1.Application) []*v1alpha1.Application {
|
||||
var appPointers []*v1alpha1.Application
|
||||
for i := range apps {
|
||||
appPointers = append(appPointers, &apps[i])
|
||||
}
|
||||
return appPointers
|
||||
}
|
||||
|
||||
func createApp(name string, server string) v1alpha1.Application {
|
||||
var testApp = `
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: ` + name + `
|
||||
spec:
|
||||
destination:
|
||||
server: ` + server + `
|
||||
`
|
||||
|
||||
var app v1alpha1.Application
|
||||
err := yaml.Unmarshal([]byte(testApp), &app)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
@@ -33,7 +33,6 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/app/path"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
|
||||
@@ -197,38 +196,6 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
return nil, nil, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err)
|
||||
}
|
||||
|
||||
syncedRevision := app.Status.Sync.Revision
|
||||
if app.Spec.HasMultipleSources() {
|
||||
if i < len(app.Status.Sync.Revisions) {
|
||||
syncedRevision = app.Status.Sync.Revisions[i]
|
||||
} else {
|
||||
syncedRevision = ""
|
||||
}
|
||||
}
|
||||
|
||||
val, ok := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
|
||||
if !source.IsHelm() && syncedRevision != "" && ok && val != "" {
|
||||
// Validate the manifest-generate-path annotation to avoid generating manifests if it has not changed.
|
||||
_, err = repoClient.UpdateRevisionForPaths(context.Background(), &apiclient.UpdateRevisionForPathsRequest{
|
||||
Repo: repo,
|
||||
Revision: revisions[i],
|
||||
SyncedRevision: syncedRevision,
|
||||
Paths: path.GetAppRefreshPaths(app),
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &source,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
RefSources: refSources,
|
||||
HasMultipleSources: app.Spec.HasMultipleSources(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
|
||||
}
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("version_ms")
|
||||
log.Debugf("Generating Manifest for source %s revision %s", source, revisions[i])
|
||||
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
|
||||
@@ -915,16 +882,7 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *appStateManager) persistRevisionHistory(
|
||||
app *v1alpha1.Application,
|
||||
revision string,
|
||||
source v1alpha1.ApplicationSource,
|
||||
revisions []string,
|
||||
sources []v1alpha1.ApplicationSource,
|
||||
hasMultipleSources bool,
|
||||
startedAt metav1.Time,
|
||||
initiatedBy v1alpha1.OperationInitiator,
|
||||
) error {
|
||||
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, revisions []string, sources []v1alpha1.ApplicationSource, hasMultipleSources bool, startedAt metav1.Time) error {
|
||||
var nextID int64
|
||||
if len(app.Status.History) > 0 {
|
||||
nextID = app.Status.History.LastRevisionHistory().ID + 1
|
||||
@@ -937,7 +895,6 @@ func (m *appStateManager) persistRevisionHistory(
|
||||
ID: nextID,
|
||||
Sources: sources,
|
||||
Revisions: revisions,
|
||||
InitiatedBy: initiatedBy,
|
||||
})
|
||||
} else {
|
||||
app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
|
||||
@@ -946,7 +903,6 @@ func (m *appStateManager) persistRevisionHistory(
|
||||
DeployStartedAt: &startedAt,
|
||||
ID: nextID,
|
||||
Source: source,
|
||||
InitiatedBy: initiatedBy,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -23,11 +23,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/controller/testdata"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
|
||||
"github.com/argoproj/argo-cd/v2/test"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
)
|
||||
@@ -652,37 +649,6 @@ var defaultProj = argoappv1.AppProject{
|
||||
},
|
||||
}
|
||||
|
||||
// TestCompareAppStateWithManifestGeneratePath tests that it compares revisions when the manifest-generate-path annotation is set.
|
||||
func TestCompareAppStateWithManifestGeneratePath(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.SetAnnotations(map[string]string{argoappv1.AnnotationKeyManifestGeneratePaths: "."})
|
||||
app.Status.Sync = argoappv1.SyncStatus{
|
||||
Revision: "abc123",
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
}
|
||||
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
updateRevisionForPathsResponse: &apiclient.UpdateRevisionForPathsResponse{},
|
||||
}
|
||||
|
||||
ctrl := newFakeController(&data, nil)
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, false)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
assert.Equal(t, "abc123", compRes.syncStatus.Revision)
|
||||
ctrl.repoClientset.(*mockrepoclient.Clientset).RepoServerServiceClient.(*mockrepoclient.RepoServerServiceClient).AssertNumberOfCalls(t, "UpdateRevisionForPaths", 1)
|
||||
}
|
||||
|
||||
func TestSetHealth(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
deployment := kube.MustToUnstructured(&v1.Deployment{
|
||||
@@ -872,7 +838,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
|
||||
app.Spec.RevisionHistoryLimit = &i
|
||||
}
|
||||
addHistory := func() {
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1.Time{}, v1alpha1.OperationInitiator{})
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1.Time{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
addHistory()
|
||||
@@ -908,7 +874,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
|
||||
assert.Len(t, app.Status.History, 9)
|
||||
|
||||
metav1NowTime := metav1.NewTime(time.Now())
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1NowTime, v1alpha1.OperationInitiator{})
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1NowTime)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, app.Status.History.LastRevisionHistory().DeployStartedAt, &metav1NowTime)
|
||||
}
|
||||
@@ -1542,17 +1508,6 @@ func TestUseDiffCache(t *testing.T) {
|
||||
expectedUseCache: true,
|
||||
serverSideDiff: false,
|
||||
},
|
||||
{
|
||||
testName: "will use diff cache with sync policy",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: test.YamlToApplication(testdata.DiffCacheYaml),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: true,
|
||||
serverSideDiff: true,
|
||||
},
|
||||
{
|
||||
testName: "will use diff cache for multisource",
|
||||
noCache: false,
|
||||
|
||||
@@ -104,7 +104,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") &&
|
||||
hasSharedResource {
|
||||
state.Phase = common.OperationFailed
|
||||
state.Message = fmt.Sprintf("Shared resource found: %s", sharedResourceMessage)
|
||||
state.Message = fmt.Sprintf("Shared resouce found: %s", sharedResourceMessage)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -162,12 +162,6 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
|
||||
return
|
||||
} else if syncWindowPreventsSync(app, proj) {
|
||||
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
|
||||
if state.Phase == common.OperationRunning {
|
||||
state.Message = "Sync operation blocked by sync window"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if app.Spec.HasMultipleSources() {
|
||||
@@ -398,7 +392,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
logEntry.WithField("duration", time.Since(start)).Info("sync/terminate complete")
|
||||
|
||||
if !syncOp.DryRun && len(syncOp.Resources) == 0 && state.Phase.Successful() {
|
||||
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, compareResult.syncStatus.Revisions, compareResult.syncStatus.ComparedTo.Sources, app.Spec.HasMultipleSources(), state.StartedAt, state.Operation.InitiatedBy)
|
||||
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, compareResult.syncStatus.Revisions, compareResult.syncStatus.ComparedTo.Sources, app.Spec.HasMultipleSources(), state.StartedAt)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("failed to record sync to history: %v", err)
|
||||
@@ -530,12 +524,3 @@ func delayBetweenSyncWaves(phase common.SyncPhase, wave int, finalWave bool) err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) bool {
|
||||
window := proj.Spec.SyncWindows.Matches(app)
|
||||
isManual := false
|
||||
if app.Status.OperationState != nil {
|
||||
isManual = !app.Status.OperationState.Operation.InitiatedBy.Automated
|
||||
}
|
||||
return !window.CanSync(isManual)
|
||||
}
|
||||
|
||||
@@ -255,75 +255,6 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncWindowDeniesSync(t *testing.T) {
|
||||
type fixture struct {
|
||||
project *v1alpha1.AppProject
|
||||
application *v1alpha1.Application
|
||||
controller *ApplicationController
|
||||
}
|
||||
|
||||
setup := func() *fixture {
|
||||
app := newFakeApp()
|
||||
app.Status.OperationState = nil
|
||||
app.Status.History = nil
|
||||
|
||||
project := &v1alpha1.AppProject{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: test.FakeArgoCDNamespace,
|
||||
Name: "default",
|
||||
},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
SyncWindows: v1alpha1.SyncWindows{{
|
||||
Kind: "deny",
|
||||
Schedule: "0 0 * * *",
|
||||
Duration: "24h",
|
||||
Clusters: []string{"*"},
|
||||
Namespaces: []string{"*"},
|
||||
Applications: []string{"*"},
|
||||
}},
|
||||
},
|
||||
}
|
||||
data := fakeData{
|
||||
apps: []runtime.Object{app, project},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data, nil)
|
||||
|
||||
return &fixture{
|
||||
project: project,
|
||||
application: app,
|
||||
controller: ctrl,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("will keep the sync progressing if a sync window prevents the sync", func(t *testing.T) {
|
||||
// given a project with an active deny sync window and an operation in progress
|
||||
t.Parallel()
|
||||
f := setup()
|
||||
opMessage := "Sync operation blocked by sync window"
|
||||
|
||||
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
}},
|
||||
Phase: common.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
|
||||
//then
|
||||
assert.Equal(t, common.OperationRunning, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
|
||||
}
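For reference, the deny window built in this fixture corresponds roughly to the following AppProject manifest (a sketch; the `argocd` namespace is an assumption standing in for the test's fake Argo CD namespace):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: default
  namespace: argocd   # assumed install namespace
spec:
  syncWindows:
    # An always-active deny window: starts at midnight every day and lasts 24h,
    # so syncs are blocked for all applications, namespaces, and clusters.
    - kind: deny
      schedule: "0 0 * * *"
      duration: 24h
      applications:
        - "*"
      namespaces:
        - "*"
      clusters:
        - "*"
```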
|
||||
|
||||
func TestNormalizeTargetResources(t *testing.T) {
|
||||
type fixture struct {
|
||||
comparisonResult *comparisonResult
|
||||
|
||||
3
controller/testdata/data.go
vendored
@@ -12,9 +12,6 @@ var (
|
||||
//go:embed target-deployment-new-entries.yaml
|
||||
TargetDeploymentNewEntries string
|
||||
|
||||
//go:embed diff-cache.yaml
|
||||
DiffCacheYaml string
|
||||
|
||||
//go:embed live-httpproxy.yaml
|
||||
LiveHTTPProxy string
|
||||
|
||||
|
||||
498
controller/testdata/diff-cache.yaml
vendored
@@ -1,498 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
argocd-image-updater.argoproj.io/allow-tags: any
|
||||
argocd-image-updater.argoproj.io/ignore-tags: ""
|
||||
argocd-image-updater.argoproj.io/image-list-disabled-hack: ""
|
||||
argocd-image-updater.argoproj.io/update-strategy: semver
|
||||
argocd-image-updater.argoproj.io/write-back-method: git
|
||||
argocd-image-updater.argoproj.io/write-back-target: kustomization
|
||||
argocd-notif-onDeployed.slack-disabled: ""
|
||||
argocd-notif-onHealthDegraded.slack-disabled: ""
|
||||
argocd-notif-onSyncFailed.slack-disabled: ""
|
||||
argocd-notif-onSyncRunning.slack-disabled: ""
|
||||
argocd-notif-onSyncStatusUnknown.slack-disabled: ""
|
||||
argocd-notif-onSyncSucceeded.slack-disabled: ""
|
||||
argocd.argoproj.io/compare-options: ServerSideDiff=true
|
||||
argocd.argoproj.io/manifest-generate-paths: .;/chart
|
||||
creationTimestamp: "2024-03-04T21:30:33Z"
|
||||
finalizers:
|
||||
- resources-finalizer.argocd.argoproj.io
|
||||
generation: 263
|
||||
labels:
|
||||
cloud_provider: gcp
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
foo: bar
|
||||
preview: "true"
|
||||
project: sre
|
||||
service_class: alpha
|
||||
stack: gke-v2
|
||||
name: velero-test
|
||||
namespace: argo-cd
|
||||
ownerReferences:
|
||||
- apiVersion: argoproj.io/v1alpha1
|
||||
blockOwnerDeletion: true
|
||||
controller: true
|
||||
kind: ApplicationSet
|
||||
name: velero
|
||||
uid: 86cdfba4-8697-47b3-8489-71fab7f4a805
|
||||
resourceVersion: "722811357"
|
||||
uid: 94978696-4fd4-40b3-a1de-38d9df9e9316
|
||||
spec:
|
||||
destination:
|
||||
name: gke-alpha-01-europe-west1
|
||||
namespace: test-lla
|
||||
project: sre
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
syncPolicy:
|
||||
retry:
|
||||
backoff:
|
||||
duration: 5s
|
||||
factor: 2
|
||||
maxDuration: 3m
|
||||
limit: 10
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
- ApplyOutOfSyncOnly=true
|
||||
- RespectIgnoreDifferences=false
|
||||
- ServerSideApply=true
|
||||
- Validate=true
|
||||
status:
|
||||
controllerNamespace: argo-cd
|
||||
health:
|
||||
status: Healthy
|
||||
history:
|
||||
- deployStartedAt: "2024-03-04T22:00:05Z"
|
||||
deployedAt: "2024-03-04T22:00:06Z"
|
||||
id: 14
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-04T22:08:29Z"
|
||||
deployedAt: "2024-03-04T22:08:30Z"
|
||||
id: 15
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-04T22:09:16Z"
|
||||
deployedAt: "2024-03-04T22:09:16Z"
|
||||
id: 16
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-04T22:11:41Z"
|
||||
deployedAt: "2024-03-04T22:11:41Z"
|
||||
id: 17
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-04T22:50:55Z"
|
||||
deployedAt: "2024-03-04T22:50:55Z"
|
||||
id: 18
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-04T22:52:56Z"
|
||||
deployedAt: "2024-03-04T22:52:56Z"
|
||||
id: 19
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-04T22:56:15Z"
|
||||
deployedAt: "2024-03-04T22:56:15Z"
|
||||
id: 20
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-05T07:31:56Z"
|
||||
deployedAt: "2024-03-05T07:31:57Z"
|
||||
id: 21
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-05T07:32:44Z"
|
||||
deployedAt: "2024-03-05T07:32:44Z"
|
||||
id: 22
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
- deployStartedAt: "2024-03-05T07:33:03Z"
|
||||
deployedAt: "2024-03-05T07:33:04Z"
|
||||
id: 23
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
operationState:
|
||||
finishedAt: "2024-03-05T07:33:04Z"
|
||||
message: successfully synced (all tasks run)
|
||||
operation:
|
||||
initiatedBy:
|
||||
username: laurent.lavaud@mirakl.com
|
||||
retry:
|
||||
backoff:
|
||||
duration: 5s
|
||||
factor: 2
|
||||
maxDuration: 3m
|
||||
limit: 10
|
||||
sync:
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
syncOptions:
|
||||
- ServerSideApply=true
|
||||
syncStrategy:
|
||||
hook: {}
|
||||
phase: Succeeded
|
||||
startedAt: "2024-03-05T07:33:03Z"
|
||||
syncResult:
|
||||
resources:
|
||||
- group: ""
|
||||
hookPhase: Running
|
||||
kind: Service
|
||||
message: service/test-lla serverside-applied
|
||||
name: test-lla
|
||||
namespace: test-lla
|
||||
status: Synced
|
||||
syncPhase: Sync
|
||||
version: v1
|
||||
- group: apps
|
||||
hookPhase: Running
|
||||
kind: Deployment
|
||||
message: deployment.apps/test-lla serverside-applied
|
||||
name: test-lla
|
||||
namespace: test-lla
|
||||
status: Synced
|
||||
syncPhase: Sync
|
||||
version: v1
|
||||
revision: ea8759964626a583667a2bfd08f334ec2070040a
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
reconciledAt: "2024-03-05T07:33:04Z"
|
||||
resources:
|
||||
- health:
|
||||
status: Healthy
|
||||
kind: Service
|
||||
name: test-lla
|
||||
namespace: test-lla
|
||||
status: Synced
|
||||
version: v1
|
||||
- group: apps
|
||||
health:
|
||||
status: Healthy
|
||||
kind: Deployment
|
||||
name: test-lla
|
||||
namespace: test-lla
|
||||
status: Synced
|
||||
version: v1
|
||||
sourceType: Plugin
|
||||
summary:
|
||||
images:
|
||||
- nginx:latest
|
||||
sync:
|
||||
comparedTo:
|
||||
destination:
|
||||
name: gke-alpha-01-europe-west1
|
||||
namespace: test-lla
|
||||
source:
|
||||
path: instances/test
|
||||
plugin:
|
||||
env:
|
||||
- name: RELEASE_NAME
|
||||
value: test-lla
|
||||
- name: CHART_REPOSITORY
|
||||
value: oci://europe-west1-docker.pkg.dev/platform-89be/charts
|
||||
- name: CHART_NAME
|
||||
value: velero
|
||||
- name: PREVIEW
|
||||
value: "false"
|
||||
- name: HELM_VALUES
|
||||
value: |
|
||||
global:
|
||||
app:
|
||||
cluster_name: gke-alpha-01-europe-west1
|
||||
service_class: alpha
|
||||
cloud_provider: gcp
|
||||
cluster_stack: gke-v2
|
||||
- name: HELM_ARGS
|
||||
value: ""
|
||||
name: cmp-helm-v2
|
||||
repoURL: https://github.com/mirakl/manifests-velero.git
|
||||
targetRevision: test-lla
|
||||
revision: rev1
|
||||
status: Synced
|
||||
BIN docs/assets/api-management.png (new file, 14 KiB)
BIN docs/assets/groups-claim.png (new file, 81 KiB)
BIN docs/assets/groups-scope.png (new file, 58 KiB)
BIN (removed images: 254 KiB, 83 KiB, 224 KiB, 352 KiB, 142 KiB, 183 KiB)
@@ -37,17 +37,6 @@ sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
rm argocd-linux-amd64
```

#### Download latest stable version

You can download the latest stable release by executing below steps:

```bash
VERSION=$(curl -L -s https://raw.githubusercontent.com/argoproj/argo-cd/stable/VERSION)
curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/download/v$VERSION/argocd-linux-amd64
sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
rm argocd-linux-amd64
```

You should now be able to run `argocd` commands.

@@ -71,7 +71,7 @@ and the CLI functionalities.
|
||||
### Application Controller
|
||||
|
||||
The Application Controller is responsible for reconciling the
|
||||
Application resource in Kubernetes synchronizing the desired
|
||||
Application resource in Kubernetes syncronizing the desired
|
||||
application state (provided in Git) with the live state (in
|
||||
Kubernetes). The Application Controller is also responsible for
|
||||
reconciling the Project resource.
|
||||
|
||||
@@ -103,12 +103,10 @@ Design documents are usually submitted as PR and use [this template](https://git
|
||||
|
||||
Our community regularly meets virtually to discuss issues, ideas and enhancements around Argo CD. We invite you to join these virtual meetings if you want to bring up certain things (including your enhancement proposals), participate in our triaging or just want to get to know other contributors.
|
||||
|
||||
The current cadence of our meetings is weekly, every Thursday at 8:15AM Pacific Time ([click here to check in your current timezone][1]). We use Zoom to conduct these meetings.
|
||||
The current cadence of our meetings is weekly, every Thursday at 4:15pm UTC (8:15am Pacific, 11:15am Eastern, 5:15pm Central European, 9:45pm Indian). We use Zoom to conduct these meetings.
|
||||
|
||||
* [Agenda document (Google Docs, includes Zoom link)](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
|
||||
|
||||
If you want to discuss something, we kindly ask you to put your item on the
|
||||
[agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
|
||||
for one of the upcoming meetings so that we can plan in the time for discussing it.
|
||||
|
||||
[1]: https://www.timebie.com/std/pacific.php?q=081500
|
||||
for one of the upcoming meetings so that we can plan in the time for discussing it.
|
||||
@@ -9,9 +9,7 @@ and the [toolchain guide](toolchain-guide.md).
|
||||
|
||||
### Install Go
|
||||
|
||||
<https://go.dev/doc/install/>
|
||||
|
||||
Install Go with a version equal to or greater than the version listed in `go.mod` (verify go version with `go version`).
|
||||
Install version 1.18 or newer (Verify version by running `go version`)
|
||||
|
||||
### Clone the Argo CD repo
|
||||
|
||||
@@ -25,29 +23,16 @@ git clone https://github.com/argoproj/argo-cd.git
|
||||
|
||||
<https://docs.docker.com/engine/install/>
|
||||
|
||||
### Install or Upgrade a Tool for Running Local Clusters (e.g. kind or minikube)
|
||||
|
||||
#### Installation guide for kind:
|
||||
### Install or Upgrade `kind` (Optional - Should work with any local cluster)
|
||||
|
||||
<https://kind.sigs.k8s.io/docs/user/quick-start/>
|
||||
|
||||
#### Installation guide for minikube:
|
||||
|
||||
<https://minikube.sigs.k8s.io/docs/start/>
|
||||
|
||||
### Start Your Local Cluster
|
||||
|
||||
For example, if you are using kind:
|
||||
```shell
|
||||
kind create cluster
|
||||
```
|
||||
|
||||
Or, if you are using minikube:
|
||||
|
||||
```shell
|
||||
minikube start
|
||||
```
|
||||
|
||||
### Install Argo CD
|
||||
|
||||
```shell
|
||||
|
||||
@@ -15,7 +15,7 @@ requests before forwarding to the backend service.

As proxy extension is in [Alpha][1] phase, the feature is disabled by
default. To enable it, it is necessary to configure the feature flag
in Argo CD command parameters. The easiest way to properly enable
in Argo CD command parameters. The easiest way to to properly enable
this feature flag is by adding the `server.enable.proxy.extension` key
in the existing `argocd-cmd-params-cm`. For example:

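A minimal sketch of that change, assuming the standard `argocd-cmd-params-cm` layout (the flag value itself is not shown in this hunk and is an assumption):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
  namespace: argocd
data:
  # Feature flag: enables the (alpha) proxy extension in argocd-server.
  server.enable.proxy.extension: "true"
```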
@@ -13,7 +13,7 @@ These are the upcoming releases dates:
|
||||
| v2.8 | Monday, Jun. 26, 2023 | Monday, Aug. 7, 2023 | [Keith Chong](https://github.com/keithchong) | [Keith Chong](https://github.com/keithchong) | [checklist](https://github.com/argoproj/argo-cd/issues/13742) |
|
||||
| v2.9 | Monday, Sep. 18, 2023 | Monday, Nov. 6, 2023 | [Leonardo Almeida](https://github.com/leoluz) | [Leonardo Almeida](https://github.com/leoluz) | [checklist](https://github.com/argoproj/argo-cd/issues/14078) |
|
||||
| v2.10 | Monday, Dec. 18, 2023 | Monday, Feb. 5, 2024 | [Katie Lamkin](https://github.com/kmlamkin9) | | [checklist](https://github.com/argoproj/argo-cd/issues/16339) |
|
||||
| v2.11 | Friday, Apr. 5, 2024 | Monday, May 6, 2024 | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/17726) |
|
||||
| v2.11 | Monday, Mar. 18, 2024 | Monday, May 6, 2024 |
|
||||
| v2.12 | Monday, Jun. 17, 2024 | Monday, Aug. 5, 2024 |
|
||||
|
||||
Actual release dates might differ from the plan by a few days.
|
||||
@@ -71,7 +71,7 @@ that minor release. It will have to wait for the next minor release.
|
||||
|
||||
### Security Patch Policy
|
||||
|
||||
CVEs in Argo CD code will be patched for all supported versions. Read more about supported versions in the [security policy for Argo CD](https://github.com/argoproj/argo-cd/security/policy#supported-versions).
|
||||
CVEs in Argo CD code will be patched for all [supported versions](../operator-manual/installation.md#supported-versions).
|
||||
|
||||
### Dependencies Lifecycle Policy
|
||||
|
||||
|
||||
@@ -2,19 +2,20 @@
|
||||
|
||||
## Developing And Testing
|
||||
|
||||
The website is built using `mkdocs` and `mkdocs-material`.
|
||||
The website is built using `mkdocs` and `mkdocs-material`.
|
||||
|
||||
To test:
|
||||
|
||||
```bash
|
||||
make serve-docs
|
||||
```
|
||||
Once running, you can view your locally built documentation at [http://0.0.0.0:8000/](http://0.0.0.0:8000/).
|
||||
Make a change to documentation and the website will rebuild and refresh the view.
|
||||
|
||||
Before submitting a PR, build the website to verify that there are no errors building the site.
|
||||
Once running, you can view your locally built documentation at [http://0.0.0.0:8000/](http://0.0.0.0:8000/).
|
||||
|
||||
## Deploying
|
||||
|
||||
```bash
|
||||
make build-docs
|
||||
make publish-docs
|
||||
```
|
||||
|
||||
## Analytics
|
||||
@@ -22,4 +23,4 @@ make build-docs
|
||||
!!! tip
|
||||
Don't forget to disable your ad-blocker when testing.
|
||||
|
||||
We collect [Google Analytics](https://analytics.google.com/analytics/web/#/report-home/a105170809w198079555p192782995).
|
||||
We collect [Google Analytics](https://analytics.google.com/analytics/web/#/report-home/a105170809w198079555p192782995).
|
||||
@@ -138,14 +138,6 @@ The following steps are required no matter whether you chose to use a virtualize
|
||||
export SUDO=sudo
|
||||
```
|
||||
|
||||
If you have podman installed, you can also leverage its rootless mode. In
|
||||
order to use podman for running and testing Argo CD locally, set the
|
||||
`DOCKER` environment variable to `podman` before you run `make`, e.g.
|
||||
|
||||
```
|
||||
DOCKER=podman make start
|
||||
```
|
||||
|
||||
### Clone the Argo CD repository from your personal fork on GitHub
|
||||
|
||||
* `mkdir -p ~/go/src/github.com/argoproj`
|
||||
@@ -312,7 +304,7 @@ For installing the tools required to build and test Argo CD on your local system
|
||||
You can change the target location by setting the `BIN` environment before running the installer scripts. For example, you can install the binaries into `~/go/bin` (which should then be the first component in your `PATH` environment, i.e. `export PATH=~/go/bin:$PATH`):
|
||||
|
||||
```shell
|
||||
BIN=~/go/bin make install-tools-local
|
||||
make BIN=~/go/bin install-tools-local
|
||||
```
|
||||
|
||||
Additionally, you have to install at least the following tools via your OS's package manager (this list might not be always up-to-date):
|
||||
|
||||
@@ -22,8 +22,12 @@ This will create a new namespace, `argocd`, where Argo CD services and applicati
|
||||
The installation manifests include `ClusterRoleBinding` resources that reference the `argocd` namespace. If you are installing Argo CD into a different
namespace, then make sure to update the namespace reference.
|
||||
|
||||
!!! tip
|
||||
If you are not interested in UI, SSO, and multi-cluster features, then you can install only the [core](operator-manual/core/#installing) Argo CD components.
|
||||
If you are not interested in UI, SSO, multi-cluster features then you can install [core](operator-manual/installation.md#core) Argo CD components only:
|
||||
|
||||
```bash
|
||||
kubectl create namespace argocd
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/core-install.yaml
|
||||
```
|
||||
|
||||
This default installation will have a self-signed certificate and cannot be accessed without a bit of extra work.
|
||||
Do one of:
|
||||
@@ -32,12 +36,6 @@ Do one of:
|
||||
* Configure the client OS to trust the self-signed certificate.
|
||||
* Use the --insecure flag on all Argo CD CLI operations in this guide.
|
||||
|
||||
!!! note
    Default namespace for `kubectl` config must be set to `argocd`.
    This is only needed for the following commands since the previous commands have -n argocd already:
    `kubectl config set-context --current --namespace=argocd`
|
||||
|
||||
|
||||
Use `argocd login --core` to [configure](./user-guide/commands/argocd_login.md) CLI access and skip steps 3-5.
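For orientation, the two commands referenced above can be run back to back; this is only a sketch of the core-mode flow, not an extra installation step:

```bash
# Set the default namespace for the current kubectl context, then log the CLI in using core mode
kubectl config set-context --current --namespace=argocd
argocd login --core
```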
|
||||
|
||||
## 2. Download Argo CD CLI
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# Applications in any namespace
|
||||
|
||||
**Current feature state**: Beta
|
||||
|
||||
!!! warning
|
||||
Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues.
|
||||
|
||||
@@ -11,6 +13,10 @@ Argo CD administrators can define a certain set of namespaces where `Application
|
||||
|
||||
Some manual steps will need to be performed by the Argo CD administrator in order to enable this feature.
|
||||
|
||||
!!! note
    This feature is considered beta as of now. Some of the implementation details may change over time until it is promoted to a stable status. We will be happy if early adopters use this feature and provide us with bug reports and feedback.
|
||||
|
||||
|
||||
One additional advantage of adopting applications in any namespace is that it allows end-users to configure notifications for their Argo CD application in the namespace where the Argo CD application is running. See the notifications [namespace based configuration](notifications/index.md#namespace-based-configuration) page for more information.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@@ -119,7 +119,7 @@ spec:
|
||||
extVars:
|
||||
- name: foo
|
||||
value: bar
|
||||
# You can use "code" to determine if the value is either string (false, the default) or Jsonnet code (if code is true).
|
||||
# You can use "code to determine if the value is either string (false, the default) or Jsonnet code (if code is true).
|
||||
- code: true
|
||||
name: baz
|
||||
value: "true"
|
||||
|
||||
@@ -3,217 +3,32 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: test-hello-world-appset
|
||||
namespace: argocd
|
||||
# To preserve this annotation and label we can use the preservedFields property
|
||||
preservedFields:
|
||||
# This annotation and label exists only on this Application, and not in
|
||||
# the parent ApplicationSet template:
|
||||
# ignoreApplicationDifferences is the preferred way to accomplish this now.
|
||||
annotations:
|
||||
my-custom-annotation: some-value
|
||||
labels:
|
||||
my-custom-label: some-value
|
||||
|
||||
spec:
|
||||
# See docs for available generators and their specs.
|
||||
generators:
|
||||
|
||||
# Using a generator plugin without combining it with Matrix or Merge
|
||||
# Plugins allow you to provide your own generator
|
||||
- plugin:
|
||||
# Specify the configMap where the plugin configuration is located.
|
||||
configMapRef:
|
||||
name: my-plugin
|
||||
# You can pass arbitrary parameters to the plugin. `input.parameters` is a map, but values may be any type.
|
||||
# These parameters will also be available on the generator's output under the `generator.input.parameters` key.
|
||||
input:
|
||||
parameters:
|
||||
key1: "value1"
|
||||
key2: "value2"
|
||||
list: ["list", "of", "values"]
|
||||
boolean: true
|
||||
map:
|
||||
key1: "value1"
|
||||
key2: "value2"
|
||||
key3: "value3"
|
||||
# You can also attach arbitrary values to the generator's output under the `values` key. These values will be
|
||||
# available in templates under the `values` key.
|
||||
values:
|
||||
value1: something
|
||||
# When using a Plugin generator, the ApplicationSet controller polls every `requeueAfterSeconds` interval (defaulting to every 30 minutes) to detect changes.
|
||||
requeueAfterSeconds: 30
|
||||
|
||||
# to automatically discover repositories within an organization
|
||||
- scmProvider:
|
||||
# Which protocol to clone using.
|
||||
cloneProtocol: ssh
|
||||
# The GitHub mode uses the GitHub API to scan an organization in either github.com or GitHub Enterprise
|
||||
github:
|
||||
# The GitHub organization to scan.
|
||||
organization: myorg
|
||||
# For GitHub Enterprise:
|
||||
api: https://git.example.com/
|
||||
# If true, scan every branch of every repository. If false, scan only the default branch. Defaults to false.
|
||||
allBranches: true
|
||||
# Reference to a Secret containing an access token. (optional)
|
||||
tokenRef:
|
||||
secretName: github-token
|
||||
key: token
|
||||
# (optional) use a GitHub App to access the API instead of a PAT.
|
||||
appSecretName: gh-app-repo-creds
|
||||
#Pass additional key-value pairs via values field
|
||||
values:
|
||||
name: "{{organization}}-{{repository}}"
|
||||
|
||||
# The GitLab mode uses the GitLab API to scan an organization on either gitlab.com or self-hosted GitLab.
|
||||
gitlab:
|
||||
#The Gitea mode uses the Gitea API to scan organizations in your instance
|
||||
gitea:
|
||||
#Use the Bitbucket Server API (1.0) to scan repos in a project.
|
||||
bitbucketServer:
|
||||
#Uses the Azure DevOps API to look up eligible repositories
|
||||
azureDevOps:
|
||||
# The Bitbucket mode uses the Bitbucket API V2 to scan a workspace in bitbucket.org
|
||||
bitbucket:
|
||||
# Uses AWS ResourceGroupsTagging and AWS CodeCommit APIs to scan repos across AWS accounts and regions
|
||||
awsCodeCommit:
|
||||
|
||||
#Filters allow selecting which repositories to generate for.
|
||||
filters:
|
||||
# Include any repository starting with "myapp" AND including a Kustomize config AND labeled with "deploy-ok" ...
|
||||
- repositoryMatch: ^myapp
|
||||
pathsExist: [kubernetes/kustomization.yaml]
|
||||
labelMatch: deploy-ok
|
||||
# ... OR include any repository starting with "otherapp" AND a Helm folder and doesn't have file disabledrepo.txt.
|
||||
- repositoryMatch: ^otherapp
|
||||
pathsExist: [helm]
|
||||
pathsDoNotExist: [disabledrepo.txt]
|
||||
# matrix 'parent' generator
|
||||
- matrix:
|
||||
generators:
|
||||
# any of the top-level generators may be used here instead.
|
||||
|
||||
# merge 'parent' generator
|
||||
# Use the selector set by both child generators to combine them.
|
||||
- merge:
|
||||
mergeKeys:
|
||||
- server
|
||||
# Note that this would not work with goTemplate enabled,
|
||||
# nested merge keys are not supported there.
|
||||
- values.selector
|
||||
generators:
|
||||
- clusters:
|
||||
values:
|
||||
kafka: 'true'
|
||||
redis: 'false'
|
||||
# For clusters with a specific label, enable Kafka.
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
use-kafka: 'false'
|
||||
values:
|
||||
kafka: 'false'
|
||||
# For a specific cluster, enable Redis.
|
||||
- list:
|
||||
elements:
|
||||
- server: https://2.4.6.8
|
||||
values.redis: 'true'
|
||||
|
||||
|
||||
- list:
|
||||
elements:
|
||||
- cluster: https://kubernetes.default.svc
|
||||
# Determines whether go templating will be used in the `template` field below.
|
||||
goTemplate: true
|
||||
goTemplate: false
|
||||
# Optional list of go templating options, see https://pkg.go.dev/text/template#Template.Option
|
||||
# This is only relevant if `goTemplate` is true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
|
||||
goTemplateOptions: ["missingkey="]
|
||||
# These fields are identical to the Application spec.
|
||||
# The generator's template field takes precedence over the spec's template fields
|
||||
template:
|
||||
metadata:
|
||||
name: test-hello-world-app
|
||||
spec:
|
||||
project: my-project
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
# defines from which Git repository to extract the desired Application manifests
|
||||
source:
|
||||
- chart: '{{.chart}}'
|
||||
# developers may customize app details using JSON files from above repo URL
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
# Path within the repository where Kubernetes manifests are located
|
||||
path: applicationset/examples/list-generator/guestbook/{{cluster}}
|
||||
helm:
|
||||
useCredentials: "{{.useCredentials}}" # This field may NOT be templated, because it is a boolean field
|
||||
parameters:
|
||||
- name: "image.tag"
|
||||
value: "pull-{{head_sha}}"
|
||||
- name: "{{.name}}"
|
||||
value: "{{.value}}"
|
||||
- name: throw-away
|
||||
value: "{{end}}"
|
||||
destination:
|
||||
# Only one of name or server may be specified: if both are specified, an error is returned.
|
||||
# Name of the cluster (within Argo CD) to deploy to
|
||||
name: production-cluster # cluster is restricted
|
||||
# API Server URL for the cluster
|
||||
server: '{{.url}}'
|
||||
# Target namespace in which to deploy the manifests from source
|
||||
namespace: dev-team-one # namespace is restricted
|
||||
|
||||
# This sync policy pertains to the ApplicationSet, not to the Applications it creates.
|
||||
syncPolicy:
|
||||
# Prevents ApplicationSet controller from modifying or deleting Applications
|
||||
applicationsSync: create-only
|
||||
|
||||
# Prevents ApplicationSet controller from deleting Applications. Update is allowed
|
||||
# applicationsSync: create-update
|
||||
|
||||
# Prevents ApplicationSet controller from modifying Applications. Delete is allowed.
|
||||
# applicationsSync: create-delete
|
||||
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
# Prevent an Application's child resources from being deleted, when the parent Application is deleted
|
||||
preserveResourcesOnDeletion: true
|
||||
|
||||
# which fields of the ApplicationSet should be ignored when comparing Applications.
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
- name: some-app
|
||||
jqExpressions:
|
||||
- .spec.source.helm.values
|
||||
|
||||
# Determines whether the controller will delete Applications when an ApplicationSet is deleted.
|
||||
preserveResourcesOnDeletion: false
|
||||
# Alpha feature to determine the order in which ApplicationSet applies changes.
|
||||
strategy:
|
||||
# This field lets you define fields which should be ignored when applying Application resources. This is helpful if you
|
||||
# want to use ApplicationSets to create apps, but also want to allow users to modify those apps without having their
|
||||
# changes overwritten by the ApplicationSet.
|
||||
# This update strategy allows you to group Applications by labels present on the generated Application resources
|
||||
type: RollingSync
|
||||
rollingSync:
|
||||
steps:
|
||||
# Application groups are selected using their labels and matchExpressions
|
||||
- matchExpressions:
|
||||
- key: envLabel
|
||||
operator: In
|
||||
values:
|
||||
- env-dev
|
||||
# maxUpdate: 100% # if undefined, all applications matched are updated together (default is 100%)
|
||||
- matchExpressions:
|
||||
- key: envLabel
|
||||
operator: In
|
||||
values:
|
||||
- env-qa
|
||||
maxUpdate: 0 # if 0, no matched applications will be synced unless they're synced manually
|
||||
- matchExpressions:
|
||||
- key: envLabel
|
||||
operator: In
|
||||
values:
|
||||
- env-prod
|
||||
maxUpdate: 10% # maxUpdate supports both integer and percentage string values (rounds down, but floored at 1 Application for >0%)
|
||||
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
@@ -221,94 +36,3 @@ spec:
|
||||
jqPathExpressions:
|
||||
- .spec.source.helm.values
|
||||
|
||||
# Cluster-decision-resource-based ApplicationSet generator
|
||||
- clusterDecisionResource:
|
||||
# ConfigMap with GVK information for the duck type resource
|
||||
configMapRef: my-configmap
|
||||
name: quak # Choose either "name" of the resource or "labelSelector"
|
||||
labelSelector:
|
||||
matchLabels: # OPTIONAL
|
||||
duck: spotted
|
||||
matchExpressions: # OPTIONAL
|
||||
- key: duck
|
||||
operator: In
|
||||
values:
|
||||
- "spotted"
|
||||
- "canvasback"
|
||||
# OPTIONAL: Checks for changes every 60sec (default 3min)
|
||||
requeueAfterSeconds: 60
|
||||
|
||||
# The Pull Request generator uses the API of an SCMaaS provider to automatically discover open pull requests within a repository
|
||||
- pullRequest:
|
||||
# When using a Pull Request generator, the ApplicationSet controller polls every `requeueAfterSeconds` interval (defaulting to every 30 minutes) to detect changes.
|
||||
requeueAfterSeconds: 1800
|
||||
# See below for provider specific options.
|
||||
# Specify the repository from which to fetch the GitHub Pull requests.
|
||||
github:
|
||||
# The GitHub organization or user.
|
||||
owner: myorg
|
||||
# The Github repository
|
||||
repo: myrepository
|
||||
# For GitHub Enterprise (optional)
|
||||
api: https://git.example.com/
|
||||
# Reference to a Secret containing an access token. (optional)
|
||||
tokenRef:
|
||||
secretName: github-token
|
||||
key: token
|
||||
# (optional) use a GitHub App to access the API instead of a PAT.
|
||||
appSecretName: github-app-repo-creds
|
||||
# Labels is used to filter the PRs that you want to target. (optional)
|
||||
labels:
|
||||
- preview
|
||||
|
||||
# Filters allow selecting which pull requests to generate for
|
||||
# Include any pull request ending with "argocd". (optional)
|
||||
filters:
|
||||
- branchMatch: ".*-argocd"
|
||||
|
||||
# Specify the project from which to fetch the GitLab merge requests.
|
||||
gitlab:
|
||||
# Specify the repository from which to fetch the Gitea Pull requests.
|
||||
gitea:
|
||||
# Fetch pull requests from a repo hosted on a Bitbucket Server (not the same as Bitbucket Cloud).
|
||||
bitbucketServer:
|
||||
# Fetch pull requests from a repo hosted on a Bitbucket Cloud.
|
||||
bitbucket:
|
||||
# Specify the organization, project and repository from which you want to fetch pull requests.
|
||||
azuredevops:
|
||||
# Fetch pull requests from AWS CodeCommit repositories.
|
||||
awsCodeCommit:
|
||||
|
||||
# The list generator generates a set of two applications which are then filtered by the key value to select only the env with value staging
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
env: staging
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
env: prod
|
||||
# The generator's template field takes precedence over the spec's template fields
|
||||
template:
|
||||
metadata: {}
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
revision: HEAD
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
# New path value is generated here:
|
||||
path: 'applicationset/examples/template-override/{{cluster}}-override'
|
||||
destination: {}
|
||||
|
||||
selector:
|
||||
matchLabels:
|
||||
env: staging
|
||||
# It is also possible to use matchExpressions for more powerful selectors
|
||||
- clusters: {}
|
||||
selector:
|
||||
matchExpressions:
|
||||
- key: server
|
||||
operator: In
|
||||
values:
|
||||
- https://kubernetes.default.svc
|
||||
- https://some-other-cluster
|
||||
@@ -72,7 +72,7 @@ data:
|
||||
The allow-list only applies to SCM providers for which the user may configure a custom `api`. Where an SCM or PR
|
||||
generator does not accept a custom API URL, the provider is implicitly allowed.
|
||||
|
||||
If you do not intend to allow users to use the SCM or PR generators, you can disable them entirely by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_SCM_PROVIDERS` to argocd-cmd-params-cm `applicationsetcontroller.enable.scm.providers` to `false`.
|
||||
If you do not intend to allow users to use the SCM or PR generators, you can disable them entirely by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ALLOW_SCM_PROVIDERS` to argocd-cmd-params-cm `applicationsetcontroller.allow.scm.providers` to `false`.
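As a rough illustration of the `argocd-cmd-params-cm` route mentioned above (whether the key is spelled `enable.scm.providers` or `allow.scm.providers` depends on which side of this change your version carries, so treat the key name as an assumption):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
  namespace: argocd
data:
  # Disable the SCM and PR generators entirely
  applicationsetcontroller.enable.scm.providers: "false"
```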
|
||||
|
||||
### Overview
|
||||
|
||||
|
||||
@@ -136,29 +136,6 @@ However, if you do wish to target both local and non-local clusters, while also
|
||||
|
||||
These steps might seem counterintuitive, but the act of changing one of the default values for the local cluster causes the Argo CD Web UI to create a new secret for this cluster. In the Argo CD namespace, you should now see a Secret resource named `cluster-(cluster suffix)` with label `argocd.argoproj.io/secret-type": "cluster"`. You may also create a local [cluster secret declaratively](../../declarative-setup/#clusters), or with the CLI using `argocd cluster add "(context name)" --in-cluster`, rather than through the Web UI.
|
||||
|
||||
### Fetch clusters based on their K8s version
|
||||
|
||||
There is also the possibility to fetch clusters based upon their Kubernetes version. To do this, the label `argocd.argoproj.io/auto-label-cluster-info` needs to be set to `true` on the cluster secret.
|
||||
Once that has been set, the controller will dynamically label the cluster secret with the Kubernetes version it is running on. To retrieve that value, you need to use the
`argocd.argoproj.io/kubernetes-version` label, as the example below demonstrates:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
goTemplate: true
|
||||
generators:
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
argocd.argoproj.io/kubernetes-version: 1.28
|
||||
# matchExpressions are also supported.
|
||||
#matchExpressions:
|
||||
# - key: argocd.argoproj.io/kubernetes-version
|
||||
# operator: In
|
||||
# values:
|
||||
# - "1.27"
|
||||
# - "1.28"
|
||||
```
|
||||
|
||||
### Pass additional key-value pairs via `values` field
|
||||
|
||||
You may pass additional, arbitrary string key-value pairs via the `values` field of the cluster generator. Values added via the `values` field are added as `values.(field)`
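A minimal sketch of what that might look like (the label selector and the `version` key are illustrative placeholders):

```yaml
spec:
  generators:
  - clusters:
      selector:
        matchLabels:
          type: 'staging'
      # Added to each generated parameter set as values.version, referenced in the
      # template as {{values.version}} (or {{ .values.version }} with goTemplate enabled)
      values:
        version: '2.0.4'
```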
|
||||
|
||||
@@ -53,7 +53,7 @@ It can be enabled in any of these ways:
|
||||
|
||||
1. Pass `--enable-new-git-file-globbing` to the ApplicationSet controller args.
|
||||
1. Set `ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING=true` in the ApplicationSet controller environment variables.
|
||||
1. Set `applicationsetcontroller.enable.new.git.file.globbing: "true"` in the `argocd-cmd-params-cm` ConfigMap.
|
||||
1. Set `applicationsetcontroller.enable.new.git.file.globbing: true` in the Argo CD ConfigMap.
|
||||
|
||||
Note that the default may change in the future.
|
||||
|
||||
|
||||
@@ -77,12 +77,10 @@ metadata:
|
||||
data:
|
||||
token: "$plugin.myplugin.token" # Alternatively $<some_K8S_secret>:plugin.myplugin.token
|
||||
baseUrl: "http://myplugin.plugin-ns.svc.cluster.local."
|
||||
requestTimeout: "60"
|
||||
```
|
||||
|
||||
- `token`: Pre-shared token used to authenticate HTTP requests (points to the right key you created in the `argocd-secret` Secret)
|
||||
- `baseUrl`: BaseUrl of the k8s service exposing your plugin in the cluster.
|
||||
- `requestTimeout`: Timeout of the request to the plugin in seconds (default: 30)
|
||||
|
||||
### Store credentials
|
||||
|
||||
|
||||
@@ -84,8 +84,8 @@ spec:
|
||||
generators:
|
||||
- pullRequest:
|
||||
gitlab:
|
||||
# The GitLab project ID.
|
||||
project: "12341234"
|
||||
# The GitLab project.
|
||||
project: myproject
|
||||
# For self-hosted GitLab (optional)
|
||||
api: https://git.example.com/
|
||||
# Reference to a Secret containing an access token. (optional)
|
||||
@@ -104,7 +104,7 @@ spec:
|
||||
# ...
|
||||
```
|
||||
|
||||
* `project`: Required project ID of the GitLab project.
|
||||
* `project`: Required name of the GitLab project.
|
||||
* `api`: If using self-hosted GitLab, the URL to access it. (Optional)
|
||||
* `tokenRef`: A `Secret` name and key containing the GitLab access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories. (Optional)
|
||||
* `labels`: Labels is used to filter the MRs that you want to target. (Optional)
|
||||
|
||||
@@ -12,8 +12,7 @@ An additional `normalize` function makes any string parameter usable as a valid
|
||||
with hyphens and truncating at 253 characters. This is useful when making parameters safe for things like Application
|
||||
names.
|
||||
|
||||
Another `slugify` function has been added which, by default, sanitizes and smart truncates (it doesn't cut a word into 2). This function accepts a couple of arguments:
|
||||
|
||||
Another function has `slugify` function has been added which, by default, sanitizes and smart truncate (means doesn't cut a word into 2). This function accepts a couple of arguments:
|
||||
- The first argument (if provided) is an integer specifying the maximum length of the slug.
|
||||
- The second argument (if provided) is a boolean indicating whether smart truncation is enabled.
|
||||
- The last argument (if provided) is the input name that needs to be slugified.
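A rough sketch of how those arguments might be combined in a go-templated field follows; the exact call syntax is an assumption based on the argument order described above, not a verified signature:

```yaml
template:
  metadata:
    # Hypothetical: limit the branch-derived slug to at most 23 characters without cutting a word in half
    name: 'myapp-{{ .branch | slugify 23 }}'
```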
|
||||
@@ -207,8 +206,6 @@ ApplicationSet controller provides:
|
||||
1. contains no more than 253 characters
|
||||
2. contains only lowercase alphanumeric characters, '-' or '.'
|
||||
3. starts and ends with an alphanumeric character
|
||||
|
||||
- `slugify`: sanitizes like `normalize` and smart truncates (it doesn't cut a word into 2) as described in the [introduction](#introduction) section.
- `toYaml` / `fromYaml` / `fromYamlArray` Helm-like functions
|
||||
|
||||
|
||||
|
||||
@@ -85,7 +85,7 @@ spec:
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
targetRevision: HEAD
|
||||
revision: HEAD
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
# New path value is generated here:
|
||||
path: 'applicationset/examples/template-override/{{cluster}}-override'
|
||||
@@ -99,7 +99,7 @@ spec:
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
# This 'default' value is not used: it is replaced by the generator's template path, above
|
||||
# This 'default' value is not used: it is is replaced by the generator's template path, above
|
||||
path: applicationset/examples/template-override/default
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
|
||||
@@ -235,6 +235,14 @@ data:
|
||||
# can be either empty, "normal" or "strict". By default, it is empty i.e. disabled.
|
||||
resource.respectRBAC: "normal"
|
||||
|
||||
# Configuration to add a config management plugin.
|
||||
configManagementPlugins: |
|
||||
- name: kasane
|
||||
init:
|
||||
command: [kasane, update]
|
||||
generate:
|
||||
command: [kasane, show]
|
||||
|
||||
# A set of settings that allow enabling or disabling the config management tool.
|
||||
# If unset, each defaults to "true".
|
||||
kustomize.enabled: true
|
||||
@@ -300,10 +308,8 @@ data:
|
||||
# have either a permanent banner or a regular closeable banner, and NOT both. eg. A user can't dismiss a
|
||||
# notification message (closeable) banner, to then immediately see a permanent banner.
|
||||
# ui.bannerpermanent: "true"
|
||||
# An option to specify the position of the banner, either the top or bottom of the page, or both. The valid values
|
||||
# are: "top", "bottom" and "both". The default (if the option is not provided), is "top". If "both" is specified, then
|
||||
# the content appears both at the top and the bottom of the page. Uncomment the following line to make the banner appear
|
||||
# at the bottom of the page. Change the value as needed.
|
||||
# An option to specify the position of the banner, either the top or bottom of the page. The default is at the top.
|
||||
# Uncomment to make the banner appear at the bottom of the page. Any value other than "bottom" will make the banner appear at the top.
|
||||
# ui.bannerposition: "bottom"
|
||||
|
||||
# Application reconciliation timeout is the max amount of time required to discover if a new manifests version got
|
||||
@@ -320,10 +326,6 @@ data:
|
||||
# cluster.inClusterEnabled indicates whether to allow in-cluster server address. This is enabled by default.
|
||||
cluster.inClusterEnabled: "true"
|
||||
|
||||
# The maximum number of pod logs to render in UI. If the application has more than this number of pods, the logs will not be rendered.
|
||||
# This is to prevent the UI from becoming unresponsive when rendering a large number of logs. Default is 10.
|
||||
server.maxPodLogsToRender: 10
|
||||
|
||||
# Application pod logs RBAC enforcement enables control over who can and who can't view application pod logs.
|
||||
# When you enable the switch, pod logs will be visible only to admin role by default. Other roles/users will not be able to view them via cli and UI.
|
||||
# When you enable the switch, viewing pod logs for other roles/users will require explicit RBAC allow policies (allow get on logs subresource).
|
||||
|
||||
@@ -25,7 +25,7 @@ A few use-cases that justify running Argo CD Core are:
|
||||
|
||||
- As a cluster admin, I want to rely on Kubernetes RBAC only.
|
||||
- As a devops engineer, I don't want to learn a new API or depend on
|
||||
another CLI to automate my deployments. I want to rely on the
|
||||
another CLI to automate my deployments. I want instead rely in
|
||||
Kubernetes API only.
|
||||
- As a cluster admin, I don't want to provide Argo CD UI or Argo CD
|
||||
CLI to developers.
|
||||
|
||||
@@ -549,7 +549,6 @@ bearerToken: string
|
||||
awsAuthConfig:
|
||||
clusterName: string
|
||||
roleARN: string
|
||||
profile: string
|
||||
# Configure external command to supply client credentials
|
||||
# See https://godoc.org/k8s.io/client-go/tools/clientcmd/api#ExecConfig
|
||||
execProviderConfig:
|
||||
@@ -670,9 +669,9 @@ extended to allow assumption of multiple roles, either as an explicit array of r
|
||||
"Statement" : {
|
||||
"Effect" : "Allow",
|
||||
"Action" : "sts:AssumeRole",
|
||||
"Resource" : [
|
||||
"<arn:aws:iam::<AWS_ACCOUNT_ID>:role/<IAM_ROLE_NAME>"
|
||||
]
|
||||
"Principal" : {
|
||||
"AWS" : "<arn:aws:iam::<AWS_ACCOUNT_ID>:role/<IAM_ROLE_NAME>"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -732,140 +731,6 @@ data:
|
||||
"rolearn": "<arn:aws:iam::<AWS_ACCOUNT_ID>:role/<IAM_ROLE_NAME>"
|
||||
"username": "<some-username>"
|
||||
```
|
||||
|
||||
#### Alternative EKS Authentication Methods
|
||||
In some scenarios it may not be possible to use IRSA, such as when the Argo CD cluster is running on a different cloud
|
||||
provider's platform. In this case, there are two options:
|
||||
1. Use `execProviderConfig` to call the AWS authentication mechanism which enables the injection of environment variables to supply credentials
|
||||
2. Leverage the new AWS profile option available in Argo CD release 2.10
|
||||
|
||||
Both of these options will require the steps involving IAM and the `aws-auth` config map (defined above) to provide the
|
||||
principal with access to the cluster.
|
||||
|
||||
##### Using execProviderConfig with Environment Variables
|
||||
```yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: mycluster-secret
|
||||
labels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
type: Opaque
|
||||
stringData:
|
||||
name: mycluster
|
||||
server: https://mycluster.example.com
|
||||
namespaces: "my,managed,namespaces"
|
||||
clusterResources: "true"
|
||||
config: |
|
||||
{
|
||||
"execProviderConfig": {
|
||||
"command": "argocd-k8s-auth",
|
||||
"args": ["aws", "--cluster-name", "my-eks-cluster"],
|
||||
"apiVersion": "client.authentication.k8s.io/v1beta1",
|
||||
"env": {
|
||||
"AWS_REGION": "xx-east-1",
|
||||
"AWS_ACCESS_KEY_ID": "{{ .aws_key_id }}",
|
||||
"AWS_SECRET_ACCESS_KEY": "{{ .aws_key_secret }}",
|
||||
"AWS_SESSION_TOKEN": "{{ .aws_token }}"
|
||||
}
|
||||
},
|
||||
"tlsClientConfig": {
|
||||
"insecure": false,
|
||||
"caData": "{{ .cluster_cert }}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This example assumes that the role is already attached to the credentials that have been supplied; if this is not the case,
the role can be appended to the `args` section like so:
|
||||
|
||||
```yaml
|
||||
...
|
||||
"args": ["aws", "--cluster-name", "my-eks-cluster", "--roleARN", "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<IAM_ROLE_NAME>"],
|
||||
...
|
||||
```
|
||||
This construct can be used in conjunction with something like the External Secrets Operator to avoid storing the keys in
|
||||
plain text and additionally helps to provide a foundation for key rotation.
|
||||
|
||||
##### Using An AWS Profile For Authentication
|
||||
The option to use profiles, added in release 2.10, provides a method for supplying credentials while still using the
|
||||
standard Argo CD EKS cluster declaration with an additional command flag that points to an AWS credentials file:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: mycluster-secret
|
||||
labels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
type: Opaque
|
||||
stringData:
|
||||
name: "mycluster.com"
|
||||
server: "https://mycluster.com"
|
||||
config: |
|
||||
{
|
||||
"awsAuthConfig": {
|
||||
"clusterName": "my-eks-cluster-name",
|
||||
"roleARN": "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<IAM_ROLE_NAME>",
|
||||
"profile": "/mount/path/to/my-profile-file"
|
||||
},
|
||||
"tlsClientConfig": {
|
||||
"insecure": false,
|
||||
"caData": "<base64 encoded certificate>"
|
||||
}
|
||||
}
|
||||
```
|
||||
This will instruct Argo CD to read the file at the provided path and use the credentials defined within to authenticate to
AWS. The profile must be mounted in order for this to work. For example, the following values can be defined in a Helm-based
Argo CD deployment:
|
||||
|
||||
```yaml
|
||||
controller:
|
||||
extraVolumes:
|
||||
- name: my-profile-volume
|
||||
secret:
|
||||
secretName: my-aws-profile
|
||||
items:
|
||||
- key: my-profile-file
|
||||
path: my-profile-file
|
||||
extraVolumeMounts:
|
||||
- name: my-profile-mount
|
||||
mountPath: /mount/path/to
|
||||
readOnly: true
|
||||
|
||||
server:
|
||||
extraVolumes:
|
||||
- name: my-profile-volume
|
||||
secret:
|
||||
secretName: my-aws-profile
|
||||
items:
|
||||
- key: my-profile-file
|
||||
path: my-profile-file
|
||||
extraVolumeMounts:
|
||||
- name: my-profile-mount
|
||||
mountPath: /mount/path/to
|
||||
readOnly: true
|
||||
```
|
||||
|
||||
Where the secret is defined as follows:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: my-aws-profile
|
||||
type: Opaque
|
||||
stringData:
|
||||
my-profile-file: |
|
||||
[default]
|
||||
region = <aws_region>
|
||||
aws_access_key_id = <aws_access_key_id>
|
||||
aws_secret_access_key = <aws_secret_access_key>
|
||||
aws_session_token = <aws_session_token>
|
||||
```
|
||||
|
||||
> ⚠️ Secret mounts are updated on an interval, not in real time. If rotation is a requirement, ensure the token lifetime outlives the mount update interval and that the rotation process doesn't immediately invalidate the existing token.
|
||||
|
||||
|
||||
### GKE
|
||||
|
||||
GKE cluster secret example using argocd-k8s-auth and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity):
|
||||
@@ -923,15 +788,6 @@ In addition to the environment variables above, argocd-k8s-auth accepts two extr
|
||||
|
||||
This is an example of using the [federated workload login flow](https://github.com/Azure/kubelogin#azure-workload-federated-identity-non-interactive). The federated token file needs to be mounted as a secret into Argo CD, so it can be used in the flow. The location of the token file needs to be set in the environment variable AZURE_FEDERATED_TOKEN_FILE.
|
||||
|
||||
If your AKS cluster utilizes the [Mutating Admission Webhook](https://azure.github.io/azure-workload-identity/docs/installation/mutating-admission-webhook.html) from the Azure Workload Identity project, follow these steps to enable the `argocd-application-controller` and `argocd-server` pods to use the federated identity:
|
||||
|
||||
1. **Label the Pods**: Add the `azure.workload.identity/use: "true"` label to the `argocd-application-controller` and `argocd-server` pods.
|
||||
|
||||
2. **Create Federated Identity Credential**: Generate an Azure federated identity credential for the `argocd-application-controller` and `argocd-server` service accounts. Refer to the [Federated Identity Credential](https://azure.github.io/azure-workload-identity/docs/topics/federated-identity-credential.html) documentation for detailed instructions.
|
||||
|
||||
3. **Set the AZURE_CLIENT_ID**: Update the `AZURE_CLIENT_ID` in the cluster secret to match the client id of the newly created federated identity credential.
|
||||
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
@@ -950,9 +806,9 @@ stringData:
|
||||
"env": {
|
||||
"AAD_ENVIRONMENT_NAME": "AzurePublicCloud",
|
||||
"AZURE_CLIENT_ID": "fill in client id",
|
||||
"AZURE_TENANT_ID": "fill in tenant id", # optional, injected by workload identity mutating admission webhook if enabled
|
||||
"AZURE_FEDERATED_TOKEN_FILE": "/opt/path/to/federated_file.json", # optional, injected by workload identity mutating admission webhook if enabled
|
||||
"AZURE_AUTHORITY_HOST": "https://login.microsoftonline.com/", # optional, injected by workload identity mutating admission webhook if enabled
|
||||
"AZURE_TENANT_ID": "fill in tenant id",
|
||||
"AZURE_FEDERATED_TOKEN_FILE": "/opt/path/to/federated_file.json",
|
||||
"AZURE_AUTHORITY_HOST": "https://login.microsoftonline.com/",
|
||||
"AAD_LOGIN_METHOD": "workloadidentity"
|
||||
},
|
||||
"args": ["azure"],
|
||||
@@ -1145,7 +1001,7 @@ Example of `kustomization.yaml`:
|
||||
```yaml
|
||||
# additional resources like ingress rules, cluster and repository secrets.
|
||||
resources:
|
||||
- github.com/argoproj/argo-cd//manifests/cluster-install?ref=stable
|
||||
- github.com/argoproj/argo-cd//manifests/cluster-install?ref=v1.0.1
|
||||
- clusters-secrets.yaml
|
||||
- repos-secrets.yaml
|
||||
|
||||
|
||||
@@ -170,29 +170,25 @@ Argo CD repo server maintains one repository clone locally and uses it for appli
|
||||
Argo CD determines if manifest generation might change local files in the local repository clone based on the config management tool and application settings.
|
||||
If the manifest generation has no side effects then requests are processed in parallel without a performance penalty. The following are known cases that might cause slowness and their workarounds:
|
||||
|
||||
* **Multiple Helm based applications pointing to the same directory in one Git repository:** for historical reasons Argo CD generates Helm manifests sequentially. To enable parallel generation set `ARGOCD_HELM_ALLOW_CONCURRENCY=true` to `argocd-repo-server` deployment or create `.argocd-allow-concurrency` file.
|
||||
Future versions of Argo CD will enable this by default.
|
||||
* **Multiple Helm based applications pointing to the same directory in one Git repository:** ensure that your Helm chart doesn't have conditional
|
||||
[dependencies](https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags) and create `.argocd-allow-concurrency` file in the chart directory.
|
||||
|
||||
* **Multiple Custom plugin based applications:** avoid creating temporary files during manifest generation and create a `.argocd-allow-concurrency` file in the app directory, or use the sidecar plugin option, which processes each application using a temporary copy of the repository.
|
||||
|
||||
* **Multiple Kustomize applications in same repository with [parameter overrides](../user-guide/parameters.md):** sorry, no workaround for now.
|
||||
|
||||
|
||||
### Manifest Paths Annotation
|
||||
### Webhook and Manifest Paths Annotation
|
||||
|
||||
Argo CD aggressively caches generated manifests and uses the repository commit SHA as a cache key. A new commit to the Git repository invalidates the cache for all applications configured in the repository.
|
||||
This can negatively affect repositories with multiple applications. You can use [webhooks](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/webhook.md) and the `argocd.argoproj.io/manifest-generate-paths` Application CRD annotation to solve this problem and improve performance.
|
||||
|
||||
The `argocd.argoproj.io/manifest-generate-paths` annotation contains a semicolon-separated list of paths within the Git repository that are used during manifest generation. It will use the paths specified in the annotation to compare the last cached revision to the latest commit. If no modified files match the paths specified in `argocd.argoproj.io/manifest-generate-paths`, then it will not trigger application reconciliation and the existing cache will be considered valid for the new commit.
|
||||
The `argocd.argoproj.io/manifest-generate-paths` annotation contains a semicolon-separated list of paths within the Git repository that are used during manifest generation. The webhook compares paths specified in the annotation with the changed files specified in the webhook payload. If no modified files match the paths specified in `argocd.argoproj.io/manifest-generate-paths`, then the webhook will not trigger application reconciliation and the existing cache will be considered valid for the new commit.
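For illustration, a metadata-only fragment of an Application carrying this annotation might look like the following; the application name is a hypothetical placeholder:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: guestbook   # hypothetical application name
  namespace: argocd
  annotations:
    # a single path, relative to the application's source path
    argocd.argoproj.io/manifest-generate-paths: .
```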
|
||||
|
||||
Installations that use a different repository for each application are **not** subject to this behavior and will likely get no benefit from using these annotations.
|
||||
|
||||
Similarly, applications referencing an external Helm values file will not get the benefits of this feature when an unrelated change happens in the external source.
|
||||
|
||||
For webhooks, the comparison is done using the files specified in the webhook event payload instead.
|
||||
|
||||
!!! note
|
||||
Application manifest paths annotation support for webhooks depends on the git provider used for the Application. It is currently only supported for GitHub, GitLab, and Gogs based repos.
|
||||
Application manifest paths annotation support depends on the git provider used for the Application. It is currently only supported for GitHub, GitLab, and Gogs based repos.
|
||||
|
||||
* **Relative path** The annotation might contain a relative path. In this case the path is considered relative to the path specified in the application source:
|
||||
|
||||
|
||||
@@ -166,43 +166,6 @@ The argocd-server Service needs to be annotated with `projectcontour.io/upstream
|
||||
The API server should then be run with TLS disabled. Edit the `argocd-server` deployment to add the
|
||||
`--insecure` flag to the argocd-server command, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md).
|
||||
|
||||
Contour httpproxy CRD:
|
||||
|
||||
Using a contour httpproxy CRD allows you to use the same hostname for the GRPC and REST api.
|
||||
|
||||
```yaml
|
||||
apiVersion: projectcontour.io/v1
|
||||
kind: HTTPProxy
|
||||
metadata:
|
||||
name: argocd-server
|
||||
namespace: argocd
|
||||
spec:
|
||||
ingressClassName: contour
|
||||
virtualhost:
|
||||
fqdn: path.to.argocd.io
|
||||
tls:
|
||||
secretName: wildcard-tls
|
||||
routes:
|
||||
- conditions:
|
||||
- prefix: /
|
||||
- header:
|
||||
name: Content-Type
|
||||
contains: application/grpc
|
||||
services:
|
||||
- name: argocd-server
|
||||
port: 80
|
||||
protocol: h2c # allows for unencrypted http2 connections
|
||||
timeoutPolicy:
|
||||
response: 1h
|
||||
idle: 600s
|
||||
idleConnection: 600s
|
||||
- conditions:
|
||||
- prefix: /
|
||||
services:
|
||||
- name: argocd-server
|
||||
port: 80
|
||||
```
|
||||
|
||||
## [kubernetes/ingress-nginx](https://github.com/kubernetes/ingress-nginx)
|
||||
|
||||
### Option 1: SSL-Passthrough
|
||||
|
||||
@@ -70,8 +70,6 @@ Scraped at the `argocd-server-metrics:8083/metrics` endpoint.
|
||||
| `argocd_redis_request_total` | counter | Number of Kubernetes requests executed during application reconciliation. |
|
||||
| `grpc_server_handled_total` | counter | Total number of RPCs completed on the server, regardless of success or failure. |
|
||||
| `grpc_server_msg_sent_total` | counter | Total number of gRPC stream messages sent by the server. |
|
||||
| `argocd_proxy_extension_request_total` | counter | Number of requests sent to the configured proxy extensions. |
|
||||
| `argocd_proxy_extension_request_duration_seconds` | histogram | Request duration in seconds between the Argo CD API server and the proxy extension backend. |
|
||||
|
||||
## Repo Server Metrics
|
||||
Metrics about the Repo Server.
|
||||
@@ -81,7 +79,6 @@ Scraped at the `argocd-repo-server:8084/metrics` endpoint.
|
||||
|--------|:----:|-------------|
|
||||
| `argocd_git_request_duration_seconds` | histogram | Git requests duration seconds. |
|
||||
| `argocd_git_request_total` | counter | Number of git requests performed by repo server |
|
||||
| `argocd_git_fetch_fail_total` | counter | Number of git fetch requests failures by repo server |
|
||||
| `argocd_redis_request_duration_seconds` | histogram | Redis requests duration seconds. |
|
||||
| `argocd_redis_request_total` | counter | Number of Kubernetes requests executed during application reconciliation. |
|
||||
| `argocd_repo_pending_request_total` | gauge | Number of pending requests requiring repository lock |
|
||||
@@ -171,8 +168,6 @@ apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: argocd-redis-haproxy-metrics
|
||||
labels:
|
||||
release: prometheus-operator
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
@@ -181,7 +176,7 @@ spec:
|
||||
- port: http-exporter-port
|
||||
```
|
||||
|
||||
For the notifications controller, you additionally need to add the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
|
||||
@@ -62,7 +62,8 @@ slack:
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"title": "{{$c.type}}",
|
||||
"value": "{{$c.message}}",
|
||||
@@ -89,7 +90,8 @@ teams:
|
||||
"value": "{{.app.status.sync.revision}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"name": "{{$c.type}}",
|
||||
"value": "{{$c.message}}"
|
||||
@@ -143,7 +145,8 @@ slack:
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"title": "{{$c.type}}",
|
||||
"value": "{{$c.message}}",
|
||||
@@ -166,7 +169,8 @@ teams:
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"name": "{{$c.type}}",
|
||||
"value": "{{$c.message}}"
|
||||
@@ -220,7 +224,8 @@ slack:
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"title": "{{$c.type}}",
|
||||
"value": "{{$c.message}}",
|
||||
@@ -247,7 +252,8 @@ teams:
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"name": "{{$c.type}}",
|
||||
"value": "{{$c.message}}"
|
||||
@@ -301,7 +307,8 @@ slack:
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"title": "{{$c.type}}",
|
||||
"value": "{{$c.message}}",
|
||||
@@ -328,7 +335,8 @@ teams:
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"name": "{{$c.type}}",
|
||||
"value": "{{$c.message}}"
|
||||
@@ -386,7 +394,8 @@ slack:
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"title": "{{$c.type}}",
|
||||
"value": "{{$c.message}}",
|
||||
@@ -409,7 +418,8 @@ teams:
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"name": "{{$c.type}}",
|
||||
"value": "{{$c.message}}"
|
||||
@@ -462,7 +472,8 @@ slack:
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"title": "{{$c.type}}",
|
||||
"value": "{{$c.message}}",
|
||||
@@ -489,7 +500,8 @@ teams:
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
{{if not $index}},{{end}}
|
||||
{{if $index}},{{end}}
|
||||
{
|
||||
"name": "{{$c.type}}",
|
||||
"value": "{{$c.message}}"
|
||||
|
||||
@@ -48,16 +48,6 @@ Transforms given GIT URL into HTTPs format.
|
||||
|
||||
Returns the repository URL full name `(<owner>/<repoName>)`. Currently supports only GitHub, GitLab and Bitbucket.
|
||||
|
||||
<hr>
|
||||
**`repo.QueryEscape(s string) string`**
|
||||
|
||||
QueryEscape escapes the string, so it can be safely placed inside a URL
|
||||
|
||||
Example:
|
||||
```
|
||||
/projects/{{ call .repo.QueryEscape (call .repo.FullNameByRepoURL .app.status.RepoURL) }}/merge_requests
|
||||
```
|
||||
|
||||
<hr>
|
||||
**`repo.GetCommitMetadata(sha string) CommitMetadata`**
|
||||
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(increase(argocd_notifications_trigger_eval_total[$interval])) by (name)",
|
||||
"expr": "sum(increase(argocd_notifications_trigger_eval_total[$interval])) by (notifier)",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
@@ -146,7 +146,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(increase(argocd_notifications_deliveries_total[$interval])) by (service)",
|
||||
"expr": "sum(increase(argocd_notifications_deliveries_total[$interval])) by (notifier)",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
|
||||
@@ -13,8 +13,8 @@ The following metrics are available:
|
||||
Number of delivered notifications.
|
||||
Labels:
|
||||
|
||||
* `trigger` - trigger name
|
||||
* `service` - notification service name
|
||||
* `template` - notification template name
|
||||
* `notifier` - notification service name
|
||||
* `succeeded` - flag that indicates if notification was successfully sent or failed
|
||||
|
||||
### `argocd_notifications_trigger_eval_total`
|
||||
|
||||
@@ -43,7 +43,7 @@ You should turn off "send_resolved" or you will receive unnecessary recovery not
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.alertmanager: |
|
||||
targets:
|
||||
@@ -58,7 +58,7 @@ If your alertmanager has changed the default api, you can customize "apiPath".
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.alertmanager: |
|
||||
targets:
|
||||
@@ -89,7 +89,7 @@ stringData:
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.alertmanager: |
|
||||
targets:
|
||||
@@ -110,7 +110,7 @@ data:
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.alertmanager: |
|
||||
targets:
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# AWS SQS
|
||||
# AWS SQS
|
||||
|
||||
## Parameters
|
||||
|
||||
This notification service is capable of sending simple messages to AWS SQS queue.
|
||||
This notification service is capable of sending simple messages to AWS SQS queue.
|
||||
|
||||
* `queue` - name of the queue you are intending to send messages to. Can be overridden with target destination annotation.
|
||||
* `region` - region of the SQS queue; can be provided via the env variable AWS_DEFAULT_REGION
|
||||
@@ -30,7 +30,7 @@ metadata:
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.awssqs: |
|
||||
region: "us-east-2"
|
||||
@@ -63,7 +63,7 @@ stringData:
|
||||
|
||||
### Minimal configuration using AWS Env variables
|
||||
|
||||
Ensure the following list of environment variables are injected via OIDC, or another method. And assuming SQS is local to the account.
|
||||
Ensure following list of environment variables are injected via OIDC, or other method. And assuming SQS is local to the account.
|
||||
You may skip usage of secret for sensitive data and omit other parameters. (Setting parameters via ConfigMap takes precedent.)
|
||||
|
||||
Variables:
|
||||
@@ -89,7 +89,7 @@ metadata:
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.awssqs: |
|
||||
queue: "myqueue"
|
||||
@@ -104,16 +104,3 @@ data:
|
||||
- oncePer: obj.metadata.annotations["generation"]
|
||||
|
||||
```
|
||||
|
||||
## FIFO SQS Queues
|
||||
|
||||
FIFO queues require a [MessageGroupId](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html#SQS-SendMessage-request-MessageGroupId) to be sent along with every message; every message with a matching MessageGroupId will be processed one by one in order.
|
||||
|
||||
To send to a FIFO SQS Queue you must include a `messageGroupId` in the template such as in the example below:
|
||||
|
||||
```yaml
|
||||
template.deployment-ready: |
|
||||
message: |
|
||||
Deployment {{.obj.metadata.name}} is ready!
|
||||
messageGroupId: {{.obj.metadata.name}}-deployment
|
||||
```
|
||||
|
||||