Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-20 09:38:49 +01:00

Compare commits: v2.3.10 ... release-2. (83 commits)

Commits in this range (SHA1 only; author and date columns were not captured):

2de6920da4, 70fef5b5a8, 9d092ddff8, ab907e1154, 9061ae2495, 6ee8d1cda8, 0ae7351304, 198eb03b03, c683c8e935, 02281f2cb5,
ba75811fad, e3cb1017e5, 6a93a8fa00, 1b73581543, 883469373c, 2382958ee8, 8c2e7e6566, 4fb8ec9f5f, a05a450995, d143571617,
181008e310, e77eafe294, 8a7f841466, e4bc8b4908, eb7b8a4790, 75c3285ddf, 90ae691700, 7ac47700fb, 3bfdaa93b1, 39762bc563,
4ffcac1170, ab5ac8e426, b0917a8f66, 78f9035871, 29450f7127, 12387ec4fd, 9a5379ca46, b7d4baa1fa, 1540323517, 82c29a0122,
1295acdaa5, a8fd7c8245, f479187c88, ea15d38fde, 9785967bc3, 40a3e61061, cd6bac967c, 8ef492279b, 5ecf969e11, b73ea919c5,
3680a4a518, 7d36cb32ea, 08ffc7cab8, 07cf3355fe, 210e54f6a3, 9ebfe157f1, 1f21e04964, ff8cd75469, b9003b4f86, bab79ee084,
71cd8b6650, 75cb10ed5a, 82c3bba0c8, 4ec90801aa, fa3ac41440, 27e47a3dc7, 553033592d, f3c820269c, 31bfc2ba21, fdfcb002c4,
73388c8668, 196376833e, 3ef61d737c, d24aaff5d2, 0d7d5255c9, 5f118f5c9b, 3855e2c853, fbf38d1029, 0bc9d1b973, bd8e16aa1a,
65556c22ec, 508fdd6fd5, 3b2d31cc58
89 .github/workflows/ci-build.yaml vendored

@@ -12,10 +12,7 @@ on:
env:
# Golang version to use across CI steps
GOLANG_VERSION: '1.17'
permissions:
contents: read
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -27,9 +24,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v1
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -45,13 +42,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v1
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -69,9 +66,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@0ad9a0988b3973e851ab0a07adf248ec2e100376 # v3.3.1
with:
version: v1.46.2
args: --timeout 10m --exclude SA5011 --verbose
@@ -85,11 +82,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@v1
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -109,13 +106,17 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
@@ -126,12 +127,12 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: test-results
path: test-results/
@@ -145,11 +146,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@v1
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -169,13 +170,17 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
@@ -186,7 +191,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: race-results
path: test-results/
@@ -196,9 +201,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v1
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
@@ -222,6 +227,10 @@ jobs:
make install-codegen-tools-local
make install-go-tools-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Initialize local Helm
run: |
helm2 init --client-only
@@ -243,14 +252,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup NodeJS
uses: actions/setup-node@v1
uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
with:
node-version: '12.18.4'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -280,12 +289,12 @@ jobs:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -296,16 +305,16 @@ jobs:
run: |
mkdir -p test-results
- name: Get code coverage artifiact
uses: actions/download-artifact@v2
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: code-coverage
- name: Get test result artifact
uses: actions/download-artifact@v2
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@v1
uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud
@@ -356,14 +365,22 @@ jobs:
ARGOCD_SERVER: "127.0.0.1:8088"
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v1
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
run: |
sudo pkill mono || true
# ubuntu-22.04 comes with kubectl, but the version is not pinned. The version as of 2022-12-05 is 1.26.0 which
# breaks the `TestNamespacedResourceDiffing` e2e test. So we'll pin to 1.25 and then fix the underlying issue.
- name: Install kubectl
run: |
rm /usr/local/bin/kubectl
curl -LO https://dl.k8s.io/release/v1.25.4/bin/linux/amd64/kubectl
mv kubectl /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
- name: Install K3S
env:
INSTALL_K3S_VERSION: ${{ matrix.k3s-version }}+k3s1
@@ -376,7 +393,7 @@ jobs:
sudo chown runner $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -402,9 +419,9 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.35.3-distroless
docker pull ghcr.io/dexidp/dex:v2.35.3
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:6.2.7-alpine
docker pull redis:6.2.8-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
@@ -432,7 +449,7 @@ jobs:
set -x
make test-e2e-local
- name: Upload e2e-server logs
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
8 .github/workflows/codeql.yml vendored

@@ -26,7 +26,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
@@ -39,7 +39,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
uses: github/codeql-action/init@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
@@ -47,7 +47,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
uses: github/codeql-action/autobuild@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -61,4 +61,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
uses: github/codeql-action/analyze@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
29 .github/workflows/image.yaml vendored

@@ -28,22 +28,22 @@ jobs:
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/setup-go@v1
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@master
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
path: src/github.com/argoproj/argo-cd
# get image tag
- run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8}
- run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
working-directory: ./src/github.com/argoproj/argo-cd
id: image
# login
- run: |
docker login ghcr.io --username $USERNAME --password $PASSWORD
docker login quay.io --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker login ghcr.io --username $USERNAME --password-stdin <<< "$PASSWORD"
docker login quay.io --username "$DOCKER_USERNAME" --password-stdin <<< "$DOCKER_TOKEN"
if: github.event_name == 'push'
env:
USERNAME: ${{ secrets.USERNAME }}
@@ -52,8 +52,8 @@ jobs:
DOCKER_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
# build
- uses: docker/setup-qemu-action@v1
- uses: docker/setup-buildx-action@v1
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@15c905b16b06416d2086efa066dd8e3a35cc7f98 # v2.4.0
- run: |
IMAGE_PLATFORMS=linux/amd64
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
@@ -61,7 +61,7 @@ jobs:
IMAGE_PLATFORMS=linux/amd64,linux/arm64
fi
echo "Building image for platforms: $IMAGE_PLATFORMS"
docker buildx build --platform $IMAGE_PLATFORMS --push="${{ github.event_name == 'push' }}" \
docker buildx build --platform $IMAGE_PLATFORMS --sbom=false --provenance=false --push="${{ github.event_name == 'push' }}" \
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
-t quay.io/argoproj/argocd:latest .
working-directory: ./src/github.com/argoproj/argo-cd
@@ -69,13 +69,20 @@ jobs:
# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@main
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.0'
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@e82f1b9a8007d399333baba4d75915558e9fb6a4
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:latest)" >> $GITHUB_ENV
- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd:latest
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd@${{ env.IMAGE_DIGEST }}
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
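Two mechanical changes in this workflow are easy to miss. A minimal shell sketch of what they do (not part of the diff; variable names follow the workflow above):

```bash
# Piping the secret on stdin keeps it out of the process argument list,
# where `ps` output or shell tracing could otherwise expose it.
docker login ghcr.io --username "$USERNAME" --password-stdin <<< "$PASSWORD"

# The deprecated ::set-output workflow command is replaced by appending a
# key=value pair to the file $GITHUB_OUTPUT points at; later steps read it
# as steps.image.outputs.tag.
echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> "$GITHUB_OUTPUT"
```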
61 .github/workflows/release.yaml vendored

@@ -12,7 +12,7 @@ on:
- '!release-v0*'
env:
GOLANG_VERSION: '1.17'
GOLANG_VERSION: '1.17'
permissions:
contents: read
@@ -43,7 +43,7 @@ jobs:
GIT_EMAIL: argoproj@gmail.com
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -147,7 +147,7 @@ jobs:
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
- name: Setup Golang
uses: actions/setup-go@v2
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
@@ -178,6 +178,10 @@ jobs:
set -ue
make install-codegen-tools-local
helm2 init --client-only
# We install kustomize in the dist directory
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
make manifests-local VERSION=${TARGET_VERSION}
git diff
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"
@@ -196,20 +200,19 @@ jobs:
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
run: |
set -ue
docker login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_TOKEN}"
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
# Remove the following when Docker Hub is gone
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
if: ${{ env.DRY_RUN != 'true' }}
- uses: docker/setup-qemu-action@v1
- uses: docker/setup-buildx-action@v1
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@15c905b16b06416d2086efa066dd8e3a35cc7f98 # v2.4.0
- name: Build and push Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
docker buildx build --platform linux/amd64,linux/arm64 --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
docker buildx build --platform linux/amd64,linux/arm64 --sbom=false --provenance=false --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
make release-cli
make checksums
chmod +x ./dist/argocd-linux-amd64
@@ -217,14 +220,23 @@ jobs:
if: ${{ env.DRY_RUN != 'true' }}
- name: Install cosign
uses: sigstore/cosign-installer@main
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.0'
cosign-release: 'v1.13.1'
- name: Sign Argo CD container images
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@e82f1b9a8007d399333baba4d75915558e9fb6a4
- name: Get digest of image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION}
cosign sign --key env://COSIGN_PRIVATE_KEY docker.io/argoproj/argocd:v${TARGET_VERSION}
echo "IMAGE_DIGEST_QUAY=$(crane digest quay.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV
echo "IMAGE_DIGEST_DOCK=$(crane digest docker.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV
- name: Sign Argo CD container images and assets
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd@${{ env.IMAGE_DIGEST_QUAY }}
cosign sign --key env://COSIGN_PRIVATE_KEY docker.io/argoproj/argocd@${{ env.IMAGE_DIGEST_DOCK }}
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
env:
@@ -234,8 +246,8 @@ jobs:
- name: Read release notes file
id: release-notes
uses: juliangruber/read-file-action@v1
with:
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
with:
path: ${{ env.RELEASE_NOTES }}
- name: Push changes to release branch
@@ -245,7 +257,7 @@ jobs:
git push origin ${RELEASE_TAG}
- name: Dry run GitHub release
uses: actions/create-release@v1
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
id: create_release
@@ -266,7 +278,7 @@ jobs:
SIGS_BOM_VERSION: v0.2.1
# comma delimited list of project relative folders to inspect for package
# managers (gomod, yarn, npm).
PROJECT_FOLDERS: ".,./ui"
PROJECT_FOLDERS: ".,./ui"
# full qualified name of the docker image to be inspected
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
run: |
@@ -288,8 +300,16 @@ jobs:
cd /tmp && tar -zcf sbom.tar.gz *.spdx
if: ${{ env.DRY_RUN != 'true' }}
- name: Sign sbom
run: |
cosign sign-blob --key env://COSIGN_PRIVATE_KEY /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Create GitHub release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -301,12 +321,13 @@ jobs:
files: |
dist/argocd-*
/tmp/sbom.tar.gz
/tmp/sbom.tar.gz.sig
if: ${{ env.DRY_RUN != 'true' }}
- name: Update homebrew formula
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
uses: dawidd6/action-homebrew-bump-formula@v3
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
with:
token: ${{env.HOMEBREW_TOKEN}}
formula: argocd
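The signing steps above switch from signing a mutable tag to signing the image digest. A hedged sketch of the underlying commands, assuming `COSIGN_PRIVATE_KEY` is set and using an illustrative tag rather than one taken from the diff:

```bash
# Resolve the immutable digest behind the tag that was just pushed, then sign
# that digest. Signing a digest binds the signature to the exact image bytes,
# so a later re-push of the same tag cannot ride on the old signature.
DIGEST=$(crane digest quay.io/argoproj/argocd:v2.3.17)   # example tag
cosign sign --key env://COSIGN_PRIVATE_KEY "quay.io/argoproj/argocd@${DIGEST}"
```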
2 Makefile

@@ -561,4 +561,4 @@ list:
.PHONY: checksums
checksums:
for f in ./dist/$(BIN_NAME)-*; do openssl dgst -sha256 "$$f" | awk ' { print $$2 }' > "$$f".sha256 ; done
sha256sum ./dist/$(BIN_NAME)-* | awk -F './dist/' '{print $$1 $$2}' > ./dist/$(BIN_NAME)-$(TARGET_VERSION)-checksums.txt
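The `checksums` target now emits a single aggregate file rather than one `.sha256` file per binary. A small sketch of the difference in output, with illustrative file and version names:

```bash
# Old: one .sha256 per artifact, containing only the bare hash.
openssl dgst -sha256 ./dist/argocd-linux-amd64 | awk '{ print $2 }' > ./dist/argocd-linux-amd64.sha256

# New: one checksums.txt with "hash  filename" lines; stripping the ./dist/
# prefix lets users verify downloads in place with `sha256sum -c`.
sha256sum ./dist/argocd-* | awk -F './dist/' '{print $1 $2}' > ./dist/argocd-v2.3.17-checksums.txt
```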
2 Procfile

@@ -1,7 +1,7 @@
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} "
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.2 dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.7-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.8-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-/tmp/argo-e2e/app/config/plugin} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
git-server: test/fixture/testrepos/start-git.sh
@@ -207,7 +207,7 @@ var validatorsByGroup = map[string]settingValidator{
}
ssoProvider = "Dex"
} else if general.OIDCConfigRAW != "" {
if _, err := settings.UnmarshalOIDCConfig(general.OIDCConfigRAW); err != nil {
if err := settings.ValidateOIDCConfig(general.OIDCConfigRAW); err != nil {
return "", fmt.Errorf("invalid oidc.config: %v", err)
}
ssoProvider = "OIDC"
@@ -1,8 +1,12 @@
package common
import (
"errors"
"os"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Default service addresses and URLS of Argo CD internal services
@@ -241,3 +245,10 @@ func GetPluginSockFilePath() string {
return pluginSockFilePath
}
}
// Common error messages
const TokenVerificationError = "failed to verify the token"
var TokenVerificationErr = errors.New(TokenVerificationError)
var PermissionDeniedAPIError = status.Error(codes.PermissionDenied, "permission denied")
@@ -41,6 +41,9 @@ spec:
valueFiles:
- values-prod.yaml
# Ignore locally missing valueFiles when installing Helm chart. Defaults to false
ignoreMissingValueFiles: false
# Values file as block file
values: |
ingress:
@@ -57,6 +60,9 @@ spec:
hosts:
- mydomain.example.com
# Skip custom resource definition installation if chart contains custom resource definitions. Defaults to false
skipCrds: false
# Optional Helm version to template with. If omitted it will fall back to look at the 'apiVersion' in Chart.yaml
# and decide which Helm binary to use automatically. This field can be either 'v2' or 'v3'.
version: v2
@@ -110,7 +116,11 @@ spec:
# Destination cluster and namespace to deploy the application
destination:
# cluster API URL
server: https://kubernetes.default.svc
# or cluster name
# name: in-cluster
# The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace
namespace: guestbook
# Sync policy
@@ -38,7 +38,7 @@ data:
help.download.windows-amd64: "path-or-url-to-download"
# A dex connector configuration (optional). See SSO configuration documentation:
# https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/sso
# https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/user-management/index.md#sso
# https://dexidp.io/docs/connectors/
dex.config: |
connectors:
@@ -51,7 +51,7 @@ following example builds an entirely customized repo-server from a Dockerfile, i
dependencies that may be needed for generating manifests.
```Dockerfile
FROM argoproj/argocd:latest
FROM argoproj/argocd:v2.5.4 # Replace tag with the appropriate argo version
# Switch to root for the ability to perform install
USER root
@@ -9,7 +9,7 @@ Metrics about applications. Scraped at the `argocd-metrics:8082/metrics` endpoin
|--------|:----:|-------------|
| `argocd_app_info` | gauge | Information about Applications. It contains labels such as `sync_status` and `health_status` that reflect the application state in ArgoCD. |
| `argocd_app_k8s_request_total` | counter | Number of kubernetes requests executed during application reconciliation |
| `argocd_app_labels` | gauge | Argo Application labels converted to Prometheus labels. Disabled by default. See section bellow about how to enable it. |
| `argocd_app_labels` | gauge | Argo Application labels converted to Prometheus labels. Disabled by default. See section below about how to enable it. |
| `argocd_app_reconcile` | histogram | Application reconciliation performance. |
| `argocd_app_sync_total` | counter | Counter for application sync history |
| `argocd_cluster_api_resource_objects` | gauge | Number of k8s resource objects in the cache. |
@@ -41,7 +41,7 @@ Some examples are:
As the Application labels are specific to each company, this feature is disabled by default. To enable it, add the
`--metrics-application-labels` flag to the ArgoCD application controller.
The example bellow will expose the ArgoCD Application labels `team-name` and `business-unit` to Prometheus:
The example below will expose the ArgoCD Application labels `team-name` and `business-unit` to Prometheus:
containers:
- command:
@@ -15,9 +15,11 @@ spec:
- '*'
# Only permit applications to deploy to the guestbook namespace in the same cluster
# Destination clusters can be identified by 'server', 'name', or both.
destinations:
- namespace: guestbook
server: https://kubernetes.default.svc
name: in-cluster
# Deny all cluster-scoped resources from being created, except for Namespace
clusterResourceWhitelist:
@@ -9,9 +9,8 @@ Operators can add actions to custom resources in form of a Lua script and expand
Argo CD supports custom resource actions written in [Lua](https://www.lua.org/). This is useful if you:
* Have a custom resource for which Argo CD does not provide any built-in actions.
* Have a commonly performed manual task that might be error prone if executed by users via `kubectl`
* Have a custom resource for which Argo CD does not provide any built-in actions.
* Have a commonly performed manual task that might be error prone if executed by users via `kubectl`
You can define your own custom resource actions in the `argocd-cm` ConfigMap.
@@ -1,6 +1,11 @@
# Secret Management
Argo CD is un-opinionated about how secrets are managed. There's many ways to do it and there's no one-size-fits-all solution. Here's some ways people are doing GitOps secrets:
Argo CD is un-opinionated about how secrets are managed. There are many ways to do it, and there's no one-size-fits-all solution.
Many solutions use plugins to inject secrets into the application manifests. See [Mitigating Risks of Secret-Injection Plugins](#mitigating-risks-of-secret-injection-plugins)
below to make sure you use those plugins securely.
Here are some ways people are doing GitOps secrets:
* [Bitnami Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets)
* [GoDaddy Kubernetes External Secrets](https://github.com/godaddy/kubernetes-external-secrets)
@@ -15,3 +20,17 @@ Argo CD is un-opinionated about how secrets are managed. There's many ways to do
* [argocd-vault-replacer](https://github.com/crumbhole/argocd-vault-replacer)
For discussion, see [#1364](https://github.com/argoproj/argo-cd/issues/1364)
## Mitigating Risks of Secret-Injection Plugins
Argo CD caches the manifests generated by plugins, along with the injected secrets, in its Redis instance. Those
manifests are also available via the repo-server API (a gRPC service). This means that the secrets are available to
anyone who has access to the Redis instance or to the repo-server.
Consider these steps to mitigate the risks of secret-injection plugins:
1. Set up network policies to prevent direct access to Argo CD components (Redis and the repo-server). Make sure your
cluster supports those network policies and can actually enforce them.
2. Consider running Argo CD on its own cluster, with no other applications running on it.
3. [Enable password authentication on the Redis instance](https://github.com/argoproj/argo-cd/issues/3130) (currently
only supported for non-HA Argo CD installations).
@@ -1,4 +1,4 @@
# v1.8 to v2.0
# v1.8 to 2.0
## Redis Upgraded to v6.2.1
@@ -300,6 +300,19 @@ data:
issuer: https://dev-123456.oktapreview.com
clientID: aaaabbbbccccddddeee
clientSecret: $oidc.okta.clientSecret
# Optional list of allowed aud claims. If omitted or empty, defaults to the clientID value above (and the
# cliCientID, if that is also specified). If you specify a list and want the clientID to be allowed, you must
# explicitly include it in the list.
# Token verification will pass if any of the token's audiences matches any of the audiences in this list.
allowedAudiences:
- aaaabbbbccccddddeee
- qqqqwwwweeeerrrrttt
# Optional. If false, tokens without an audience will always fail validation. If true, tokens without an audience
# will always pass validation.
# Defaults to true for Argo CD < 2.6.0. Defaults to false for Argo CD >= 2.6.0.
skipAudienceCheckWhenTokenHasNoAudience: true
# Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"]
requestedScopes: ["openid", "profile", "email", "groups"]
@@ -80,7 +80,7 @@ data:
- '.webhooks[]?.clientConfig.caBundle'
```
Resource customization can also be configured to ignore all differences made by a managedField.manager at the system level. The example bellow shows how to configure ArgoCD to ignore changes made by `kube-controller-manager` in `Deployment` resources.
Resource customization can also be configured to ignore all differences made by a managedField.manager at the system level. The example below shows how to configure Argo CD to ignore changes made by `kube-controller-manager` in `Deployment` resources.
```yaml
data:
@@ -89,7 +89,7 @@ data:
- kube-controller-manager
```
It is possible to configure ignoreDifferences to be applied to all resources in every Application managed by an ArgoCD instance. In order to do so, resource customizations can be configured like in the example bellow:
It is possible to configure ignoreDifferences to be applied to all resources in every Application managed by an Argo CD instance. In order to do so, resource customizations can be configured like in the example below:
```yaml
data:
@@ -43,6 +43,9 @@ spec:
recurse: true
```
!!! warning
Directory-type applications only work for plain manifest files. If Argo CD encounters Kustomize, Helm, or Jsonnet files when directory: is set, it will fail to render the manifests.
## Including/Excluding Files
### Including Only Certain Files
@@ -282,7 +282,7 @@ Helm, [starting with v3.6.1](https://github.com/helm/helm/releases/tag/v3.6.1),
prevents sending repository credentials to download charts that are being served
from a different domain than the repository.
If needed, it is possible to specifically set the Helm version to template with by setting the `helm-pass-credentials` flag on the cli:
If needed, it is possible to opt into passing credentials for all domains by setting the `helm-pass-credentials` flag on the cli:
```bash
argocd app set helm-guestbook --helm-pass-credentials
@@ -69,7 +69,7 @@ spec:
source:
repoURL: https://github.com/argoproj/argocd-example-apps.git
targetRevision: HEAD
path: guestbook-kustomize
path: kustomize-guestbook
kustomize:
version: v3.5.4
@@ -84,4 +84,4 @@ argocd app set <appyName> --kustomize-version v3.5.4
## Build Environment
Kustomize does not support parameters and therefore cannot support the standard [build environment](build-environment.md).
Kustomize apps have access to the [standard build environment](build-environment.md) which can be used in combination with a [config managment plugin](config-management-plugins.md) to alter the rendered manifests.
@@ -40,8 +40,8 @@ metadata:
argocd.argoproj.io/sync-options: Validate=false
```
If you want to exclude a whole class of objects globally, consider setting `resource.customizations` in [system level configuration](../user-guide/diffing.md#system-level-configuration).
If you want to exclude a whole class of objects globally, consider setting `resource.customizations` in [system level configuration](../user-guide/diffing.md#system-level-configuration).
## Skip Dry Run for new custom resources types
>v1.6
@@ -64,9 +64,9 @@ The dry run will still be executed if the CRD is already present in the cluster.
## Selective Sync
Currently when syncing using auto sync ArgoCD applies every object in the application.
Currently when syncing using auto sync Argo CD applies every object in the application.
For applications containing thousands of objects this takes quite a long time and puts undue pressure on the api server.
Turning on selective sync option which will sync only out-of-sync resources.
Turning on selective sync option which will sync only out-of-sync resources.
You can add this option by following ways
@@ -81,7 +81,7 @@ spec:
syncPolicy:
syncOptions:
- ApplyOutOfSyncOnly=true
```
```
2) Set sync option via argocd cli
@@ -108,8 +108,8 @@ spec:
## Prune Last
This feature is to allow the ability for resource pruning to happen as a final, implicit wave of a sync operation,
after the other resources have been deployed and become healthy, and after all other waves completed successfully.
This feature is to allow the ability for resource pruning to happen as a final, implicit wave of a sync operation,
after the other resources have been deployed and become healthy, and after all other waves completed successfully.
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -146,6 +146,10 @@ spec:
If the `Replace=true` sync option is set the ArgoCD will use `kubectl replace` or `kubectl create` command to apply changes.
!!! warning
During the sync process, the resources will be synchronized using the 'kubectl replace/create' command.
This sync option has the potential to be destructive and might lead to resources having to be recreated, which could cause an outage for your application.
This can also be configured at individual resource level.
```yaml
metadata:
@@ -168,7 +172,7 @@ spec:
## Respect ignore difference configs
This sync option is used to enable ArgoCD to consider the configurations made in the `spec.ignoreDifferences` attribute also during the sync stage. By default, ArgoCD uses the `ignoreDifferences` config just for computing the diff between the live and desired state which defines if the application is synced or not. However during the sync stage, the desired state is applied as-is. The patch is calculated using a 3-way-merge between the live state the desired state and the `last-applied-configuration` annotation. This sometimes leads to an undesired results. This behavior can be changed by setting the `RespectIgnoreDifferences=true` sync option like in the example bellow:
This sync option is used to enable Argo CD to consider the configurations made in the `spec.ignoreDifferences` attribute also during the sync stage. By default, Argo CD uses the `ignoreDifferences` config just for computing the diff between the live and desired state which defines if the application is synced or not. However during the sync stage, the desired state is applied as-is. The patch is calculated using a 3-way-merge between the live state the desired state and the `last-applied-configuration` annotation. This sometimes leads to an undesired results. This behavior can be changed by setting the `RespectIgnoreDifferences=true` sync option like in the example below:
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -194,10 +198,16 @@ The example above shows how an ArgoCD Application can be configured so it will i
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
namespace: test
namespace: argocd
spec:
destination:
server: https://kubernetes.default.svc
namespace: some-namespace
syncPolicy:
syncOptions:
- CreateNamespace=true
```
The example above shows how an Argo CD Application can be configured so it will create namespaces for the Application resources if the namespaces don't exist already. Without this either declared in the Application manifest or passed in the cli via `--sync-option CreateNamespace=true`, the Application will fail to sync if the resources' namespaces do not exist.
The example above shows how an Argo CD Application can be configured so it will create the namespace specified in `spec.destination.namespace` if it doesn't exist already. Without this either declared in the Application manifest or passed in the CLI via `--sync-option CreateNamespace=true`, the Application will fail to sync if the namespace doesn't exist.
Note that the namespace to be created must be informed in the `spec.destination.namespace` field of the Application resource. The `metadata.namespace` field in the Application's child manifests must match this value, or can be omitted, so resources are created in the proper destination.
@@ -20,6 +20,8 @@ For Helm, all versions are [Semantic Versions](https://semver.org/). As a result
| Track minor releases (e.g. in QA) | Use a range | `1.*` or `>=1.0.0 <2.0.0` |
| Use the latest (e.g. in local development) | Use star range | `*` or `>=0.0.0` |
**Note for OCI Helm repositories**: the only available strategy is "Pin to a version".
[Read about version ranges](https://www.telerik.com/blogs/the-mystical-magical-semver-ranges-used-by-npm-bower)
## Git
9 go.mod

@@ -54,7 +54,7 @@ require (
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.9.1
github.com/pquerna/cachecontrol v0.0.0-20180306154005-525d0eb5f91d // indirect
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_golang v1.11.1
github.com/r3labs/diff v1.1.0
github.com/robfig/cron v1.2.0
github.com/rs/cors v1.8.0 // indirect
@@ -200,7 +200,7 @@ require (
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.2.2 // indirect
gopkg.in/square/go-jose.v2 v2.2.2
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/apiserver v0.23.1
@@ -216,6 +216,8 @@ require (
)
replace (
// Address CVE-2021-4238
github.com/Masterminds/goutils => github.com/Masterminds/goutils v1.1.1
// https://github.com/golang/go/issues/33546#issuecomment-519656923
github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
@@ -226,6 +228,9 @@ replace (
google.golang.org/grpc => google.golang.org/grpc v1.15.0
// Avoid CVE-2022-3064
gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.2.4
// Avoid CVE-2022-28948
gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
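The new `replace` entries pin transitive modules to patched releases to address the CVEs noted in the comments. A hedged sketch of how such a pin is typically added from the command line (module versions taken from the diff above):

```bash
# Force every consumer in the build graph onto the patched releases, then
# re-resolve go.sum. This has the same effect as the replace directives above.
go mod edit -replace gopkg.in/yaml.v3=gopkg.in/yaml.v3@v3.0.1
go mod edit -replace github.com/Masterminds/goutils=github.com/Masterminds/goutils@v1.1.1
go mod tidy
```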
17 go.sum

@@ -73,8 +73,8 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
@@ -833,8 +833,9 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1539,16 +1540,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
@@ -28,7 +28,7 @@ spec:
name: dexconfig
containers:
- name: dex
image: ghcr.io/dexidp/dex:v2.35.3-distroless
image: ghcr.io/dexidp/dex:v2.35.3
imagePullPolicy: Always
command: [/shared/argocd-dex, rundex]
securityContext:
@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v2.3.10
newTag: v2.3.17
resources:
- ./application-controller
- ./dex
@@ -21,7 +21,7 @@ spec:
serviceAccountName: argocd-redis
containers:
- name: redis
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: Always
args:
- "--save"
@@ -9564,7 +9564,7 @@ spec:
- ""
- --appendonly
- "no"
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: Always
name: redis
ports:
@@ -9698,7 +9698,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -9747,7 +9747,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
name: copyutil
volumeMounts:
- mountPath: /var/run/argocd
@@ -9912,7 +9912,7 @@ spec:
key: controller.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -11,4 +11,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v2.3.10
newTag: v2.3.17
@@ -11,7 +11,7 @@ patchesStrategicMerge:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v2.3.10
newTag: v2.3.17
resources:
- ../../base/application-controller
- ../../base/dex
@@ -878,7 +878,7 @@ spec:
automountServiceAccountToken: false
initContainers:
- name: config-init
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
resources:
{}
@@ -906,7 +906,7 @@ spec:
containers:
- name: redis
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
command:
- redis-server
@@ -947,7 +947,7 @@ spec:
lifecycle:
{}
- name: sentinel
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
command:
- redis-sentinel
@@ -15,6 +15,6 @@ redis-ha:
client: 6m
checkInterval: 3s
image:
tag: 6.2.7-alpine
tag: 6.2.8-alpine
sentinel:
bind: "0.0.0.0"
@@ -10494,7 +10494,7 @@ spec:
- command:
- /shared/argocd-dex
- rundex
image: ghcr.io/dexidp/dex:v2.35.3-distroless
image: ghcr.io/dexidp/dex:v2.35.3
imagePullPolicy: Always
name: dex
ports:
@@ -10516,7 +10516,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -10549,7 +10549,7 @@ spec:
containers:
- command:
- argocd-notifications
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -10788,7 +10788,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -10837,7 +10837,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
name: copyutil
volumeMounts:
- mountPath: /var/run/argocd
@@ -11064,7 +11064,7 @@ spec:
key: server.http.cookie.maxnumber
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -11260,7 +11260,7 @@ spec:
key: controller.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -11342,7 +11342,7 @@ spec:
- /data/conf/redis.conf
command:
- redis-server
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
lifecycle: {}
livenessProbe:
@@ -11380,7 +11380,7 @@ spec:
- /data/conf/sentinel.conf
command:
- redis-sentinel
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
lifecycle: {}
livenessProbe:
@@ -11426,7 +11426,7 @@ spec:
value: 40000915ab58c3fa8fd888fb8b24711944e6cbb4
- name: SENTINEL_ID_2
value: 2bbec7894d954a8af3bb54d13eaec53cb024e2ca
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
name: config-init
volumeMounts:
@@ -7790,7 +7790,7 @@ spec:
- command:
- /shared/argocd-dex
- rundex
image: ghcr.io/dexidp/dex:v2.35.3-distroless
image: ghcr.io/dexidp/dex:v2.35.3
imagePullPolicy: Always
name: dex
ports:
@@ -7812,7 +7812,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -7845,7 +7845,7 @@ spec:
containers:
- command:
- argocd-notifications
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -8084,7 +8084,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -8133,7 +8133,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
name: copyutil
volumeMounts:
- mountPath: /var/run/argocd
@@ -8360,7 +8360,7 @@ spec:
key: server.http.cookie.maxnumber
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -8556,7 +8556,7 @@ spec:
key: controller.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.3.10
image: quay.io/argoproj/argocd:v2.3.17
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -8638,7 +8638,7 @@ spec:
- /data/conf/redis.conf
command:
- redis-server
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
lifecycle: {}
livenessProbe:
@@ -8676,7 +8676,7 @@ spec:
- /data/conf/sentinel.conf
command:
- redis-sentinel
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
lifecycle: {}
livenessProbe:
@@ -8722,7 +8722,7 @@ spec:
value: 40000915ab58c3fa8fd888fb8b24711944e6cbb4
- name: SENTINEL_ID_2
value: 2bbec7894d954a8af3bb54d13eaec53cb024e2ca
image: redis:6.2.7-alpine
image: redis:6.2.8-alpine
imagePullPolicy: IfNotPresent
name: config-init
|
||||
volumeMounts:
|
||||
|
||||
@@ -9864,7 +9864,7 @@ spec:
|
||||
- command:
|
||||
- /shared/argocd-dex
|
||||
- rundex
|
||||
image: ghcr.io/dexidp/dex:v2.35.3-distroless
|
||||
image: ghcr.io/dexidp/dex:v2.35.3
|
||||
imagePullPolicy: Always
|
||||
name: dex
|
||||
ports:
|
||||
@@ -9886,7 +9886,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -9919,7 +9919,7 @@ spec:
|
||||
containers:
|
||||
- command:
|
||||
- argocd-notifications
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -9988,7 +9988,7 @@ spec:
|
||||
- ""
|
||||
- --appendonly
|
||||
- "no"
|
||||
image: redis:6.2.7-alpine
|
||||
image: redis:6.2.8-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -10122,7 +10122,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -10171,7 +10171,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -10394,7 +10394,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -10584,7 +10584,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
||||
@@ -7160,7 +7160,7 @@ spec:
|
||||
- command:
|
||||
- /shared/argocd-dex
|
||||
- rundex
|
||||
image: ghcr.io/dexidp/dex:v2.35.3-distroless
|
||||
image: ghcr.io/dexidp/dex:v2.35.3
|
||||
imagePullPolicy: Always
|
||||
name: dex
|
||||
ports:
|
||||
@@ -7182,7 +7182,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
@@ -7215,7 +7215,7 @@ spec:
|
||||
containers:
|
||||
- command:
|
||||
- argocd-notifications
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -7284,7 +7284,7 @@ spec:
|
||||
- ""
|
||||
- --appendonly
|
||||
- "no"
|
||||
image: redis:6.2.7-alpine
|
||||
image: redis:6.2.8-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -7418,7 +7418,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -7467,7 +7467,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
name: copyutil
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/argocd
|
||||
@@ -7690,7 +7690,7 @@ spec:
|
||||
key: server.http.cookie.maxnumber
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -7880,7 +7880,7 @@ spec:
|
||||
key: controller.default.cache.expiration
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.3.10
|
||||
image: quay.io/argoproj/argocd:v2.3.17
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
||||
@@ -648,7 +648,7 @@ func helmTemplate(appPath string, repoRoot string, env *v1alpha1.Env, q *apiclie
|
||||
for _, val := range appHelm.ValueFiles {
|
||||
|
||||
// This will resolve val to an absolute path (or an URL)
|
||||
path, isRemote, err := pathutil.ResolveFilePath(appPath, repoRoot, val, q.GetValuesFileSchemes())
|
||||
path, isRemote, err := pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, val, q.GetValuesFileSchemes())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -688,7 +688,7 @@ func helmTemplate(appPath string, repoRoot string, env *v1alpha1.Env, q *apiclie
|
||||
}
|
||||
}
|
||||
for _, p := range appHelm.FileParameters {
|
||||
resolvedPath, _, err := pathutil.ResolveFilePath(appPath, repoRoot, env.Envsubst(p.Path), q.GetValuesFileSchemes())
|
||||
resolvedPath, _, err := pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, env.Envsubst(p.Path), q.GetValuesFileSchemes())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -828,6 +828,10 @@ func GenerateManifests(ctx context.Context, appPath, repoRoot, revision string,
|
||||
|
||||
manifests := make([]string, 0)
|
||||
for _, obj := range targetObjs {
|
||||
if obj == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var targets []*unstructured.Unstructured
|
||||
if obj.IsList() {
|
||||
err = obj.EachListItem(func(object runtime.Object) error {
|
||||
@@ -1288,7 +1292,7 @@ func makeJsonnetVm(appPath string, repoRoot string, sourceJsonnet v1alpha1.Appli
|
||||
jpaths := []string{appPath}
|
||||
for _, p := range sourceJsonnet.Libs {
|
||||
// the jsonnet library path is relative to the repository root, not application path
|
||||
jpath, _, err := pathutil.ResolveFilePath(repoRoot, repoRoot, p, nil)
|
||||
jpath, err := pathutil.ResolveFileOrDirectoryPath(repoRoot, repoRoot, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1563,7 +1567,7 @@ func populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse, appPath strin
|
||||
return err
|
||||
}
|
||||
|
||||
if resolvedValuesPath, _, err := pathutil.ResolveFilePath(appPath, repoRoot, "values.yaml", []string{}); err == nil {
|
||||
if resolvedValuesPath, _, err := pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, "values.yaml", []string{}); err == nil {
|
||||
if err := loadFileIntoIfExists(resolvedValuesPath, &res.Helm.Values); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1573,7 +1577,7 @@ func populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse, appPath strin
|
||||
var resolvedSelectedValueFiles []pathutil.ResolvedFilePath
|
||||
// drop not allowed values files
|
||||
for _, file := range selectedValueFiles {
|
||||
if resolvedFile, _, err := pathutil.ResolveFilePath(appPath, repoRoot, file, q.GetValuesFileSchemes()); err == nil {
|
||||
if resolvedFile, _, err := pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, file, q.GetValuesFileSchemes()); err == nil {
|
||||
resolvedSelectedValueFiles = append(resolvedSelectedValueFiles, resolvedFile)
|
||||
} else {
|
||||
log.Warnf("Values file %s is not allowed: %v", file, err)
|
||||
|
||||
@@ -363,6 +363,27 @@ func TestGenerateJsonnetManifestInDir(t *testing.T) {
|
||||
assert.Equal(t, 2, len(res1.Manifests))
|
||||
}
|
||||
|
||||
func TestGenerateJsonnetManifestInRootDir(t *testing.T) {
|
||||
service := newService("testdata/jsonnet-1")
|
||||
|
||||
q := apiclient.ManifestRequest{
|
||||
Repo: &argoappv1.Repository{},
|
||||
ApplicationSource: &argoappv1.ApplicationSource{
|
||||
Path: ".",
|
||||
Directory: &argoappv1.ApplicationSourceDirectory{
|
||||
Jsonnet: argoappv1.ApplicationSourceJsonnet{
|
||||
ExtVars: []argoappv1.JsonnetVar{{Name: "extVarString", Value: "extVarString"}, {Name: "extVarCode", Value: "\"extVarCode\"", Code: true}},
|
||||
TLAs: []argoappv1.JsonnetVar{{Name: "tlaString", Value: "tlaString"}, {Name: "tlaCode", Value: "\"tlaCode\"", Code: true}},
|
||||
Libs: []string{"."},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
res1, err := service.GenerateManifest(context.Background(), &q)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(res1.Manifests))
|
||||
}
|
||||
|
||||
func TestGenerateJsonnetLibOutside(t *testing.T) {
|
||||
service := newService(".")
|
||||
|
||||
@@ -379,7 +400,7 @@ func TestGenerateJsonnetLibOutside(t *testing.T) {
|
||||
}
|
||||
_, err := service.GenerateManifest(context.Background(), &q)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "value file '../../../testdata/jsonnet/vendor' resolved to outside repository root")
|
||||
require.Contains(t, err.Error(), "file '../../../testdata/jsonnet/vendor' resolved to outside repository root")
|
||||
}
|
||||
|
||||
func TestGenerateKsonnetManifest(t *testing.T) {
|
||||
|
||||
47
reposerver/repository/testdata/jsonnet-1/guestbook-ui.jsonnet
vendored
Normal file
47
reposerver/repository/testdata/jsonnet-1/guestbook-ui.jsonnet
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
local service = import 'vendor/nested/service.libsonnet';
|
||||
local params = import 'params.libsonnet';
|
||||
|
||||
function(tlaString, tlaCode)
|
||||
[
|
||||
service.new(params),
|
||||
{
|
||||
apiVersion: 'apps/v1beta2',
|
||||
kind: 'Deployment',
|
||||
metadata: {
|
||||
name: params.name,
|
||||
},
|
||||
spec: {
|
||||
replicas: params.replicas,
|
||||
selector: {
|
||||
matchLabels: {
|
||||
app: params.name,
|
||||
},
|
||||
},
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: params.name,
|
||||
tlaString: tlaString,
|
||||
tlaCode: tlaCode,
|
||||
extVarString: std.extVar('extVarString'),
|
||||
extVarCode: std.extVar('extVarCode'),
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
image: params.image,
|
||||
name: params.name,
|
||||
ports: [
|
||||
{
|
||||
containerPort: params.containerPort,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
]
|
||||
8
reposerver/repository/testdata/jsonnet-1/params.libsonnet
vendored
Normal file
8
reposerver/repository/testdata/jsonnet-1/params.libsonnet
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
containerPort: 80,
|
||||
image: "gcr.io/heptio-images/ks-guestbook-demo:0.2",
|
||||
name: "guestbook-ui",
|
||||
replicas: 1,
|
||||
servicePort: 80,
|
||||
type: "ClusterIP",
|
||||
}
|
||||
23
reposerver/repository/testdata/jsonnet-1/vendor/nested/service.libsonnet
vendored
Normal file
23
reposerver/repository/testdata/jsonnet-1/vendor/nested/service.libsonnet
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
local new(params) = {
|
||||
apiVersion: 'v1',
|
||||
kind: 'Service',
|
||||
metadata: {
|
||||
name: params.name,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: params.servicePort,
|
||||
targetPort: params.containerPort,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: params.name,
|
||||
},
|
||||
type: params.type,
|
||||
},
|
||||
};
|
||||
|
||||
{
|
||||
new:: new,
|
||||
}
|
||||
@@ -43,4 +43,5 @@ function(tlaString, tlaCode)
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
]
|
||||
|
||||
@@ -3,17 +3,21 @@ if obj.status ~= nil then
|
||||
if obj.status.conditions ~= nil then
|
||||
for i, condition in ipairs(obj.status.conditions) do
|
||||
health_status.message = condition.message
|
||||
if condition.reason == "Successful" then
|
||||
if condition.type == "Successful" and condition.status == "True" then
|
||||
health_status.status = "Healthy"
|
||||
elseif condition.reason == "Running" then
|
||||
health_status.status = "Progressing"
|
||||
else
|
||||
health_status.status = "Degraded"
|
||||
return health_status
|
||||
end
|
||||
if condition.type == "Failure" and condition.status == "True" then
|
||||
health_status.status = "Degraded"
|
||||
return health_status
|
||||
end
|
||||
if condition.type == "Running" and condition.reason == "Running" then
|
||||
health_status.status = "Progressing"
|
||||
return health_status
|
||||
end
|
||||
return health_status
|
||||
end
|
||||
end
|
||||
end
|
||||
health_status.status = "Progressing"
|
||||
health_status.message = "Waiting for Kiali"
|
||||
return health_status
|
||||
return health_status
|
||||
|
||||
@@ -9,5 +9,5 @@ tests:
|
||||
inputPath: testdata/degraded.yaml
|
||||
- healthStatus:
|
||||
status: Healthy
|
||||
message: "Awaiting next reconciliation"
|
||||
message: "Last reconciliation succeeded"
|
||||
inputPath: testdata/healthy.yaml
|
||||
|
||||
@@ -14,14 +14,24 @@ metadata:
|
||||
spec: {}
|
||||
status:
|
||||
conditions:
|
||||
- ansibleResult:
|
||||
changed: 1
|
||||
completion: 2020-06-08T13:41:20.133525
|
||||
failures: 0
|
||||
ok: 56
|
||||
skipped: 82
|
||||
lastTransitionTime: "2020-06-04T17:47:31Z"
|
||||
message: Error Reconciling
|
||||
reason: null
|
||||
status: "True"
|
||||
type: Running
|
||||
- lastTransitionTime: '2022-10-19T09:44:32Z'
|
||||
message: ''
|
||||
reason: ''
|
||||
status: 'False'
|
||||
type: Failure
|
||||
- ansibleResult:
|
||||
changed: 18
|
||||
completion: '2022-10-19T09:44:32.289505'
|
||||
failures: 0
|
||||
ok: 101
|
||||
skipped: 101
|
||||
lastTransitionTime: '2022-10-19T09:43:39Z'
|
||||
message: Awaiting next reconciliation
|
||||
reason: Successful
|
||||
status: 'True'
|
||||
type: Running
|
||||
- lastTransitionTime: '2022-10-19T09:44:32Z'
|
||||
message: Error Reconciling
|
||||
reason: Failure
|
||||
status: 'True'
|
||||
type: Failure
|
||||
|
||||
@@ -14,14 +14,24 @@ metadata:
|
||||
spec: {}
|
||||
status:
|
||||
conditions:
|
||||
- ansibleResult:
|
||||
changed: 1
|
||||
completion: 2020-06-08T13:41:20.133525
|
||||
failures: 0
|
||||
ok: 56
|
||||
skipped: 82
|
||||
lastTransitionTime: "2020-06-04T17:47:31Z"
|
||||
message: Awaiting next reconciliation
|
||||
reason: Successful
|
||||
status: "True"
|
||||
type: Running
|
||||
- lastTransitionTime: '2022-10-19T09:44:32Z'
|
||||
message: ''
|
||||
reason: ''
|
||||
status: 'False'
|
||||
type: Failure
|
||||
- ansibleResult:
|
||||
changed: 18
|
||||
completion: '2022-10-19T09:44:32.289505'
|
||||
failures: 0
|
||||
ok: 101
|
||||
skipped: 101
|
||||
lastTransitionTime: '2022-10-19T09:43:39Z'
|
||||
message: Awaiting next reconciliation
|
||||
reason: Successful
|
||||
status: 'True'
|
||||
type: Running
|
||||
- lastTransitionTime: '2022-10-19T09:44:32Z'
|
||||
message: Last reconciliation succeeded
|
||||
reason: Successful
|
||||
status: 'True'
|
||||
type: Successful
|
||||
|
||||
@@ -1,4 +1,55 @@
|
||||
health_status = {}
|
||||
-- Can't use standard lib, math.huge equivalent
|
||||
infinity = 2^1024-1
|
||||
|
||||
local function executor_range_api()
|
||||
min_executor_instances = 0
|
||||
max_executor_instances = infinity
|
||||
if obj.spec.dynamicAllocation.maxExecutors then
|
||||
max_executor_instances = obj.spec.dynamicAllocation.maxExecutors
|
||||
end
|
||||
if obj.spec.dynamicAllocation.minExecutors then
|
||||
min_executor_instances = obj.spec.dynamicAllocation.minExecutors
|
||||
end
|
||||
return min_executor_instances, max_executor_instances
|
||||
end
|
||||
|
||||
local function maybe_executor_range_spark_conf()
|
||||
min_executor_instances = 0
|
||||
max_executor_instances = infinity
|
||||
if obj.spec.sparkConf["spark.streaming.dynamicAllocation.enabled"] ~= nil and
|
||||
obj.spec.sparkConf["spark.streaming.dynamicAllocation.enabled"] == "true" then
|
||||
if(obj.spec.sparkConf["spark.streaming.dynamicAllocation.maxExecutors"] ~= nil) then
|
||||
max_executor_instances = tonumber(obj.spec.sparkConf["spark.streaming.dynamicAllocation.maxExecutors"])
|
||||
end
|
||||
if(obj.spec.sparkConf["spark.streaming.dynamicAllocation.minExecutors"] ~= nil) then
|
||||
min_executor_instances = tonumber(obj.spec.sparkConf["spark.streaming.dynamicAllocation.minExecutors"])
|
||||
end
|
||||
return min_executor_instances, max_executor_instances
|
||||
elseif obj.spec.sparkConf["spark.dynamicAllocation.enabled"] ~= nil and
|
||||
obj.spec.sparkConf["spark.dynamicAllocation.enabled"] == "true" then
|
||||
if(obj.spec.sparkConf["spark.dynamicAllocation.maxExecutors"] ~= nil) then
|
||||
max_executor_instances = tonumber(obj.spec.sparkConf["spark.dynamicAllocation.maxExecutors"])
|
||||
end
|
||||
if(obj.spec.sparkConf["spark.dynamicAllocation.minExecutors"] ~= nil) then
|
||||
min_executor_instances = tonumber(obj.spec.sparkConf["spark.dynamicAllocation.minExecutors"])
|
||||
end
|
||||
return min_executor_instances, max_executor_instances
|
||||
else
|
||||
return nil
|
||||
end
|
||||
end
|
||||
|
||||
local function maybe_executor_range()
|
||||
if obj.spec["dynamicAllocation"] and obj.spec.dynamicAllocation.enabled then
|
||||
return executor_range_api()
|
||||
elseif obj.spec["sparkConf"] ~= nil then
|
||||
return maybe_executor_range_spark_conf()
|
||||
else
|
||||
return nil
|
||||
end
|
||||
end
|
||||
|
||||
if obj.status ~= nil then
|
||||
if obj.status.applicationState.state ~= nil then
|
||||
if obj.status.applicationState.state == "" then
|
||||
@@ -19,6 +70,13 @@ if obj.status ~= nil then
|
||||
health_status.status = "Healthy"
|
||||
health_status.message = "SparkApplication is Running"
|
||||
return health_status
|
||||
elseif maybe_executor_range() then
|
||||
min_executor_instances, max_executor_instances = maybe_executor_range()
|
||||
if count >= min_executor_instances and count <= max_executor_instances then
|
||||
health_status.status = "Healthy"
|
||||
health_status.message = "SparkApplication is Running"
|
||||
return health_status
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -72,4 +130,4 @@ if obj.status ~= nil then
|
||||
end
|
||||
health_status.status = "Progressing"
|
||||
health_status.message = "Waiting for Executor pods"
|
||||
return health_status
|
||||
return health_status
|
||||
|
||||
@@ -11,3 +11,15 @@ tests:
|
||||
status: Healthy
|
||||
message: "SparkApplication is Running"
|
||||
inputPath: testdata/healthy.yaml
|
||||
- healthStatus:
|
||||
status: Healthy
|
||||
message: "SparkApplication is Running"
|
||||
inputPath: testdata/healthy_dynamic_alloc.yaml
|
||||
- healthStatus:
|
||||
status: Healthy
|
||||
message: "SparkApplication is Running"
|
||||
inputPath: testdata/healthy_dynamic_alloc_dstream.yaml
|
||||
- healthStatus:
|
||||
status: Healthy
|
||||
message: "SparkApplication is Running"
|
||||
inputPath: testdata/healthy_dynamic_alloc_operator_api.yaml
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
apiVersion: sparkoperator.k8s.io/v1beta2
|
||||
kind: SparkApplication
|
||||
metadata:
|
||||
generation: 4
|
||||
labels:
|
||||
argocd.argoproj.io/instance: spark-job
|
||||
name: spark-job-app
|
||||
namespace: spark-cluster
|
||||
resourceVersion: "31812990"
|
||||
uid: bfee52b0-74ca-4465-8005-f6643097ed64
|
||||
spec:
|
||||
executor:
|
||||
instances: 4
|
||||
sparkConf:
|
||||
spark.dynamicAllocation.enabled: 'true'
|
||||
spark.dynamicAllocation.maxExecutors: '10'
|
||||
spark.dynamicAllocation.minExecutors: '2'
|
||||
status:
|
||||
applicationState:
|
||||
state: RUNNING
|
||||
driverInfo:
|
||||
podName: ingestion-datalake-news-app-driver
|
||||
webUIAddress: 172.20.207.161:4040
|
||||
webUIPort: 4040
|
||||
webUIServiceName: ingestion-datalake-news-app-ui-svc
|
||||
executionAttempts: 13
|
||||
executorState:
|
||||
ingestion-datalake-news-app-1591613851251-exec-1: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-2: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-4: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-5: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-6: RUNNING
|
||||
lastSubmissionAttemptTime: "2020-06-08T10:57:32Z"
|
||||
sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82
|
||||
submissionAttempts: 1
|
||||
submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0
|
||||
terminationTime: null
|
||||
@@ -0,0 +1,35 @@
|
||||
apiVersion: sparkoperator.k8s.io/v1beta2
|
||||
kind: SparkApplication
|
||||
metadata:
|
||||
generation: 4
|
||||
labels:
|
||||
argocd.argoproj.io/instance: spark-job
|
||||
name: spark-job-app
|
||||
namespace: spark-cluster
|
||||
resourceVersion: "31812990"
|
||||
uid: bfee52b0-74ca-4465-8005-f6643097ed64
|
||||
spec:
|
||||
executor:
|
||||
instances: 4
|
||||
sparkConf:
|
||||
spark.streaming.dynamicAllocation.enabled: 'true'
|
||||
spark.streaming.dynamicAllocation.maxExecutors: '10'
|
||||
spark.streaming.dynamicAllocation.minExecutors: '2'
|
||||
status:
|
||||
applicationState:
|
||||
state: RUNNING
|
||||
driverInfo:
|
||||
podName: ingestion-datalake-news-app-driver
|
||||
webUIAddress: 172.20.207.161:4040
|
||||
webUIPort: 4040
|
||||
webUIServiceName: ingestion-datalake-news-app-ui-svc
|
||||
executionAttempts: 13
|
||||
executorState:
|
||||
ingestion-datalake-news-app-1591613851251-exec-1: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-4: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-6: RUNNING
|
||||
lastSubmissionAttemptTime: "2020-06-08T10:57:32Z"
|
||||
sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82
|
||||
submissionAttempts: 1
|
||||
submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0
|
||||
terminationTime: null
|
||||
@@ -0,0 +1,38 @@
|
||||
apiVersion: sparkoperator.k8s.io/v1beta2
|
||||
kind: SparkApplication
|
||||
metadata:
|
||||
generation: 4
|
||||
labels:
|
||||
argocd.argoproj.io/instance: spark-job
|
||||
name: spark-job-app
|
||||
namespace: spark-cluster
|
||||
resourceVersion: "31812990"
|
||||
uid: bfee52b0-74ca-4465-8005-f6643097ed64
|
||||
spec:
|
||||
executor:
|
||||
instances: 4
|
||||
dynamicAllocation:
|
||||
enabled: true
|
||||
initialExecutors: 2
|
||||
minExecutors: 2
|
||||
maxExecutors: 10
|
||||
status:
|
||||
applicationState:
|
||||
state: RUNNING
|
||||
driverInfo:
|
||||
podName: ingestion-datalake-news-app-driver
|
||||
webUIAddress: 172.20.207.161:4040
|
||||
webUIPort: 4040
|
||||
webUIServiceName: ingestion-datalake-news-app-ui-svc
|
||||
executionAttempts: 13
|
||||
executorState:
|
||||
ingestion-datalake-news-app-1591613851251-exec-1: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-2: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-4: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-5: RUNNING
|
||||
ingestion-datalake-news-app-1591613851251-exec-6: RUNNING
|
||||
lastSubmissionAttemptTime: "2020-06-08T10:57:32Z"
|
||||
sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82
|
||||
submissionAttempts: 1
|
||||
submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0
|
||||
terminationTime: null
|
||||
@@ -1,6 +1,7 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
@@ -12,6 +13,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster"
|
||||
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
servercache "github.com/argoproj/argo-cd/v2/server/cache"
|
||||
@@ -133,7 +135,7 @@ func (s *Server) Get(ctx context.Context, q *cluster.ClusterQuery) (*appv1.Clust
|
||||
func (s *Server) getClusterWith403IfNotExist(ctx context.Context, q *cluster.ClusterQuery) (*appv1.Cluster, error) {
|
||||
repo, err := s.getCluster(ctx, q)
|
||||
if err != nil || repo == nil {
|
||||
return nil, status.Error(codes.PermissionDenied, "permission denied")
|
||||
return nil, common.PermissionDeniedAPIError
|
||||
}
|
||||
return repo, nil
|
||||
}
|
||||
@@ -144,6 +146,12 @@ func (s *Server) getCluster(ctx context.Context, q *cluster.ClusterQuery) (*appv
|
||||
q.Name = ""
|
||||
if q.Id.Type == "name" {
|
||||
q.Name = q.Id.Value
|
||||
} else if q.Id.Type == "name_escaped" {
|
||||
nameUnescaped, err := url.QueryUnescape(q.Id.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
q.Name = nameUnescaped
|
||||
} else {
|
||||
q.Server = q.Id.Value
|
||||
}
|
||||
@@ -213,14 +221,14 @@ func (s *Server) Update(ctx context.Context, q *cluster.ClusterUpdateRequest) (*
|
||||
}
|
||||
|
||||
// verify that user can do update inside project where cluster is located
|
||||
if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionUpdate, createRBACObject(c.Project, q.Cluster.Server)); err != nil {
|
||||
return nil, err
|
||||
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionUpdate, createRBACObject(c.Project, c.Server)) {
|
||||
return nil, common.PermissionDeniedAPIError
|
||||
}
|
||||
|
||||
if len(q.UpdatedFields) == 0 || sets.NewString(q.UpdatedFields...).Has("project") {
|
||||
// verify that user can do update inside project where cluster will be located
|
||||
if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionUpdate, createRBACObject(q.Cluster.Project, q.Cluster.Server)); err != nil {
|
||||
return nil, err
|
||||
if !s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionUpdate, createRBACObject(q.Cluster.Project, c.Server)) {
|
||||
return nil, common.PermissionDeniedAPIError
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -43,6 +44,177 @@ func newNoopEnforcer() *rbac.Enforcer {
|
||||
return enf
|
||||
}
|
||||
|
||||
func TestUpdateCluster_RejectInvalidParams(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
request clusterapi.ClusterUpdateRequest
|
||||
}{
|
||||
{
|
||||
name: "allowed cluster URL in body, disallowed cluster URL in query",
|
||||
request: clusterapi.ClusterUpdateRequest{Cluster: &v1alpha1.Cluster{Name: "", Server: "https://127.0.0.1", Project: "", ClusterResources: true}, Id: &clusterapi.ClusterID{Type: "", Value: "https://127.0.0.2"}, UpdatedFields: []string{"clusterResources", "project"}},
|
||||
},
|
||||
{
|
||||
name: "allowed cluster URL in body, disallowed cluster name in query",
|
||||
request: clusterapi.ClusterUpdateRequest{Cluster: &v1alpha1.Cluster{Name: "", Server: "https://127.0.0.1", Project: "", ClusterResources: true}, Id: &clusterapi.ClusterID{Type: "name", Value: "disallowed-unscoped"}, UpdatedFields: []string{"clusterResources", "project"}},
|
||||
},
|
||||
{
|
||||
name: "allowed cluster URL in body, disallowed cluster name in query, changing unscoped to scoped",
|
||||
request: clusterapi.ClusterUpdateRequest{Cluster: &v1alpha1.Cluster{Name: "", Server: "https://127.0.0.1", Project: "allowed-project", ClusterResources: true}, Id: &clusterapi.ClusterID{Type: "", Value: "https://127.0.0.2"}, UpdatedFields: []string{"clusterResources", "project"}},
|
||||
},
|
||||
{
|
||||
name: "allowed cluster URL in body, disallowed cluster URL in query, changing unscoped to scoped",
|
||||
request: clusterapi.ClusterUpdateRequest{Cluster: &v1alpha1.Cluster{Name: "", Server: "https://127.0.0.1", Project: "allowed-project", ClusterResources: true}, Id: &clusterapi.ClusterID{Type: "name", Value: "disallowed-unscoped"}, UpdatedFields: []string{"clusterResources", "project"}},
|
||||
},
|
||||
}
|
||||
|
||||
db := &dbmocks.ArgoDB{}
|
||||
|
||||
clusters := []v1alpha1.Cluster{
|
||||
{
|
||||
Name: "allowed-unscoped",
|
||||
Server: "https://127.0.0.1",
|
||||
},
|
||||
{
|
||||
Name: "disallowed-unscoped",
|
||||
Server: "https://127.0.0.2",
|
||||
},
|
||||
{
|
||||
Name: "allowed-scoped",
|
||||
Server: "https://127.0.0.3",
|
||||
Project: "allowed-project",
|
||||
},
|
||||
{
|
||||
Name: "disallowed-scoped",
|
||||
Server: "https://127.0.0.4",
|
||||
Project: "disallowed-project",
|
||||
},
|
||||
}
|
||||
|
||||
db.On("ListClusters", mock.Anything).Return(
|
||||
func(ctx context.Context) *v1alpha1.ClusterList {
|
||||
return &v1alpha1.ClusterList{
|
||||
ListMeta: v1.ListMeta{},
|
||||
Items: clusters,
|
||||
}
|
||||
},
|
||||
func(ctx context.Context) error {
|
||||
return nil
|
||||
},
|
||||
)
|
||||
db.On("UpdateCluster", mock.Anything, mock.Anything).Return(
|
||||
func(ctx context.Context, c *v1alpha1.Cluster) *v1alpha1.Cluster {
|
||||
for _, cluster := range clusters {
|
||||
if c.Server == cluster.Server {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
func(ctx context.Context, c *v1alpha1.Cluster) error {
|
||||
for _, cluster := range clusters {
|
||||
if c.Server == cluster.Server {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("cluster '%s' not found", c.Server)
|
||||
},
|
||||
)
|
||||
db.On("GetCluster", mock.Anything, mock.Anything).Return(
|
||||
func(ctx context.Context, server string) *v1alpha1.Cluster {
|
||||
for _, cluster := range clusters {
|
||||
if server == cluster.Server {
|
||||
return &cluster
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
func(ctx context.Context, server string) error {
|
||||
for _, cluster := range clusters {
|
||||
if server == cluster.Server {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("cluster '%s' not found", server)
|
||||
},
|
||||
)
|
||||
|
||||
enf := rbac.NewEnforcer(fake.NewSimpleClientset(test.NewFakeConfigMap()), test.FakeArgoCDNamespace, common.ArgoCDConfigMapName, nil)
|
||||
_ = enf.SetBuiltinPolicy(`p, role:test, clusters, *, https://127.0.0.1, allow
|
||||
p, role:test, clusters, *, allowed-project/*, allow`)
|
||||
enf.SetDefaultRole("role:test")
|
||||
server := NewServer(db, enf, newServerInMemoryCache(), &kubetest.MockKubectlCmd{})
|
||||
|
||||
for _, c := range testCases {
|
||||
cc := c
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
out, err := server.Update(context.Background(), &cc.request)
|
||||
require.Nil(t, out)
|
||||
assert.ErrorIs(t, err, common.PermissionDeniedAPIError)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCluster_UrlEncodedName(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
|
||||
mockCluster := v1alpha1.Cluster{
|
||||
Name: "test/ing",
|
||||
Server: "https://127.0.0.1",
|
||||
Namespaces: []string{"default", "kube-system"},
|
||||
}
|
||||
mockClusterList := v1alpha1.ClusterList{
|
||||
ListMeta: v1.ListMeta{},
|
||||
Items: []v1alpha1.Cluster{
|
||||
mockCluster,
|
||||
},
|
||||
}
|
||||
|
||||
db.On("ListClusters", mock.Anything).Return(&mockClusterList, nil)
|
||||
|
||||
server := NewServer(db, newNoopEnforcer(), newServerInMemoryCache(), &kubetest.MockKubectlCmd{})
|
||||
|
||||
cluster, err := server.Get(context.Background(), &clusterapi.ClusterQuery{
|
||||
Id: &clusterapi.ClusterID{
|
||||
Type: "name_escaped",
|
||||
Value: "test%2fing",
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, cluster.Name, "test/ing")
|
||||
}
|
||||
|
||||
func TestGetCluster_NameWithUrlEncodingButShouldNotBeUnescaped(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
|
||||
mockCluster := v1alpha1.Cluster{
|
||||
Name: "test%2fing",
|
||||
Server: "https://127.0.0.1",
|
||||
Namespaces: []string{"default", "kube-system"},
|
||||
}
|
||||
mockClusterList := v1alpha1.ClusterList{
|
||||
ListMeta: v1.ListMeta{},
|
||||
Items: []v1alpha1.Cluster{
|
||||
mockCluster,
|
||||
},
|
||||
}
|
||||
|
||||
db.On("ListClusters", mock.Anything).Return(&mockClusterList, nil)
|
||||
|
||||
server := NewServer(db, newNoopEnforcer(), newServerInMemoryCache(), &kubetest.MockKubectlCmd{})
|
||||
|
||||
cluster, err := server.Get(context.Background(), &clusterapi.ClusterQuery{
|
||||
Id: &clusterapi.ClusterID{
|
||||
Type: "name",
|
||||
Value: "test%2fing",
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, cluster.Name, "test%2fing")
|
||||
}
|
||||
|
||||
func TestUpdateCluster_NoFieldsPaths(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
var updated *v1alpha1.Cluster
|
||||
|
||||
@@ -32,6 +32,7 @@ import (
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
"github.com/argoproj/argo-cd/v2/util/rbac"
|
||||
settings_util "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
testutil "github.com/argoproj/argo-cd/v2/util/test"
|
||||
)
|
||||
|
||||
func fakeServer() (*ArgoCDServer, func()) {
|
||||
@@ -500,7 +501,7 @@ func dexMockHandler(t *testing.T, url string) func(http.ResponseWriter, *http.Re
|
||||
}
|
||||
}
|
||||
|
||||
func getTestServer(t *testing.T, anonymousEnabled bool, withFakeSSO bool) (argocd *ArgoCDServer, dexURL string) {
|
||||
func getTestServer(t *testing.T, anonymousEnabled bool, withFakeSSO bool, useDexForSSO bool) (argocd *ArgoCDServer, oidcURL string) {
|
||||
cm := test.NewFakeConfigMap()
|
||||
if anonymousEnabled {
|
||||
cm.Data["users.anonymous.enabled"] = "true"
|
||||
@@ -511,9 +512,14 @@ func getTestServer(t *testing.T, anonymousEnabled bool, withFakeSSO bool) (argoc
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
dexMockHandler(t, ts.URL)(w, r)
|
||||
})
|
||||
oidcServer := ts
|
||||
if !useDexForSSO {
|
||||
oidcServer = testutil.GetOIDCTestServer(t)
|
||||
}
|
||||
if withFakeSSO {
|
||||
cm.Data["url"] = ts.URL
|
||||
cm.Data["dex.config"] = `
|
||||
if useDexForSSO {
|
||||
cm.Data["dex.config"] = `
|
||||
connectors:
|
||||
# OIDC
|
||||
- type: OIDC
|
||||
@@ -523,6 +529,19 @@ connectors:
|
||||
issuer: https://auth.example.gom
|
||||
clientID: test-client
|
||||
clientSecret: $dex.oidc.clientSecret`
|
||||
} else {
|
||||
oidcConfig := settings_util.OIDCConfig{
|
||||
Name: "Okta",
|
||||
Issuer: oidcServer.URL,
|
||||
ClientID: "argo-cd",
|
||||
ClientSecret: "$oidc.okta.clientSecret",
|
||||
}
|
||||
oidcConfigString, err := yaml.Marshal(oidcConfig)
|
||||
require.NoError(t, err)
|
||||
cm.Data["oidc.config"] = string(oidcConfigString)
|
||||
// Avoid bothering with certs for local tests.
|
||||
cm.Data["oidc.tls.insecure.skip.verify"] = "true"
|
||||
}
|
||||
}
|
||||
secret := test.NewFakeSecret()
|
||||
kubeclientset := fake.NewSimpleClientset(cm, secret)
|
||||
@@ -532,27 +551,32 @@ connectors:
|
||||
KubeClientset: kubeclientset,
|
||||
AppClientset: appClientSet,
|
||||
}
|
||||
if withFakeSSO {
|
||||
if withFakeSSO && useDexForSSO {
|
||||
argoCDOpts.DexServerAddr = ts.URL
|
||||
}
|
||||
argocd = NewServer(context.Background(), argoCDOpts)
|
||||
return argocd, ts.URL
|
||||
return argocd, oidcServer.URL
|
||||
}
|
||||
|
||||
func TestAuthenticate_3rd_party_JWTs(t *testing.T) {
|
||||
// Marshaling single strings to strings is typical, so we test for this relatively common behavior.
|
||||
jwt.MarshalSingleStringAsArray = false
|
||||
|
||||
type testData struct {
|
||||
test string
|
||||
anonymousEnabled bool
|
||||
claims jwt.RegisteredClaims
|
||||
expectedErrorContains string
|
||||
expectedClaims interface{}
|
||||
useDex bool
|
||||
}
|
||||
var tests = []testData{
|
||||
// Dex
|
||||
{
|
||||
test: "anonymous disabled, no audience",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.RegisteredClaims{},
|
||||
expectedErrorContains: "no audience found in the token",
|
||||
claims: jwt.RegisteredClaims{ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
expectedErrorContains: common.TokenVerificationError,
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
@@ -565,31 +589,95 @@ func TestAuthenticate_3rd_party_JWTs(t *testing.T) {
|
||||
{
|
||||
test: "anonymous disabled, unexpired token, admin claim",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"test-client"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
expectedErrorContains: "id token signed with unsupported algorithm",
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{common.ArgoCDClientAppID}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
expectedErrorContains: common.TokenVerificationError,
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, unexpired token, admin claim",
|
||||
anonymousEnabled: true,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"test-client"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{common.ArgoCDClientAppID}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "anonymous disabled, expired token, admin claim",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"test-client"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now())},
|
||||
expectedErrorContains: "token is expired",
|
||||
expectedClaims: jwt.RegisteredClaims{Issuer:"sso"},
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{common.ArgoCDClientAppID}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now())},
|
||||
expectedErrorContains: common.TokenVerificationError,
|
||||
expectedClaims: jwt.RegisteredClaims{Issuer: "sso"},
|
||||
},
|
||||
{
|
||||
test: "anonymous enabled, expired token, admin claim",
|
||||
anonymousEnabled: true,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"test-client"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now())},
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{common.ArgoCDClientAppID}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now())},
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "anonymous disabled, unexpired token, admin claim, incorrect audience",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"incorrect-audience"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
expectedErrorContains: common.TokenVerificationError,
|
||||
expectedClaims: nil,
|
||||
},
|
||||
// External OIDC (not bundled Dex)
|
||||
{
|
||||
test: "external OIDC: anonymous disabled, no audience",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.RegisteredClaims{ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
useDex: true,
|
||||
expectedErrorContains: common.TokenVerificationError,
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "external OIDC: anonymous enabled, no audience",
|
||||
anonymousEnabled: true,
|
||||
claims: jwt.RegisteredClaims{},
|
||||
useDex: true,
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "external OIDC: anonymous disabled, unexpired token, admin claim",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{common.ArgoCDClientAppID}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
useDex: true,
|
||||
expectedErrorContains: common.TokenVerificationError,
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
test: "external OIDC: anonymous enabled, unexpired token, admin claim",
|
||||
anonymousEnabled: true,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{common.ArgoCDClientAppID}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
useDex: true,
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "external OIDC: anonymous disabled, expired token, admin claim",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{common.ArgoCDClientAppID}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now())},
|
||||
useDex: true,
|
||||
expectedErrorContains: common.TokenVerificationError,
|
||||
expectedClaims: jwt.RegisteredClaims{Issuer: "sso"},
|
||||
},
|
||||
{
|
||||
test: "external OIDC: anonymous enabled, expired token, admin claim",
|
||||
anonymousEnabled: true,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{common.ArgoCDClientAppID}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now())},
|
||||
useDex: true,
|
||||
expectedErrorContains: "",
|
||||
expectedClaims: "",
|
||||
},
|
||||
{
|
||||
test: "external OIDC: anonymous disabled, unexpired token, admin claim, incorrect audience",
|
||||
anonymousEnabled: false,
|
||||
claims: jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"incorrect-audience"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))},
|
||||
useDex: true,
|
||||
expectedErrorContains: common.TokenVerificationError,
|
||||
expectedClaims: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testData := range tests {
|
||||
@@ -599,10 +687,15 @@ func TestAuthenticate_3rd_party_JWTs(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Must be declared here to avoid race.
|
||||
ctx := context.Background() //nolint:ineffassign,staticcheck
|
||||
ctx := context.Background() //nolint:ineffassign,staticcheck
|
||||
|
||||
argocd, dexURL := getTestServer(t, testDataCopy.anonymousEnabled, true)
|
||||
testDataCopy.claims.Issuer = fmt.Sprintf("%s/api/dex", dexURL)
|
||||
argocd, oidcURL := getTestServer(t, testDataCopy.anonymousEnabled, true, testDataCopy.useDex)
|
||||
|
||||
if testDataCopy.useDex {
|
||||
testDataCopy.claims.Issuer = fmt.Sprintf("%s/api/dex", oidcURL)
|
||||
} else {
|
||||
testDataCopy.claims.Issuer = oidcURL
|
||||
}
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodHS256, testDataCopy.claims)
|
||||
tokenString, err := token.SignedString([]byte("key"))
|
||||
require.NoError(t, err)
|
||||
@@ -653,7 +746,7 @@ func TestAuthenticate_no_request_metadata(t *testing.T) {
|
||||
t.Run(testDataCopy.test, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argocd, _ := getTestServer(t, testDataCopy.anonymousEnabled, true)
|
||||
argocd, _ := getTestServer(t, testDataCopy.anonymousEnabled, true, true)
|
||||
ctx := context.Background()
|
||||
|
||||
ctx, err := argocd.Authenticate(ctx)
|
||||
@@ -698,9 +791,9 @@ func TestAuthenticate_no_SSO(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Must be declared here to avoid race.
|
||||
ctx := context.Background() //nolint:ineffassign,staticcheck
|
||||
ctx := context.Background() //nolint:ineffassign,staticcheck
|
||||
|
||||
argocd, dexURL := getTestServer(t, testDataCopy.anonymousEnabled, false)
|
||||
argocd, dexURL := getTestServer(t, testDataCopy.anonymousEnabled, false, true)
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.RegisteredClaims{Issuer: fmt.Sprintf("%s/api/dex", dexURL)})
|
||||
tokenString, err := token.SignedString([]byte("key"))
|
||||
require.NoError(t, err)
|
||||
@@ -774,7 +867,7 @@ func TestAuthenticate_bad_request_metadata(t *testing.T) {
|
||||
test: "anonymous disabled, bad auth header",
|
||||
anonymousEnabled: false,
|
||||
metadata: metadata.MD{"authorization": []string{"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiJ9.TGGTTHuuGpEU8WgobXxkrBtW3NiR3dgw5LR-1DEW3BQ"}},
|
||||
expectedErrorMessage: "no audience found in the token",
|
||||
expectedErrorMessage: common.TokenVerificationError,
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
@@ -788,7 +881,7 @@ func TestAuthenticate_bad_request_metadata(t *testing.T) {
|
||||
test: "anonymous disabled, bad auth cookie",
|
||||
anonymousEnabled: false,
|
||||
metadata: metadata.MD{"grpcgateway-cookie": []string{"argocd.token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiJ9.TGGTTHuuGpEU8WgobXxkrBtW3NiR3dgw5LR-1DEW3BQ"}},
|
||||
expectedErrorMessage: "no audience found in the token",
|
||||
expectedErrorMessage: common.TokenVerificationError,
|
||||
expectedClaims: nil,
|
||||
},
|
||||
{
|
||||
@@ -807,9 +900,9 @@ func TestAuthenticate_bad_request_metadata(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Must be declared here to avoid race.
|
||||
ctx := context.Background() //nolint:ineffassign,staticcheck
|
||||
ctx := context.Background() //nolint:ineffassign,staticcheck
|
||||
|
||||
argocd, _ := getTestServer(t, testDataCopy.anonymousEnabled, true)
|
||||
argocd, _ := getTestServer(t, testDataCopy.anonymousEnabled, true, true)
|
||||
ctx = metadata.NewIncomingContext(context.Background(), testDataCopy.metadata)
|
||||
|
||||
ctx, err := argocd.Authenticate(ctx)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-application-controller go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-server go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080}"
|
||||
dex: sh -c "test $ARGOCD_IN_CI = true && exit 0; ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.0 serve /dex.yaml"
|
||||
dex: sh -c "test $ARGOCD_IN_CI = true && exit 0; ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.35.3 serve /dex.yaml"
|
||||
redis: sh -c "/usr/local/bin/redis-server --save "" --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}"
|
||||
repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-/tmp/argo-e2e/app/config/plugin} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_BINARY_NAME=argocd-repo-server go run ./cmd/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
|
||||
ui: sh -c "test $ARGOCD_IN_CI = true && exit 0; cd ui && ARGOCD_E2E_YARN_HOST=0.0.0.0 ${ARGOCD_E2E_YARN_CMD:-yarn} start"
|
||||
|
||||
@@ -3,12 +3,10 @@ package e2e
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -198,23 +196,18 @@ func TestDeleteAppResource(t *testing.T) {
|
||||
// demonstrate that we cannot use a standard sync when an immutable field is changed, we must use "force"
|
||||
func TestImmutableChange(t *testing.T) {
|
||||
SkipOnEnv(t, "OPENSHIFT")
|
||||
text := FailOnErr(Run(".", "kubectl", "get", "service", "-n", "kube-system", "kube-dns", "-o", "jsonpath={.spec.clusterIP}")).(string)
|
||||
parts := strings.Split(text, ".")
|
||||
n := rand.Intn(254)
|
||||
ip1 := fmt.Sprintf("%s.%s.%s.%d", parts[0], parts[1], parts[2], n)
|
||||
ip2 := fmt.Sprintf("%s.%s.%s.%d", parts[0], parts[1], parts[2], n+1)
|
||||
Given(t).
|
||||
Path("service").
|
||||
Path("secrets").
|
||||
When().
|
||||
CreateApp().
|
||||
PatchFile("service.yaml", fmt.Sprintf(`[{"op": "add", "path": "/spec/clusterIP", "value": "%s"}]`, ip1)).
|
||||
PatchFile("secrets.yaml", `[{"op": "add", "path": "/data/new-field", "value": "dGVzdA=="}, {"op": "add", "path": "/immutable", "value": true}]`).
|
||||
Sync().
|
||||
Then().
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
When().
|
||||
PatchFile("service.yaml", fmt.Sprintf(`[{"op": "add", "path": "/spec/clusterIP", "value": "%s"}]`, ip2)).
|
||||
PatchFile("secrets.yaml", `[{"op": "add", "path": "/data/new-field", "value": "dGVzdDI="}]`).
|
||||
IgnoreErrors().
|
||||
Sync().
|
||||
DoNotIgnoreErrors().
|
||||
@@ -223,14 +216,14 @@ func TestImmutableChange(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
|
||||
Expect(ResourceResultNumbering(1)).
|
||||
Expect(ResourceResultMatches(ResourceResult{
|
||||
Kind: "Service",
|
||||
Kind: "Secret",
|
||||
Version: "v1",
|
||||
Namespace: DeploymentNamespace(),
|
||||
Name: "my-service",
|
||||
Name: "test-secret",
|
||||
SyncPhase: "Sync",
|
||||
Status: "SyncFailed",
|
||||
HookPhase: "Failed",
|
||||
Message: `Service "my-service" is invalid`,
|
||||
Message: `Secret "test-secret" is invalid`,
|
||||
})).
|
||||
// now we can do this will a force
|
||||
Given().
|
||||
|
||||
@@ -717,6 +717,8 @@ func Declarative(filename string, values interface{}) (string, error) {
|
||||
}
|
||||
|
||||
func CreateSubmoduleRepos(repoType string) {
|
||||
oldEnv := os.Getenv("GIT_ALLOW_PROTOCOL")
|
||||
CheckError(os.Setenv("GIT_ALLOW_PROTOCOL", "file"))
|
||||
|
||||
// set-up submodule repo
|
||||
FailOnErr(Run("", "cp", "-Rf", "testdata/git-submodule/", submoduleDirectory()))
|
||||
@@ -748,6 +750,8 @@ func CreateSubmoduleRepos(repoType string) {
|
||||
FailOnErr(Run(submoduleParentDirectory(), "git", "remote", "add", "origin", os.Getenv("ARGOCD_E2E_GIT_SERVICE_SUBMODULE_PARENT")))
|
||||
FailOnErr(Run(submoduleParentDirectory(), "git", "push", "origin", "master", "-f"))
|
||||
}
|
||||
|
||||
CheckError(os.Setenv("GIT_ALLOW_PROTOCOL", oldEnv))
|
||||
}
|
||||
|
||||
// RestartRepoServer performs a restart of the repo server deployment and waits
|
||||
|
||||
@@ -40,6 +40,7 @@ $header: 120px;
|
||||
&__warning {
|
||||
font-size: 0.8em;
|
||||
color: darken($argo-status-warning-color, 20%);
|
||||
min-height: 1.2rem;
|
||||
}
|
||||
|
||||
&__refreshing-label {
|
||||
|
||||
@@ -169,9 +169,9 @@ export const ApplicationSyncPanel = ({application, selectedResource, hide}: {app
|
||||
none
|
||||
</a>
|
||||
</div>
|
||||
{!formApi.values.resources.every((item: boolean) => item) && (
|
||||
<div className='application-details__warning'>WARNING: partial synchronization is not recorded in history</div>
|
||||
)}
|
||||
<div className='application-details__warning'>
|
||||
{!formApi.values.resources.every((item: boolean) => item) && <div>WARNING: partial synchronization is not recorded in history</div>}
|
||||
</div>
|
||||
<div>
|
||||
{application.status.resources
|
||||
.filter(item => !item.hook)
|
||||
|
||||
@@ -10,12 +10,7 @@ export class ClustersService {
|
||||
}
|
||||
|
||||
public get(url: string, name: string): Promise<models.Cluster> {
|
||||
let queryName = '';
|
||||
if (url === undefined) {
|
||||
url = '';
|
||||
queryName = `?name=${name}`;
|
||||
}
|
||||
const requestUrl = `/clusters/${encodeURIComponent(url)}` + queryName;
|
||||
const requestUrl = `/clusters/${url ? encodeURIComponent(url) : encodeURIComponent(name)}?id.type=${url ? 'url' : 'name_escaped'}`;
|
||||
return requests.get(requestUrl).then(res => res.body as models.Cluster);
|
||||
}
|
||||
|
||||
|
||||
@@ -3070,9 +3070,9 @@ decko@^1.2.0:
|
||||
integrity sha1-/UPHNelnuAEzBohKVvvmZZlraBc=
|
||||
|
||||
decode-uri-component@^0.2.0:
|
||||
version "0.2.0"
|
||||
resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545"
|
||||
integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=
|
||||
version "0.2.2"
|
||||
resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.2.tgz#e69dbe25d37941171dd540e024c444cd5188e1e9"
|
||||
integrity sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ==
|
||||
|
||||
deep-diff@^0.3.5:
|
||||
version "0.3.8"
|
||||
|
||||
@@ -134,8 +134,7 @@ func Version(shortForm bool) (string, error) {
|
||||
func (h *helm) GetParameters(valuesFiles []pathutil.ResolvedFilePath, appPath, repoRoot string) (map[string]string, error) {
|
||||
var values []string
|
||||
// Don't load values.yaml if it's an out-of-bounds link.
|
||||
if resolved, _, err := pathutil.ResolveFilePath(appPath, repoRoot, "values.yaml", []string{}); err == nil {
|
||||
fmt.Println(resolved)
|
||||
if _, _, err := pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, "values.yaml", []string{}); err == nil {
|
||||
out, err := h.cmd.inspectValues(".")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -59,7 +59,7 @@ func TestHelmTemplateValues(t *testing.T) {
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", false)
assert.NoError(t, err)
valuesPath, _, err := path.ResolveFilePath(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
opts := TemplateOpts{
Name: "test",
@@ -98,7 +98,7 @@ func TestHelmGetParamsValueFiles(t *testing.T) {
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", false)
assert.NoError(t, err)
valuesPath, _, err := path.ResolveFilePath(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
params, err := h.GetParameters([]path.ResolvedFilePath{valuesPath}, repoRootAbs, repoRootAbs)
assert.Nil(t, err)
@@ -113,9 +113,9 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", false)
assert.NoError(t, err)
valuesMissingPath, _, err := path.ResolveFilePath(repoRootAbs, repoRootAbs, "values-missing.yaml", nil)
valuesMissingPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-missing.yaml", nil)
require.NoError(t, err)
valuesProductionPath, _, err := path.ResolveFilePath(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
valuesProductionPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
params, err := h.GetParameters([]path.ResolvedFilePath{valuesMissingPath, valuesProductionPath}, repoRootAbs, repoRootAbs)
assert.Nil(t, err)

@@ -10,10 +10,14 @@ import (
log "github.com/sirupsen/logrus"
)

// ResolvedFilePath represents a resolved file path and intended to prevent unintentional use of not verified file path.
// It is always either a URL or an absolute path.
// ResolvedFilePath represents a resolved file path and is intended to prevent unintentional use of an unverified file
// path. It is always either a URL or an absolute path.
type ResolvedFilePath string

// ResolvedFileOrDirectoryPath represents a resolved file or directory path and is intended to prevent unintentional use
// of an unverified file or directory path. It is an absolute path.
type ResolvedFileOrDirectoryPath string

// resolveSymbolicLinkRecursive resolves the symlink path recursively to its
// canonical path on the file system, with a maximum nesting level of maxDepth.
// If path is not a symlink, returns the verbatim copy of path and err of nil.
@@ -60,7 +64,24 @@ func isURLSchemeAllowed(scheme string, allowed []string) bool {
return isAllowed && scheme != ""
}

// ResolveFilePath will inspect and resolve given file, and make sure that its final path is within the boundaries of
// We do not provide the path in the error message, because it will be
// returned to the user and could be used for information gathering.
// Instead, we log the concrete error details.
func resolveFailure(path string, err error) error {
log.Errorf("failed to resolve path '%s': %v", path, err)
return fmt.Errorf("internal error: failed to resolve path. Check logs for more details")
}

func ResolveFileOrDirectoryPath(appPath, repoRoot, dir string) (ResolvedFileOrDirectoryPath, error) {
path, err := resolveFileOrDirectory(appPath, repoRoot, dir, true)
if err != nil {
return "", err
}

return ResolvedFileOrDirectoryPath(path), nil
}

// ResolveValueFilePathOrUrl will inspect and resolve given file, and make sure that its final path is within the boundaries of
// the path specified in repoRoot.
//
// appPath is the path we're operating in, e.g. where a Helm chart was unpacked
@@ -88,15 +109,7 @@ func isURLSchemeAllowed(scheme string, allowed []string) bool {
//
// isRemote will be set to true if valueFile is an URL using an allowed
// protocol scheme, or to false if it resolved to a local file.
func ResolveFilePath(appPath, repoRoot, valueFile string, allowedURLSchemes []string) (resolvedPath ResolvedFilePath, isRemote bool, err error) {
// We do not provide the path in the error message, because it will be
// returned to the user and could be used for information gathering.
// Instead, we log the concrete error details.
resolveFailure := func(path string, err error) error {
log.Errorf("failed to resolve path '%s': %v", path, err)
return fmt.Errorf("internal error: failed to resolve path. Check logs for more details")
}

func ResolveValueFilePathOrUrl(appPath, repoRoot, valueFile string, allowedURLSchemes []string) (resolvedPath ResolvedFilePath, isRemote bool, err error) {
// A value file can be specified as an URL to a remote resource.
// We only allow certain URL schemes for remote value files.
url, err := url.Parse(valueFile)
@@ -111,36 +124,45 @@ func ResolveFilePath(appPath, repoRoot, valueFile string, allowedURLSchemes []st
}
}

path, err := resolveFileOrDirectory(appPath, repoRoot, valueFile, false)
if err != nil {
return "", false, err
}

return ResolvedFilePath(path), false, nil
}

func resolveFileOrDirectory(appPath string, repoRoot string, fileOrDirectory string, allowResolveToRoot bool) (string, error) {
// Ensure that our repository root is absolute
absRepoPath, err := filepath.Abs(repoRoot)
if err != nil {
return "", false, resolveFailure(repoRoot, err)
return "", resolveFailure(repoRoot, err)
}

// If the path to the file is relative, join it with the current working directory (appPath)
// If the path to the file or directory is relative, join it with the current working directory (appPath)
// Otherwise, join it with the repository's root
path := valueFile
path := fileOrDirectory
if !filepath.IsAbs(path) {
absWorkDir, err := filepath.Abs(appPath)
if err != nil {
return "", false, resolveFailure(repoRoot, err)
return "", resolveFailure(repoRoot, err)
}
path = filepath.Join(absWorkDir, path)
} else {
path = filepath.Join(absRepoPath, path)
}

// Ensure any symbolic link is resolved before we
// Ensure any symbolic link is resolved before we evaluate the path
delinkedPath, err := resolveSymbolicLinkRecursive(path, 10)
if err != nil {
return "", false, resolveFailure(path, err)
return "", resolveFailure(repoRoot, err)
}
path = delinkedPath

// Resolve the joined path to an absolute path
path, err = filepath.Abs(path)
if err != nil {
return "", false, resolveFailure(path, err)
return "", resolveFailure(repoRoot, err)
}

// Ensure our root path has a trailing slash, otherwise the following check
@@ -150,10 +172,17 @@ func ResolveFilePath(appPath, repoRoot, valueFile string, allowedURLSchemes []st
requiredRootPath += string(os.PathSeparator)
}

// Make sure that the resolved path to values file is within the repository's root path
if !strings.HasPrefix(path, requiredRootPath) {
return "", false, fmt.Errorf("value file '%s' resolved to outside repository root", valueFile)
resolvedToRoot := path+string(os.PathSeparator) == requiredRootPath
if resolvedToRoot {
if !allowResolveToRoot {
return "", resolveFailure(path, fmt.Errorf("path resolved to repository root, which is not allowed"))
}
} else {
// Make sure that the resolved path to file is within the repository's root path
if !strings.HasPrefix(path, requiredRootPath) {
return "", fmt.Errorf("file '%s' resolved to outside repository root", fileOrDirectory)
}
}

return ResolvedFilePath(path), false, nil
return path, nil
}

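To make the new behaviour concrete, here is an illustrative sketch (not part of the change) that mirrors the tests which follow; it assumes this package's context plus a fmt import.

// Illustrative only; expected outputs are shown as comments.
func ExampleResolveValueFilePathOrUrl() {
	allowed := []string{"http", "https"}

	// A relative value file resolves to an absolute path under the app directory.
	p, remote, _ := ResolveValueFilePathOrUrl("/foo/bar", "/foo", "baz/bim.yaml", allowed)
	fmt.Println(p, remote) // /foo/bar/baz/bim.yaml false

	// Escaping the repository root is rejected.
	_, _, err := ResolveValueFilePathOrUrl("/foo/bar", "/foo", "baz/../../../bim.yaml", allowed)
	fmt.Println(err != nil) // true

	// URLs with an allowed scheme are returned verbatim and flagged as remote.
	u, remote, _ := ResolveValueFilePathOrUrl("/foo/bar", "/foo", "https://some.where/foo.yaml", allowed)
	fmt.Println(u, remote) // https://some.where/foo.yaml true

	// Directories may resolve to the repository root itself (e.g. jsonnet library paths).
	d, _ := ResolveFileOrDirectoryPath("/foo", "/foo", "./")
	fmt.Println(d) // /foo
}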
@@ -98,19 +98,19 @@ var allowedRemoteProtocols = []string{"http", "https"}
|
||||
|
||||
func Test_resolveFilePath(t *testing.T) {
|
||||
t.Run("Resolve normal relative path into absolute path", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath("/foo/bar", "/foo", "baz/bim.yaml", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl("/foo/bar", "/foo", "baz/bim.yaml", allowedRemoteProtocols)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "/foo/bar/baz/bim.yaml", string(p))
|
||||
})
|
||||
t.Run("Resolve normal relative path into absolute path", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath("/foo/bar", "/foo", "baz/../../bim.yaml", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl("/foo/bar", "/foo", "baz/../../bim.yaml", allowedRemoteProtocols)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "/foo/bim.yaml", string(p))
|
||||
})
|
||||
t.Run("Error on path resolving outside repository root", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath("/foo/bar", "/foo", "baz/../../../bim.yaml", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl("/foo/bar", "/foo", "baz/../../../bim.yaml", allowedRemoteProtocols)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "outside repository root")
|
||||
assert.False(t, remote)
|
||||
@@ -118,26 +118,26 @@ func Test_resolveFilePath(t *testing.T) {
|
||||
})
|
||||
t.Run("Return verbatim URL", func(t *testing.T) {
|
||||
url := "https://some.where/foo,yaml"
|
||||
p, remote, err := ResolveFilePath("/foo/bar", "/foo", url, allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl("/foo/bar", "/foo", url, allowedRemoteProtocols)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, remote)
|
||||
assert.Equal(t, url, string(p))
|
||||
})
|
||||
t.Run("URL scheme not allowed", func(t *testing.T) {
|
||||
url := "file:///some.where/foo,yaml"
|
||||
p, remote, err := ResolveFilePath("/foo/bar", "/foo", url, allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl("/foo/bar", "/foo", url, allowedRemoteProtocols)
|
||||
assert.Error(t, err)
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "", string(p))
|
||||
})
|
||||
t.Run("Implicit URL by absolute path", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath("/foo/bar", "/foo", "/baz.yaml", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl("/foo/bar", "/foo", "/baz.yaml", allowedRemoteProtocols)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "/foo/baz.yaml", string(p))
|
||||
})
|
||||
t.Run("Relative app path", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath(".", "/foo", "/baz.yaml", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl(".", "/foo", "/baz.yaml", allowedRemoteProtocols)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "/foo/baz.yaml", string(p))
|
||||
@@ -145,37 +145,42 @@ func Test_resolveFilePath(t *testing.T) {
|
||||
t.Run("Relative repo path", func(t *testing.T) {
|
||||
c, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
p, remote, err := ResolveFilePath(".", ".", "baz.yaml", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl(".", ".", "baz.yaml", allowedRemoteProtocols)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, c+"/baz.yaml", string(p))
|
||||
})
|
||||
t.Run("Overlapping root prefix without trailing slash", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath(".", "/foo", "../foo2/baz.yaml", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl(".", "/foo", "../foo2/baz.yaml", allowedRemoteProtocols)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "outside repository root")
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "", string(p))
|
||||
})
|
||||
t.Run("Overlapping root prefix with trailing slash", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath(".", "/foo/", "../foo2/baz.yaml", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl(".", "/foo/", "../foo2/baz.yaml", allowedRemoteProtocols)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "outside repository root")
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "", string(p))
|
||||
})
|
||||
t.Run("Garbage input as values file", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath(".", "/foo/", "kfdj\\ks&&&321209.,---e32908923%$§!\"", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl(".", "/foo/", "kfdj\\ks&&&321209.,---e32908923%$§!\"", allowedRemoteProtocols)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "outside repository root")
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "", string(p))
|
||||
})
|
||||
t.Run("NUL-byte path input as values file", func(t *testing.T) {
|
||||
p, remote, err := ResolveFilePath(".", "/foo/", "\000", allowedRemoteProtocols)
|
||||
p, remote, err := ResolveValueFilePathOrUrl(".", "/foo/", "\000", allowedRemoteProtocols)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "outside repository root")
|
||||
assert.False(t, remote)
|
||||
assert.Equal(t, "", string(p))
|
||||
})
|
||||
t.Run("Resolve root path into absolute path - jsonnet library path", func(t *testing.T) {
|
||||
p, err := ResolveFileOrDirectoryPath("/foo", "/foo", "./")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "/foo", string(p))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -348,9 +348,12 @@ func (a *ClientApp) HandleCallback(w http.ResponseWriter, r *http.Request) {
http.Error(w, "no id_token in token response", http.StatusInternalServerError)
return
}
idToken, err := a.provider.Verify(a.clientID, idTokenRAW)

idToken, err := a.provider.Verify(idTokenRAW, a.settings)

if err != nil {
http.Error(w, fmt.Sprintf("invalid session token: %v", err), http.StatusInternalServerError)
log.Warnf("Failed to verify token: %s", err)
http.Error(w, common.TokenVerificationError, http.StatusInternalServerError)
return
}
path := "/"

@@ -89,7 +89,7 @@ func (p *fakeProvider) ParseConfig() (*OIDCConfiguration, error) {
return nil, nil
}

func (p *fakeProvider) Verify(_, _ string) (*gooidc.IDToken, error) {
func (p *fakeProvider) Verify(_ string, _ *settings.ArgoCDSettings) (*gooidc.IDToken, error) {
return nil, nil
}

@@ -2,6 +2,7 @@ package oidc

import (
"context"
"errors"
"fmt"
"net/http"
"strings"
@@ -9,8 +10,13 @@ import (
gooidc "github.com/coreos/go-oidc"
log "github.com/sirupsen/logrus"
"golang.org/x/oauth2"

"github.com/argoproj/argo-cd/v2/util/security"
"github.com/argoproj/argo-cd/v2/util/settings"
)

var ErrTokenExpired = errors.New("token is expired")

// Provider is a wrapper around go-oidc provider to also provide the following features:
// 1. lazy initialization/querying of the provider
// 2. automatic detection of change in signing keys
@@ -23,7 +29,7 @@ type Provider interface {

ParseConfig() (*OIDCConfiguration, error)

Verify(clientID, tokenString string) (*gooidc.IDToken, error)
Verify(tokenString string, argoSettings *settings.ArgoCDSettings) (*gooidc.IDToken, error)
}

type providerImpl struct {
@@ -69,13 +75,70 @@ func (p *providerImpl) newGoOIDCProvider() (*gooidc.Provider, error) {
return prov, nil
}

func (p *providerImpl) Verify(clientID, tokenString string) (*gooidc.IDToken, error) {
func (p *providerImpl) Verify(tokenString string, argoSettings *settings.ArgoCDSettings) (*gooidc.IDToken, error) {
// According to the JWT spec, the aud claim is optional. The spec also says (emphasis mine):
//
// If the principal processing the claim does not identify itself with a value in the "aud" claim _when this
// claim is present_, then the JWT MUST be rejected.
//
// - https://www.rfc-editor.org/rfc/rfc7519#section-4.1.3
//
// If the claim is not present, we can skip the audience claim check (called the "ClientID check" in go-oidc's
// terminology).
//
// The OIDC spec says that the aud claim is required (https://openid.net/specs/openid-connect-core-1_0.html#IDToken).
// But we cannot assume that all OIDC providers will follow the spec. For Argo CD <2.6.0, we will default to
// allowing the aud claim to be optional. In Argo CD >=2.6.0, we will default to requiring the aud claim to be
// present and give users the skipAudienceCheckWhenTokenHasNoAudience setting to revert the behavior if necessary.
//
// At this point, we have not verified that the token has not been altered. All code paths below MUST VERIFY
// THE TOKEN SIGNATURE to confirm that an attacker did not maliciously remove the "aud" claim.
unverifiedHasAudClaim, err := security.UnverifiedHasAudClaim(tokenString)
if err != nil {
return nil, fmt.Errorf("failed to determine whether the token has an aud claim: %w", err)
}

var idToken *gooidc.IDToken
if !unverifiedHasAudClaim {
idToken, err = p.verify("", tokenString, argoSettings.SkipAudienceCheckWhenTokenHasNoAudience())
} else {
allowedAudiences := argoSettings.OAuth2AllowedAudiences()
if len(allowedAudiences) == 0 {
return nil, errors.New("token has an audience claim, but no allowed audiences are configured")
}
// Token must be verified for at least one allowed audience
for _, aud := range allowedAudiences {
idToken, err = p.verify(aud, tokenString, false)
if err != nil && strings.HasPrefix(err.Error(), "oidc: token is expired") {
// If the token is expired, we won't bother checking other audiences. It's important to return a
// ErrTokenExpired instead of an error related to an incorrect audience, because the caller may
// have specific behavior to handle expired tokens.
break
}
if err == nil {
break
}
}
}

if err != nil {
if strings.HasPrefix(err.Error(), "oidc: token is expired") {
return nil, ErrTokenExpired
}
return nil, fmt.Errorf("failed to verify token: %w", err)
}

return idToken, nil
}

func (p *providerImpl) verify(clientID, tokenString string, skipClientIDCheck bool) (*gooidc.IDToken, error) {
ctx := context.Background()
prov, err := p.provider()
if err != nil {
return nil, err
}
verifier := prov.Verifier(&gooidc.Config{ClientID: clientID})
config := &gooidc.Config{ClientID: clientID, SkipClientIDCheck: skipClientIDCheck}
verifier := prov.Verifier(config)
idToken, err := verifier.Verify(ctx, tokenString)
if err != nil {
// HACK: if we failed token verification, it's possible the reason was because dex
@@ -93,7 +156,7 @@ func (p *providerImpl) Verify(clientID, tokenString string) (*gooidc.IDToken, er
// return original error if we fail to re-initialize OIDC
return nil, err
}
verifier = newProvider.Verifier(&gooidc.Config{ClientID: clientID})
verifier = newProvider.Verifier(config)
idToken, err = verifier.Verify(ctx, tokenString)
if err != nil {
return nil, err

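The two settings consumed above are read from the oidc.config block in argocd-cm, as the settings and session manager tests later in this diff show. The following is a hedged configuration sketch (placeholder issuer and client values), using the field names exactly as they appear in those tests; per the comments above, the allowed audiences default to the configured clientID (and cliClientID, when set) if the list is left empty.

// Sketch of the relevant oidc.config fields, not a recommended production config.
const oidcConfigYAML = `
name: Test
issuer: https://idp.example.com
clientID: xxx
clientSecret: yyy
requestedScopes: ["oidc"]
# Tokens whose audience is not in this list are rejected
# (defaults to clientID / cliClientID when omitted or empty):
allowedAudiences:
  - argo-cd
# Set to true to keep accepting tokens that carry no aud claim at all:
skipAudienceCheckWhenTokenHasNoAudience: false
`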
util/security/jwt.go (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// parseJWT parses a jwt and returns it as json bytes.
|
||||
//
|
||||
// This function DOES NOT VERIFY THE TOKEN. You still have to verify the token to confirm that the token holder has not
|
||||
// altered the claims.
|
||||
//
|
||||
// This code is copied almost verbatim from go-oidc (https://github.com/coreos/go-oidc).
|
||||
func parseJWT(p string) ([]byte, error) {
|
||||
parts := strings.Split(p, ".")
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("malformed jwt, expected 3 parts got %d", len(parts))
|
||||
}
|
||||
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed jwt payload: %v", err)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
type audience []string
|
||||
|
||||
// UnmarshalJSON allows us to unmarshal either a single audience or a list of audiences.
|
||||
// Taken from: https://github.com/coreos/go-oidc/blob/a8ceb9a2043fca2e43518633920db746808b1138/oidc/oidc.go#L475
|
||||
func (a *audience) UnmarshalJSON(b []byte) error {
|
||||
var s string
|
||||
if json.Unmarshal(b, &s) == nil {
|
||||
*a = audience{s}
|
||||
return nil
|
||||
}
|
||||
var auds []string
|
||||
if err := json.Unmarshal(b, &auds); err != nil {
|
||||
return err
|
||||
}
|
||||
*a = auds
|
||||
return nil
|
||||
}
|
||||
|
||||
// jwtWithOnlyAudClaim represents a jwt where only the "aud" claim is present. This struct allows us to unmarshal a jwt
|
||||
// and be confident that the only information retrieved from that jwt is the "aud" claim.
|
||||
type jwtWithOnlyAudClaim struct {
|
||||
Aud audience `json:"aud"`
|
||||
}
|
||||
|
||||
// getUnverifiedAudClaim gets the "aud" claim from a jwt.
|
||||
//
|
||||
// This function DOES NOT VERIFY THE TOKEN. You still have to verify the token to confirm that the token holder has not
|
||||
// altered the "aud" claim.
|
||||
//
|
||||
// This code is copied almost verbatim from go-oidc (https://github.com/coreos/go-oidc).
|
||||
func getUnverifiedAudClaim(rawIDToken string) ([]string, error) {
|
||||
payload, err := parseJWT(rawIDToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed jwt: %v", err)
|
||||
}
|
||||
var token jwtWithOnlyAudClaim
|
||||
if err = json.Unmarshal(payload, &token); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal claims: %v", err)
|
||||
}
|
||||
return token.Aud, nil
|
||||
}
|
||||
|
||||
// UnverifiedHasAudClaim returns whether the "aud" claim is present in the given JWT.
|
||||
//
|
||||
// This function DOES NOT VERIFY THE TOKEN. You still have to verify the token to confirm that the token holder has not
|
||||
// altered the "aud" claim.
|
||||
func UnverifiedHasAudClaim(rawIDToken string) (bool, error) {
|
||||
aud, err := getUnverifiedAudClaim(rawIDToken)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to determine whether token had an audience claim: %w", err)
|
||||
}
|
||||
return aud != nil, nil
|
||||
}
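The helper above is what the OIDC provider wrapper earlier in this diff calls before deciding how to verify a token. A small illustrative sketch of its use, assuming the util/security package shown here; the custom audience unmarshaller exists because RFC 7519 allows "aud" to be either a single string (e.g. {"aud": "argo-cd"}) or an array (e.g. {"aud": ["argo-cd", "argo-cd-cli"]}).

// Sketch only, not an excerpt.
hasAud, err := security.UnverifiedHasAudClaim(rawIDToken)
if err != nil {
	return fmt.Errorf("malformed JWT: %w", err)
}
// hasAud feeds the branch in Verify above: with an audience present, the token is
// checked against the allowed audiences; without one, the
// skipAudienceCheckWhenTokenHasNoAudience setting decides whether to proceed.
// Either way, the token signature must still be verified before any claim is trusted.
_ = hasAud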
|
||||
util/security/jwt_test.go (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
utiltest "github.com/argoproj/argo-cd/v2/util/test"
|
||||
)
|
||||
|
||||
func Test_UnverifiedHasAudClaim(t *testing.T) {
|
||||
tokenForAud := func(t *testing.T, aud jwt.ClaimStrings) string {
|
||||
claims := jwt.RegisteredClaims{Audience: aud, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS512, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
return tokenString
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
aud jwt.ClaimStrings
|
||||
expectedHasAud bool
|
||||
}{
|
||||
{
|
||||
name: "no audience",
|
||||
aud: jwt.ClaimStrings{},
|
||||
expectedHasAud: false,
|
||||
},
|
||||
{
|
||||
name: "one empty audience",
|
||||
aud: jwt.ClaimStrings{""},
|
||||
expectedHasAud: true,
|
||||
},
|
||||
{
|
||||
name: "one non-empty audience",
|
||||
aud: jwt.ClaimStrings{"test"},
|
||||
expectedHasAud: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
out, err := UnverifiedHasAudClaim(tokenForAud(t, testCaseCopy.aud))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expectedHasAud, out)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,6 @@ import (
"strings"
"time"

oidc "github.com/coreos/go-oidc"
"github.com/golang-jwt/jwt/v4"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
@@ -413,7 +412,7 @@ func (mgr *SessionManager) VerifyUsernamePassword(username string, password stri
// introduces random delay to protect from timing-based user enumeration attack
delayNanoseconds := verificationDelayNoiseMin.Nanoseconds() +
int64(rand.Intn(int(verificationDelayNoiseMax.Nanoseconds()-verificationDelayNoiseMin.Nanoseconds())))
// take into account amount of time spent since the request start
// take into account amount of time spent since the request start
delayNanoseconds = delayNanoseconds - time.Since(start).Nanoseconds()
if delayNanoseconds > 0 {
mgr.sleep(time.Duration(delayNanoseconds))
@@ -477,31 +476,28 @@ func (mgr *SessionManager) VerifyToken(tokenString string) (jwt.Claims, string,
return nil, "", err
}

// Token must be verified for at least one audience
// TODO(jannfis): Is this the right way? Shouldn't we know our audience and only validate for the correct one?
var idToken *oidc.IDToken
for _, aud := range claims.Audience {
idToken, err = prov.Verify(aud, tokenString)
if err == nil {
break
}
argoSettings, err := mgr.settingsMgr.GetSettings()
if err != nil {
return nil, "", fmt.Errorf("cannot access settings while verifying the token: %w", err)
}
if argoSettings == nil {
return nil, "", fmt.Errorf("settings are not available while verifying the token")
}

idToken, err := prov.Verify(tokenString, argoSettings)

// The token verification has failed. If the token has expired, we will
// return a dummy claims only containing a value for the issuer, so the
// UI can handle expired tokens appropriately.
if err != nil {
if strings.HasPrefix(err.Error(), "oidc: token is expired") {
log.Warnf("Failed to verify token: %s", err)
if errors.Is(err, oidcutil.ErrTokenExpired) {
claims = jwt.RegisteredClaims{
Issuer: "sso",
}
return claims, "", err
return claims, "", common.TokenVerificationErr
}
return nil, "", err
}

if idToken == nil {
return nil, "", fmt.Errorf("no audience found in the token")
return nil, "", common.TokenVerificationErr
}

var claims jwt.MapClaims
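A hedged caller-side sketch of the reworked error handling above (not an excerpt): failed or expired SSO tokens now surface as the generic common.TokenVerificationErr, with dummy claims whose Issuer is "sso", so the UI can re-trigger login without the response leaking verification details.

claims, _, err := mgr.VerifyToken(tokenString)
switch {
case err == nil:
	_ = claims // token verified; proceed with the request
case errors.Is(err, common.TokenVerificationErr):
	// Generic verification failure (possibly an expired SSO token): send the user
	// back through the login flow; the concrete reason stays in the server logs.
default:
	// Unexpected failure, e.g. settings could not be loaded.
}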
|
||||
|
||||
@@ -559,9 +559,7 @@ rootCA: |
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.Error(t, err)
|
||||
if !strings.Contains(err.Error(), "certificate signed by unknown authority") && !strings.Contains(err.Error(), "certificate is not trusted") {
|
||||
t.Fatal("did not receive expected certificate verification failure error")
|
||||
}
|
||||
assert.ErrorIs(t, err, common.TokenVerificationErr)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, TLS is configured", func(t *testing.T) {
|
||||
@@ -596,9 +594,7 @@ requestedScopes: ["oidc"]`, oidcTestServer.URL),
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.Error(t, err)
|
||||
if !strings.Contains(err.Error(), "certificate signed by unknown authority") && !strings.Contains(err.Error(), "certificate is not trusted") {
|
||||
t.Fatal("did not receive expected certificate verification failure error")
|
||||
}
|
||||
assert.ErrorIs(t, err, common.TokenVerificationErr)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is Dex, TLS is configured", func(t *testing.T) {
|
||||
@@ -633,9 +629,7 @@ requestedScopes: ["oidc"]`, oidcTestServer.URL),
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.Error(t, err)
|
||||
if !strings.Contains(err.Error(), "certificate signed by unknown authority") && !strings.Contains(err.Error(), "certificate is not trusted") {
|
||||
t.Fatal("did not receive expected certificate verification failure error")
|
||||
}
|
||||
assert.ErrorIs(t, err, common.TokenVerificationErr)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, TLS is configured, OIDCTLSInsecureSkipVerify is true", func(t *testing.T) {
|
||||
@@ -703,4 +697,295 @@ requestedScopes: ["oidc"]`, oidcTestServer.URL),
|
||||
assert.NotContains(t, err.Error(), "certificate is not trusted")
|
||||
assert.NotContains(t, err.Error(), "certificate signed by unknown authority")
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, audience is not specified", func(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"url": "",
|
||||
"oidc.config": fmt.Sprintf(`
|
||||
name: Test
|
||||
issuer: %s
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]`, oidcTestServer.URL),
|
||||
"oidc.tls.insecure.skip.verify": "true", // This isn't what we're testing.
|
||||
}
|
||||
|
||||
// This is not actually used in the test. The test only calls the OIDC test server. But a valid cert/key pair
|
||||
// must be set to test VerifyToken's behavior when Argo CD is configured with TLS enabled.
|
||||
secretConfig := map[string][]byte{
|
||||
"tls.crt": utiltest.Cert,
|
||||
"tls.key": utiltest.PrivateKey,
|
||||
}
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClientWithConfig(config, secretConfig), "argocd")
|
||||
mgr := NewSessionManager(settingsMgr, getProjLister(), "", NewUserStateStorage(nil))
|
||||
mgr.verificationDelayNoiseEnabled = false
|
||||
|
||||
claims := jwt.RegisteredClaims{Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
claims.Issuer = oidcTestServer.URL
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, audience is not specified but is required", func(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"url": "",
|
||||
"oidc.config": fmt.Sprintf(`
|
||||
name: Test
|
||||
issuer: %s
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]
|
||||
skipAudienceCheckWhenTokenHasNoAudience: false`, oidcTestServer.URL),
|
||||
"oidc.tls.insecure.skip.verify": "true", // This isn't what we're testing.
|
||||
}
|
||||
|
||||
// This is not actually used in the test. The test only calls the OIDC test server. But a valid cert/key pair
|
||||
// must be set to test VerifyToken's behavior when Argo CD is configured with TLS enabled.
|
||||
secretConfig := map[string][]byte{
|
||||
"tls.crt": utiltest.Cert,
|
||||
"tls.key": utiltest.PrivateKey,
|
||||
}
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClientWithConfig(config, secretConfig), "argocd")
|
||||
mgr := NewSessionManager(settingsMgr, getProjLister(), "", NewUserStateStorage(nil))
|
||||
mgr.verificationDelayNoiseEnabled = false
|
||||
|
||||
claims := jwt.RegisteredClaims{Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
claims.Issuer = oidcTestServer.URL
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS512, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, common.TokenVerificationErr)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, audience is client ID, no allowed list specified", func(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"url": "",
|
||||
"oidc.config": fmt.Sprintf(`
|
||||
name: Test
|
||||
issuer: %s
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]`, oidcTestServer.URL),
|
||||
"oidc.tls.insecure.skip.verify": "true", // This isn't what we're testing.
|
||||
}
|
||||
|
||||
// This is not actually used in the test. The test only calls the OIDC test server. But a valid cert/key pair
|
||||
// must be set to test VerifyToken's behavior when Argo CD is configured with TLS enabled.
|
||||
secretConfig := map[string][]byte{
|
||||
"tls.crt": utiltest.Cert,
|
||||
"tls.key": utiltest.PrivateKey,
|
||||
}
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClientWithConfig(config, secretConfig), "argocd")
|
||||
mgr := NewSessionManager(settingsMgr, getProjLister(), "", NewUserStateStorage(nil))
|
||||
mgr.verificationDelayNoiseEnabled = false
|
||||
|
||||
claims := jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"xxx"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
claims.Issuer = oidcTestServer.URL
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, audience is in allowed list", func(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"url": "",
|
||||
"oidc.config": fmt.Sprintf(`
|
||||
name: Test
|
||||
issuer: %s
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]
|
||||
allowedAudiences:
|
||||
- something`, oidcTestServer.URL),
|
||||
"oidc.tls.insecure.skip.verify": "true", // This isn't what we're testing.
|
||||
}
|
||||
|
||||
// This is not actually used in the test. The test only calls the OIDC test server. But a valid cert/key pair
|
||||
// must be set to test VerifyToken's behavior when Argo CD is configured with TLS enabled.
|
||||
secretConfig := map[string][]byte{
|
||||
"tls.crt": utiltest.Cert,
|
||||
"tls.key": utiltest.PrivateKey,
|
||||
}
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClientWithConfig(config, secretConfig), "argocd")
|
||||
mgr := NewSessionManager(settingsMgr, getProjLister(), "", NewUserStateStorage(nil))
|
||||
mgr.verificationDelayNoiseEnabled = false
|
||||
|
||||
claims := jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"something"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
claims.Issuer = oidcTestServer.URL
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, audience is not in allowed list", func(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"url": "",
|
||||
"oidc.config": fmt.Sprintf(`
|
||||
name: Test
|
||||
issuer: %s
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]
|
||||
allowedAudiences:
|
||||
- something-else`, oidcTestServer.URL),
|
||||
"oidc.tls.insecure.skip.verify": "true", // This isn't what we're testing.
|
||||
}
|
||||
|
||||
// This is not actually used in the test. The test only calls the OIDC test server. But a valid cert/key pair
|
||||
// must be set to test VerifyToken's behavior when Argo CD is configured with TLS enabled.
|
||||
secretConfig := map[string][]byte{
|
||||
"tls.crt": utiltest.Cert,
|
||||
"tls.key": utiltest.PrivateKey,
|
||||
}
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClientWithConfig(config, secretConfig), "argocd")
|
||||
mgr := NewSessionManager(settingsMgr, getProjLister(), "", NewUserStateStorage(nil))
|
||||
mgr.verificationDelayNoiseEnabled = false
|
||||
|
||||
claims := jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"something"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
claims.Issuer = oidcTestServer.URL
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, common.TokenVerificationErr)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, audience is not client ID, and there is no allow list", func(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"url": "",
|
||||
"oidc.config": fmt.Sprintf(`
|
||||
name: Test
|
||||
issuer: %s
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]`, oidcTestServer.URL),
|
||||
"oidc.tls.insecure.skip.verify": "true", // This isn't what we're testing.
|
||||
}
|
||||
|
||||
// This is not actually used in the test. The test only calls the OIDC test server. But a valid cert/key pair
|
||||
// must be set to test VerifyToken's behavior when Argo CD is configured with TLS enabled.
|
||||
secretConfig := map[string][]byte{
|
||||
"tls.crt": utiltest.Cert,
|
||||
"tls.key": utiltest.PrivateKey,
|
||||
}
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClientWithConfig(config, secretConfig), "argocd")
|
||||
mgr := NewSessionManager(settingsMgr, getProjLister(), "", NewUserStateStorage(nil))
|
||||
mgr.verificationDelayNoiseEnabled = false
|
||||
|
||||
claims := jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"something"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
claims.Issuer = oidcTestServer.URL
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, common.TokenVerificationErr)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, audience is specified, but allow list is empty", func(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"url": "",
|
||||
"oidc.config": fmt.Sprintf(`
|
||||
name: Test
|
||||
issuer: %s
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]
|
||||
allowedAudiences: []`, oidcTestServer.URL),
|
||||
"oidc.tls.insecure.skip.verify": "true", // This isn't what we're testing.
|
||||
}
|
||||
|
||||
// This is not actually used in the test. The test only calls the OIDC test server. But a valid cert/key pair
|
||||
// must be set to test VerifyToken's behavior when Argo CD is configured with TLS enabled.
|
||||
secretConfig := map[string][]byte{
|
||||
"tls.crt": utiltest.Cert,
|
||||
"tls.key": utiltest.PrivateKey,
|
||||
}
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClientWithConfig(config, secretConfig), "argocd")
|
||||
mgr := NewSessionManager(settingsMgr, getProjLister(), "", NewUserStateStorage(nil))
|
||||
mgr.verificationDelayNoiseEnabled = false
|
||||
|
||||
claims := jwt.RegisteredClaims{Audience: jwt.ClaimStrings{"something"}, Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
claims.Issuer = oidcTestServer.URL
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, common.TokenVerificationErr)
|
||||
})
|
||||
|
||||
t.Run("OIDC provider is external, audience is not specified, token is signed with the wrong key", func(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"url": "",
|
||||
"oidc.config": fmt.Sprintf(`
|
||||
name: Test
|
||||
issuer: %s
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]`, oidcTestServer.URL),
|
||||
"oidc.tls.insecure.skip.verify": "true", // This isn't what we're testing.
|
||||
}
|
||||
|
||||
// This is not actually used in the test. The test only calls the OIDC test server. But a valid cert/key pair
|
||||
// must be set to test VerifyToken's behavior when Argo CD is configured with TLS enabled.
|
||||
secretConfig := map[string][]byte{
|
||||
"tls.crt": utiltest.Cert,
|
||||
"tls.key": utiltest.PrivateKey,
|
||||
}
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClientWithConfig(config, secretConfig), "argocd")
|
||||
mgr := NewSessionManager(settingsMgr, getProjLister(), "", NewUserStateStorage(nil))
|
||||
mgr.verificationDelayNoiseEnabled = false
|
||||
|
||||
claims := jwt.RegisteredClaims{Subject: "admin", ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour * 24))}
|
||||
claims.Issuer = oidcTestServer.URL
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||
key, err := jwt.ParseRSAPrivateKeyFromPEM(utiltest.PrivateKey2)
|
||||
require.NoError(t, err)
|
||||
tokenString, err := token.SignedString(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = mgr.VerifyToken(tokenString)
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, common.TokenVerificationErr)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -118,6 +118,33 @@ type Help struct {
|
||||
BinaryURLs map[string]string `json:"binaryUrl,omitempty"`
|
||||
}
|
||||
|
||||
// oidcConfig is the same as the public OIDCConfig, except the public one excludes the AllowedAudiences and the
|
||||
// SkipAudienceCheckWhenTokenHasNoAudience fields.
|
||||
// AllowedAudiences should be accessed via ArgoCDSettings.OAuth2AllowedAudiences.
|
||||
// SkipAudienceCheckWhenTokenHasNoAudience should be accessed via ArgoCDSettings.SkipAudienceCheckWhenTokenHasNoAudience.
|
||||
type oidcConfig struct {
|
||||
OIDCConfig
|
||||
AllowedAudiences []string `json:"allowedAudiences,omitempty"`
|
||||
SkipAudienceCheckWhenTokenHasNoAudience *bool `json:"skipAudienceCheckWhenTokenHasNoAudience,omitempty"`
|
||||
}
|
||||
|
||||
func (o *oidcConfig) toExported() *OIDCConfig {
|
||||
if o == nil {
|
||||
return nil
|
||||
}
|
||||
return &OIDCConfig{
|
||||
Name: o.Name,
|
||||
Issuer: o.Issuer,
|
||||
ClientID: o.ClientID,
|
||||
ClientSecret: o.ClientSecret,
|
||||
CLIClientID: o.CLIClientID,
|
||||
RequestedScopes: o.RequestedScopes,
|
||||
RequestedIDTokenClaims: o.RequestedIDTokenClaims,
|
||||
LogoutURL: o.LogoutURL,
|
||||
RootCA: o.RootCA,
|
||||
}
|
||||
}
|
||||
|
||||
type OIDCConfig struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Issuer string `json:"issuer,omitempty"`
|
||||
@@ -1550,24 +1577,37 @@ func UnmarshalDexConfig(config string) (map[string]interface{}, error) {
|
||||
return dexCfg, err
|
||||
}
|
||||
|
||||
func (a *ArgoCDSettings) OIDCConfig() *OIDCConfig {
|
||||
func (a *ArgoCDSettings) oidcConfig() *oidcConfig {
|
||||
if a.OIDCConfigRAW == "" {
|
||||
return nil
|
||||
}
|
||||
oidcConfig, err := UnmarshalOIDCConfig(a.OIDCConfigRAW)
|
||||
config, err := unmarshalOIDCConfig(a.OIDCConfigRAW)
|
||||
if err != nil {
|
||||
log.Warnf("invalid oidc config: %v", err)
|
||||
return nil
|
||||
}
|
||||
oidcConfig.ClientSecret = ReplaceStringSecret(oidcConfig.ClientSecret, a.Secrets)
|
||||
oidcConfig.ClientID = ReplaceStringSecret(oidcConfig.ClientID, a.Secrets)
|
||||
return &oidcConfig
|
||||
config.ClientSecret = ReplaceStringSecret(config.ClientSecret, a.Secrets)
|
||||
config.ClientID = ReplaceStringSecret(config.ClientID, a.Secrets)
|
||||
return &config
|
||||
}
|
||||
|
||||
func UnmarshalOIDCConfig(config string) (OIDCConfig, error) {
|
||||
var oidcConfig OIDCConfig
|
||||
err := yaml.Unmarshal([]byte(config), &oidcConfig)
|
||||
return oidcConfig, err
|
||||
func (a *ArgoCDSettings) OIDCConfig() *OIDCConfig {
|
||||
config := a.oidcConfig()
|
||||
if config == nil {
|
||||
return nil
|
||||
}
|
||||
return config.toExported()
|
||||
}
|
||||
|
||||
func unmarshalOIDCConfig(configStr string) (oidcConfig, error) {
|
||||
var config oidcConfig
|
||||
err := yaml.Unmarshal([]byte(configStr), &config)
|
||||
return config, err
|
||||
}
|
||||
|
||||
func ValidateOIDCConfig(configStr string) error {
|
||||
_, err := unmarshalOIDCConfig(configStr)
|
||||
return err
|
||||
}
|
||||
|
||||
// TLSConfig returns a tls.Config with the configured certificates
|
||||
@@ -1606,6 +1646,37 @@ func (a *ArgoCDSettings) OAuth2ClientID() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// OAuth2AllowedAudiences returns a list of audiences that are allowed for the OAuth2 client. If the user has not
|
||||
// explicitly configured the list of audiences (or has configured an empty list), then the OAuth2 client ID is returned
|
||||
// as the only allowed audience. When using the bundled Dex, that client ID is always "argo-cd".
|
||||
func (a *ArgoCDSettings) OAuth2AllowedAudiences() []string {
|
||||
if config := a.oidcConfig(); config != nil {
|
||||
if len(config.AllowedAudiences) == 0 {
|
||||
allowedAudiences := []string{config.ClientID}
|
||||
if config.CLIClientID != "" {
|
||||
allowedAudiences = append(allowedAudiences, config.CLIClientID)
|
||||
}
|
||||
return allowedAudiences
|
||||
}
|
||||
return config.AllowedAudiences
|
||||
}
|
||||
if a.DexConfig != "" {
|
||||
return []string{common.ArgoCDClientAppID, common.ArgoCDCLIClientAppID}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ArgoCDSettings) SkipAudienceCheckWhenTokenHasNoAudience() bool {
|
||||
if config := a.oidcConfig(); config != nil {
|
||||
if config.SkipAudienceCheckWhenTokenHasNoAudience != nil {
|
||||
return *config.SkipAudienceCheckWhenTokenHasNoAudience
|
||||
}
|
||||
return true
|
||||
}
|
||||
// When using the bundled Dex, the audience check is required. Dex will always send JWTs with an audience.
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *ArgoCDSettings) OAuth2ClientSecret() string {
|
||||
if oidcConfig := a.OIDCConfig(); oidcConfig != nil {
|
||||
return oidcConfig.ClientSecret
|
||||
|
||||
@@ -1173,3 +1173,68 @@ rootCA: "invalid"`},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_OAuth2AllowedAudiences(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
settings *ArgoCDSettings
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
name: "Empty",
|
||||
settings: &ArgoCDSettings{},
|
||||
expected: []string{},
|
||||
},
|
||||
{
|
||||
name: "OIDC configured, no audiences specified, clientID used",
|
||||
settings: &ArgoCDSettings{OIDCConfigRAW: `name: Test
|
||||
issuer: aaa
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]`},
|
||||
expected: []string{"xxx"},
|
||||
},
|
||||
{
|
||||
name: "OIDC configured, no audiences specified, clientID and cliClientID used",
|
||||
settings: &ArgoCDSettings{OIDCConfigRAW: `name: Test
|
||||
issuer: aaa
|
||||
clientID: xxx
|
||||
cliClientID: cli-xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]`},
|
||||
expected: []string{"xxx", "cli-xxx"},
|
||||
},
|
||||
{
|
||||
name: "OIDC configured, audiences specified",
|
||||
settings: &ArgoCDSettings{OIDCConfigRAW: `name: Test
|
||||
issuer: aaa
|
||||
clientID: xxx
|
||||
clientSecret: yyy
|
||||
requestedScopes: ["oidc"]
|
||||
allowedAudiences: ["aud1", "aud2"]`},
|
||||
expected: []string{"aud1", "aud2"},
|
||||
},
|
||||
{
|
||||
name: "Dex configured",
|
||||
settings: &ArgoCDSettings{DexConfig: `connectors:
|
||||
- type: github
|
||||
id: github
|
||||
name: GitHub
|
||||
config:
|
||||
clientID: aabbccddeeff00112233
|
||||
clientSecret: $dex.github.clientSecret
|
||||
orgs:
|
||||
- name: your-github-org
|
||||
`},
|
||||
expected: []string{common.ArgoCDClientAppID, common.ArgoCDCLIClientAppID},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tcc := tc
|
||||
t.Run(tcc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.ElementsMatch(t, tcc.expected, tcc.settings.OAuth2AllowedAudiences())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,18 +1,22 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/square/go-jose.v2"
|
||||
)
|
||||
|
||||
// Cert is a certificate for tests. It was generated like this:
|
||||
// opts := tls.CertOptions{Hosts: []string{"localhost"}, Organization: "Acme"}
|
||||
// certBytes, privKey, err := tls.generatePEM(opts)
|
||||
//
|
||||
// opts := tls.CertOptions{Hosts: []string{"localhost"}, Organization: "Acme"}
|
||||
// certBytes, privKey, err := tls.generatePEM(opts)
|
||||
var Cert = []byte(`-----BEGIN CERTIFICATE-----
|
||||
MIIC8zCCAdugAwIBAgIQCSoocl6e/FR4mQy1wX6NbjANBgkqhkiG9w0BAQsFADAP
|
||||
MQ0wCwYDVQQKEwRBY21lMB4XDTIyMDYyMjE3Mjk1MloXDTIzMDYyMjE3Mjk1Mlow
|
||||
@@ -61,6 +65,48 @@ SDpz4h+Bov5qTKkzcxuu1QWtA4M0K8Iy6IYLwb83DZEm1OsAf4i0pODz21PY/I+O
|
||||
VHzjB10oYgaInHZgMUdyb6F571UdiYSB6a/IlZ3ngj5touy3VIM=
|
||||
-----END RSA PRIVATE KEY-----`)
|
||||
|
||||
// PrivateKey2 is a second RSA key used only for tests. You can use it to see if signing a JWT with a different key
|
||||
// fails validation (as it should).
|
||||
var PrivateKey2 = []byte(`-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIG4gIBAAKCAYEAqGvlMTqPxJ844hNAneTzh9lPlYx0swai2RONOGLF0/0I9Ej5
|
||||
TIgVvGykcoH3e39VGAUFd8qbLKneX3nPMjhe1+0dmLPhEGffO2ZEkhMBM0x6bhYX
|
||||
XIsCXly5unN/Boosibvsd9isItsnC+4m3ELyREj1gTsCqIoZxFEq2iCPhfS7uPlQ
|
||||
z8G0q0FJohNOJEXYzH96Z4xuI3zudux5PPiHNsCzoUs/X0ogda14zaolvvZPYaqg
|
||||
g5zmZz6dHWnnKogsp0+Q1V3Nz1/GTCs6IDURSX+EPxst5qcin92Ft6TLOb0pu/dQ
|
||||
BW90AGspoelB54iElwbmib58KBzLC8U0FZIfcuN/vOfEnv7ON4RAS/R6wKRMdPEy
|
||||
Fm+Lr65QntaW2AVdxFM7EZfWLFOv741fMT3a1/l3Wou+nalxe7M+epFcn67XrkIi
|
||||
fLnvg/rOUESNHmfuFIa9CAJdekM1WxCFBq6/rAxmHdnbEX3SCl0h1SrzaF336JQc
|
||||
PMSNGiNjra5xO8CxAgMBAAECggGAfvBLXy5HO6fSJLrkAd2VG3fTfuDM+D3xMXGG
|
||||
B9CSUDOvswbpNyB+WXT9AP0p/V+8UA1A0MfY6vHhE87oNm68NTyXCQfSgx3253su
|
||||
BXbjebmTsTNfSjXPhDWZGomAXPp5lRoZoT6ihubsaBaIHY0rsgHXYB6M42CrCQcw
|
||||
KBVQd2M8ta7blKrntAfSKqEoTTiDraYLKM50GLVJukKDIkwjBUZ6XQAs9HIXQvqL
|
||||
SV+LcYGN1QvYTTpNgdV0b73pKGpXG8AvuwXrYFKTZeNMxPnbXd5NHLE6efuOHfeb
|
||||
gYoDFy7NLSJa7DdpJIYMf0yMZQVOwdcKXiK2st+e0mUS0WHNhGKQAVc3wd+gzgtS
|
||||
+s/hJk/ya/4CJwXahtbn5zhNDdbgMSt+m2LVRCIGd+JL14cd1bPySD9QL3EU7+9P
|
||||
nt4S9wvu2lqa6VSK2I9tsjIgm7I7T5SUI3m+DnrpTzlpDCOqFccsSIlY5I+BD9ES
|
||||
7bT57cRkyeWh5w43UQeSFhul5T0tAoHBAN7BjlT22hynPNPshNtJIj+YbAX9+MV9
|
||||
FIjyPa1Say/PSXf9SvRWaTDuRWnFy4B9c12p6zwtbFjewn6OBCope26mmjVtii6t
|
||||
4ABhA/v17nPUjLMQQZGIE0pHGKMpspmd3hqZcNomTtdTNy9X7NBCigJNeZR17TFm
|
||||
3F2qh9oNJVbAgO54PbmFiWk0vMr0x6PWA0p3Ns/qPdu7s7EFonyOHs7f3E9MCYEd
|
||||
3rp5IOJ5rzFR0acYbYhsOX4zRgMRrMYb5wKBwQDBjnlFzZVF56eK5iseEZrnay5p
|
||||
CsLqxDGKr8wHFHQ8G9hGLTGOsaPd3RvAD2A7rQSNNHj2S2gv8I8DBOXzFhifE31q
|
||||
Cy7Zh0HjAt6Tx5yL/lKAPMbDC3trUdITJugepR72t27UmLY0ZAX0SS8ZCRg+3dAS
|
||||
Vdp3zkfOhlg3w92eQSdnU+hmr44AJL1cU+CLN7pCZgkaXzuULfs/+tPVVyeOHZX7
|
||||
iA2fJ2ITRzO9XjclQ49itRJWqWcq22JqsgQ6a6cCgcBn9blxmcttd/eBiG7w0I71
|
||||
UzOHEGKb+KYuy69RRpfTtlA5ebMTmYh6V5l5peA11VaULgslCKX6S+xFmA4Fh1qd
|
||||
548sxDSrWGakhqKPYtWopVgM8ddIDlPCZK/w5jL+UpknnNj4VsyQ3btxkv1orMUw
|
||||
EexeBzNtzO2noUDJ2TzF4g3KPb/A57ubqAs8RUUvB2B9zml8W3wHIvDX+yM8Mi/a
|
||||
qMtvDrOY2NHsAUABsny67c6Ex3fHJYsnhNJ1+DfENZ0CgcBuewR983rhC/l2Lyst
|
||||
Xp8suOEk1B+uIY6luvKal/JA3SP16pX+/Sar3SmZ1yz24ytV7j2dWC2AL69x6bnX
|
||||
pyUmp9lOTlPPloTlLx4c/DM/NUuiJw7NBiDMgUeH5w1XcKjb6pg4gXJ/NRiw95UK
|
||||
lUZhm/rIfHjXKceS+twf+IznaAk10Y82Db7gFhiAOuBQlt6aR+OqSfGYAycGvgVs
|
||||
IPNTC1Aw4tfjoHc6ycmerciMXKPbk7+D9+4LaG4kuLfxIMECgcANm3mBWWJCFH3h
|
||||
s2PXArzk1G9RKEmfUpfhVkeMhtD2/TMG3NPvrGpmjmPx5rf1DUxOUMJyu+B1VdZg
|
||||
u0GOSkEiOfI3DxNs0GwzsL9/EYoelgGj7uc6IV9awhbzRPwro5nceGJspnWqXIVp
|
||||
rawN1NFkKr5MCxl5Q4veocU94ThOlFdYgreyVX6s40ZL1eF0RvAQ+e0oFT7SfCHu
|
||||
B3XwyYtAFsaO5r7oEc1Bv6oNSbE+FNJzRdjkWEIhdLVKlepil/w=
|
||||
-----END RSA PRIVATE KEY-----`)
|
||||
|
||||
func dexMockHandler(t *testing.T, url string) func(http.ResponseWriter, *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -123,6 +169,20 @@ func oidcMockHandler(t *testing.T, url string) func(http.ResponseWriter, *http.R
|
||||
"claims_supported": ["sub", "aud", "exp"]
|
||||
}`, url))
|
||||
require.NoError(t, err)
|
||||
case "/keys":
|
||||
pubKey, err := jwt.ParseRSAPublicKeyFromPEM(Cert)
|
||||
require.NoError(t, err)
|
||||
jwks := jose.JSONWebKeySet{
|
||||
Keys: []jose.JSONWebKey{
|
||||
{
|
||||
Key: pubKey,
|
||||
},
|
||||
},
|
||||
}
|
||||
out, err := json.Marshal(jwks)
|
||||
require.NoError(t, err)
|
||||
_, err = io.WriteString(w, string(out))
|
||||
require.NoError(t, err)
|
||||
default:
|
||||
w.WriteHeader(404)
|
||||
}
|
||||
@@ -137,4 +197,4 @@ func GetOIDCTestServer(t *testing.T) *httptest.Server {
|
||||
oidcMockHandler(t, ts.URL)(w, r)
|
||||
})
|
||||
return ts
|
||||
}
|
||||
}
|
||||
|
||||