Compare commits

..

2 Commits

2100 changed files with 309678 additions and 283512 deletions

88 .argo-ci/ci.yaml Normal file

@@ -0,0 +1,88 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: argo-cd-ci-
spec:
entrypoint: argo-cd-ci
arguments:
parameters:
- name: revision
value: master
- name: repo
value: https://github.com/argoproj/argo-cd.git
templates:
- name: argo-cd-ci
steps:
- - name: build
template: ci-dind
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- make controller-image server-image repo-server-image
- name: test
template: ci-builder
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- dep ensure && make cli lint test
- name: test-e2e
template: ci-builder
arguments:
parameters:
- name: cmd
value: "dep ensure && make test-e2e"
- name: ci-builder
inputs:
parameters:
- name: cmd
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["{{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
resources:
requests:
memory: 1024Mi
cpu: 200m
- name: ci-dind
inputs:
parameters:
- name: cmd
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["until docker ps; do sleep 3; done && {{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
env:
- name: DOCKER_HOST
value: 127.0.0.1
resources:
requests:
memory: 1024Mi
cpu: 200m
sidecars:
- name: dind
image: docker:17.10-dind
securityContext:
privileged: true
mirrorVolumeMounts: true

@@ -1,17 +0,0 @@
ignore:
- "**/*.pb.go"
- "**/*.pb.gw.go"
- "**/*generated.go"
- "**/*generated.deepcopy.go"
- "**/*_test.go"
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
coverage:
status:
# we've found this not to be useful
patch: off
project:
default:
# allow test coverage to drop by 2%, assuming that it's typically due to CI problems
threshold: 2

@@ -1,13 +1,4 @@
# Prevent vendor directory from being copied to ensure we are not pulling unexpected cruft from
# a user's workspace, and are only building off of what is locked by dep.
.vscode/
.idea/
.DS_Store
vendor/
dist/
*.iml
# delve debug binaries
cmd/**/debug
debug.test
coverage.out
ui/node_modules/
vendor
dist

@@ -1,43 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'bug'
assignees: ''
---
If you are trying to resolve an environment-specific issue or have a one-off question about an edge case that does not require a new feature, please consider asking a question in the Argo CD Slack [channel](https://argoproj.github.io/community/join-slack).
Checklist:
* [ ] I've searched in the docs and FAQ for my answer: https://bit.ly/argocd-faq.
* [ ] I've included steps to reproduce the bug.
* [ ] I've pasted the output of `argocd version`.
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
A list of the steps required to reproduce the issue. Best of all, give us the URL to a repository that exhibits this issue.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Version**
```shell
Paste the output from `argocd version` here.
```
**Logs**
```
Paste any relevant application logs here.
```

@@ -1,12 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Have you read the docs?
url: https://argo-cd.readthedocs.io/
about: Much help can be found in the docs
- name: Ask a question
url: https://github.com/argoproj/argo-cd/discussions/new
about: Ask a question or start a discussion about Argo CD
- name: Chat on Slack
url: https://argoproj.github.io/community/join-slack
about: Maybe chatting with the community can help

@@ -1,18 +0,0 @@
---
name: Enhancement proposal
about: Propose an enhancement for this project
title: ''
labels: 'enhancement'
assignees: ''
---
# Summary
What change you think needs making.
# Motivation
Please give examples of your use case, e.g. when would you use this.
# Proposal
How do you think this should be implemented?

@@ -1 +0,0 @@
# See https://github.com/probot/no-response

@@ -1,17 +0,0 @@
Note on DCO:
If the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.
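A missing sign-off can usually be fixed by amending the offending commits and force-pushing your branch; the commands below are a generic illustration, not part of the DCO check itself.
```shell
# Sign off only the most recent commit
git commit --amend --signoff
# Sign off the last three commits, then update the PR branch
git rebase --signoff HEAD~3
git push --force-with-lease
```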
Checklist:
* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.
* [ ] The title of the PR states what changed and references the related issue number (used for the release note).
* [ ] I've included "Closes [ISSUE #]" or "Fixes [ISSUE #]" in the description to automatically close the associated issue.
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
* [ ] Does this PR require documentation updates?
* [ ] I've updated documentation as required by this PR.
* [ ] Optional. My organization is added to USERS.md.
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)
* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.
* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).

4 .github/stale.yml vendored

@@ -1,4 +0,0 @@
# See https://github.com/probot/stale
# See https://github.com/probot/stale
exemptLabels:
- backlog

@@ -1,446 +0,0 @@
name: Integration tests
on:
push:
branches:
- 'master'
- 'release-*'
- '!release-1.4'
- '!release-1.5'
pull_request:
branches:
- 'master'
- 'release-*'
env:
# Golang version to use across CI steps
GOLANG_VERSION: '1.16.11'
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build-docker:
name: Build Docker image
runs-on: ubuntu-latest
if: github.head_ref != ''
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build Docker image
run: |
make image
check-go:
name: Ensure Go modules synchronicity
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Setup Golang
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
run: |
go mod download
- name: Check for tidyness of go.mod and go.sum
run: |
go mod tidy
git diff --exit-code -- .
build-go:
name: Build & cache Go code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Setup Golang
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@v3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Download all Go modules
run: |
go mod download
- name: Compile all packages
run: make build-local
lint-go:
permissions:
contents: read # for actions/checkout to fetch code
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: Lint Go code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.46.2
args: --timeout 10m --exclude SA5011 --verbose
test-go:
name: Run unit tests for Go packages
runs-on: ubuntu-latest
needs:
- build-go
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@v3
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
run: |
sudo apt-get install git -y
- name: Switch to a temporary branch so we re-attach HEAD
run: |
git switch -c temporal-pr-branch
git status
- name: Fetch complete history for blame information
run: |
git fetch --prune --no-tags --depth=1 origin +refs/heads/*:refs/remotes/origin/*
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@v3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Download and vendor all required packages
run: |
go mod download
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
uses: actions/upload-artifact@v2
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
uses: actions/upload-artifact@v2
with:
name: test-results
path: test-results/
test-go-race:
name: Run unit tests with -race, for Go packages
runs-on: ubuntu-latest
needs:
- build-go
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@v3
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
run: |
sudo apt-get install git -y
- name: Switch to a temporary branch so we re-attach HEAD
run: |
git switch -c temporal-pr-branch
git status
- name: Fetch complete history for blame information
run: |
git fetch --prune --no-tags --depth=1 origin +refs/heads/*:refs/remotes/origin/*
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@v3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Download and vendor all required packages
run: |
go mod download
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@v2
with:
name: race-results
path: test-results/
codegen:
name: Check changes to generated code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Setup Golang
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
run: |
mkdir -p ~/go/src/github.com/argoproj
cp -a ../argo-cd ~/go/src/github.com/argoproj
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Download & vendor dependencies
run: |
# We need to vendor go modules for codegen yet
go mod download
go mod vendor -v
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Install toolchain for codegen
run: |
make install-codegen-tools-local
make install-go-tools-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Initialize local Helm
run: |
helm2 init --client-only
- name: Run codegen
run: |
set -x
export GOPATH=$(go env GOPATH)
git checkout -- go.mod go.sum
make codegen-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Check nothing has changed
run: |
set -xo pipefail
git diff --exit-code -- . ':!go.sum' ':!go.mod' ':!assets/swagger.json' | tee codegen.patch
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
build-ui:
name: Build, test & lint UI code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Setup NodeJS
uses: actions/setup-node@v1
with:
node-version: '12.18.4'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@v3
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
- name: Install node dependencies
run: |
cd ui && yarn install --frozen-lockfile --ignore-optional --non-interactive
- name: Build UI code
run: |
yarn test
yarn build
env:
NODE_ENV: production
NODE_ONLINE_ENV: online
working-directory: ui/
- name: Run ESLint
run: yarn lint
working-directory: ui/
analyze:
name: Process & analyze test artifacts
runs-on: ubuntu-latest
needs:
- test-go
- build-ui
env:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@v3
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
- name: Remove other node_modules directory
run: |
rm -rf ui/node_modules/argo-ui/node_modules
- name: Create test-results directory
run: |
mkdir -p test-results
- name: Get code coverage artifact
uses: actions/download-artifact@v2
with:
name: code-coverage
- name: Get test result artifact
uses: actions/download-artifact@v2
with:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@v1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
SCANNER_VERSION: 4.2.0.1873
SCANNER_PATH: /tmp/cache/scanner
OS: linux
run: |
# We do not use the provided action, because it contains an old
# version of the scanner, and also takes time to build.
set -e
mkdir -p ${SCANNER_PATH}
export SONAR_USER_HOME=${SCANNER_PATH}/.sonar
if [[ ! -x "${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner" ]]; then
curl -Ol https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip
unzip -qq -o sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip -d ${SCANNER_PATH}
fi
chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/jre/bin/java
# Explicitly set NODE_MODULES
export NODE_MODULES=${PWD}/ui/node_modules
export NODE_PATH=${PWD}/ui/node_modules
${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests
runs-on: ubuntu-latest
strategy:
matrix:
k3s-version: [v1.21.2, v1.20.2, v1.19.2]
needs:
- build-go
env:
GOPATH: /home/runner/go
ARGOCD_FAKE_IN_CLUSTER: "true"
ARGOCD_SSH_DATA_PATH: "/tmp/argo-e2e/app/config/ssh"
ARGOCD_TLS_DATA_PATH: "/tmp/argo-e2e/app/config/tls"
ARGOCD_E2E_SSH_KNOWN_HOSTS: "../fixture/certs/ssh_known_hosts"
ARGOCD_E2E_K3S: "true"
ARGOCD_IN_CI: "true"
ARGOCD_E2E_APISERVER_PORT: "8088"
ARGOCD_SERVER: "127.0.0.1:8088"
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Setup Golang
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
run: |
sudo pkill mono || true
- name: Install K3S
env:
INSTALL_K3S_VERSION: ${{ matrix.k3s-version }}+k3s1
run: |
set -x
curl -sfL https://get.k3s.io | sh -
sudo chmod -R a+rw /etc/rancher/k3s
sudo mkdir -p $HOME/.kube && sudo chown -R runner $HOME/.kube
sudo k3s kubectl config view --raw > $HOME/.kube/config
sudo chown runner $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@v3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Download Go dependencies
run: |
go mod download
go get github.com/mattn/goreman
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.35.3-distroless
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:6.2.7-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
chown runner dist
- name: Run E2E server and wait for it being available
timeout-minutes: 30
run: |
set -x
# Something is weird in GH runners -- there's a phantom listener for
# port 8080 which is not visible in netstat -tulpen, but still there
# with an HTTP listener. We have the API server listening on port 8088
# instead.
make start-e2e-local 2>&1 | sed -r "s/[[:cntrl:]]\[[0-9]{1,3}m//g" > /tmp/e2e-server.log &
count=1
until curl -f http://127.0.0.1:8088/healthz; do
sleep 10;
if test $count -ge 60; then
echo "Timeout"
exit 1
fi
count=$((count+1))
done
- name: Run E2E testsuite
run: |
set -x
make test-e2e-local
- name: Upload e2e-server logs
uses: actions/upload-artifact@v2
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
if: ${{ failure() }}

@@ -1,64 +0,0 @@
name: "Code scanning - action"
on:
push:
pull_request:
schedule:
- cron: '0 19 * * 0'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
CodeQL-Build:
permissions:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
if: github.repository == 'argoproj/argo-cd'
# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

@@ -1,23 +0,0 @@
name: Deploy
on:
push:
branches:
- master
pull_request:
branches:
- 'master'
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.9.8
- name: build
run: |
pip install -r docs/requirements.txt
mkdocs build

@@ -1,85 +0,0 @@
name: Image
on:
push:
branches:
- master
env:
GOLANG_VERSION: '1.16.11'
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
publish:
permissions:
contents: write # for git to push upgrade commit if not already deployed
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@master
with:
path: src/github.com/argoproj/argo-cd
# get image tag
- run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8}
working-directory: ./src/github.com/argoproj/argo-cd
id: image
# build
- run: |
docker images -a --format "{{.ID}}" | xargs -I {} docker rmi {}
make image DEV_IMAGE=true DOCKER_PUSH=false IMAGE_NAMESPACE=ghcr.io/argoproj IMAGE_TAG=${{ steps.image.outputs.tag }}
working-directory: ./src/github.com/argoproj/argo-cd
# publish
- run: |
docker login ghcr.io --username $USERNAME --password $PASSWORD
docker push ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }}
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker tag ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} argoproj/argocd:latest
docker push argoproj/argocd:latest
env:
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.TOKEN }}
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}
# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@main
with:
cosign-release: 'v1.13.0'
- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd:latest
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ github.event_name == 'push' }}
# deploy
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
env:
TOKEN: ${{ secrets.TOKEN }}
- run: |
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }}
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push)
working-directory: argoproj-deployments/argocd
# TODO: clean up old images once github supports it: https://github.community/t5/How-to-use-Git-and-GitHub/Deleting-images-from-GitHub-Package-Registry/m-p/41202/thread-id/9811

@@ -1,331 +0,0 @@
name: Create ArgoCD release
on:
push:
tags:
- 'release-v*'
- '!release-v1.5*'
- '!release-v1.4*'
- '!release-v1.3*'
- '!release-v1.2*'
- '!release-v1.1*'
- '!release-v1.0*'
- '!release-v0*'
env:
GOLANG_VERSION: '1.16.11'
permissions:
contents: read
jobs:
prepare-release:
permissions:
contents: write # To push changes to release branch
name: Perform automatic release on trigger ${{ github.ref }}
runs-on: ubuntu-latest
env:
# The name of the tag as supplied by the GitHub event
SOURCE_TAG: ${{ github.ref }}
# The image namespace where the Docker image will be published
IMAGE_NAMESPACE: quay.io/argoproj
# Whether to create & push image and release assets
DRY_RUN: false
# Whether a draft release should be created, instead of a public one
DRAFT_RELEASE: false
# Whether to update homebrew with this release as well
# Set the RELEASE_HOMEBREW_TOKEN secret in the repository for this to work - needs
# access to public repositories
UPDATE_HOMEBREW: false
# Name of the GitHub user for Git config
GIT_USERNAME: argo-bot
# E-Mail of the GitHub user for Git config
GIT_EMAIL: argoproj@gmail.com
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Check if the published tag is well formed and setup vars
run: |
set -xue
# Target version must match major.minor.patch and optional -rcX suffix
# where X must be a number.
TARGET_VERSION=${SOURCE_TAG#*release-v}
if ! echo "${TARGET_VERSION}" | egrep '^[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)*$'; then
echo "::error::Target version '${TARGET_VERSION}' is malformed, refusing to continue." >&2
exit 1
fi
# Target branch is the release branch we're going to operate on
# Its name is 'release-<major>.<minor>'
TARGET_BRANCH="release-${TARGET_VERSION%\.[0-9]*}"
# The release tag is the source tag, minus the release- prefix
RELEASE_TAG="${SOURCE_TAG#*release-}"
# Whether this is a pre-release (indicated by -rc suffix)
PRE_RELEASE=false
if echo "${RELEASE_TAG}" | egrep -- '-rc[0-9]+$'; then
PRE_RELEASE=true
fi
# We must not have a release trigger within the same release branch,
# because that means a release for this branch is already running.
if git tag -l | grep "release-v${TARGET_VERSION%\.[0-9]*}" | grep -v "release-v${TARGET_VERSION}"; then
echo "::error::Another release for branch ${TARGET_BRANCH} is currently in progress."
exit 1
fi
# Ensure that the release does not yet exist
if git rev-parse ${RELEASE_TAG}; then
echo "::error::Release tag ${RELEASE_TAG} already exists in repository. Refusing to continue."
exit 1
fi
# Make the variables available in follow-up steps
echo "TARGET_VERSION=${TARGET_VERSION}" >> $GITHUB_ENV
echo "TARGET_BRANCH=${TARGET_BRANCH}" >> $GITHUB_ENV
echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV
echo "PRE_RELEASE=${PRE_RELEASE}" >> $GITHUB_ENV
- name: Check if our release tag has a correct annotation
run: |
set -ue
# Fetch all tag information as well
git fetch --prune --tags --force
echo "=========== BEGIN COMMIT MESSAGE ============="
git show ${SOURCE_TAG}
echo "============ END COMMIT MESSAGE =============="
# Quite dirty hack to get the release notes from the annotated tag
# into a temporary file.
RELEASE_NOTES=$(mktemp -p /tmp release-notes.XXXXXX)
prefix=true
begin=false
git show ${SOURCE_TAG} | while read line; do
# Whatever is in commit history for the tag, we only want that
# annotation from our tag. We discard everything else.
if test "$begin" = "false"; then
if echo "$line" | grep -q "tag ${SOURCE_TAG#refs/tags/}"; then begin="true"; fi
continue
fi
if test "$prefix" = "true"; then
if test -z "$line"; then prefix=false; fi
else
if echo "$line" | egrep -q '^commit [0-9a-f]+'; then
break
fi
echo "$line" >> ${RELEASE_NOTES}
fi
done
# For debug purposes
echo "============BEGIN RELEASE NOTES================="
cat ${RELEASE_NOTES}
echo "=============END RELEASE NOTES=================="
# Too short release notes are suspicious. We need at least 100 bytes.
relNoteLen=$(stat -c '%s' $RELEASE_NOTES)
if test $relNoteLen -lt 100; then
echo "::error::No release notes provided in tag annotation (or tag is not annotated)"
exit 1
fi
# Check for magic string '## Quick Start' in head of release notes
if ! head -2 ${RELEASE_NOTES} | grep -iq '## Quick Start'; then
echo "::error::Release notes seem invalid, quick start section not found."
exit 1
fi
# We store the path to the temporary release notes file for later reading; we
# need it when creating the release.
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
- name: Setup Golang
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Setup Git author information
run: |
set -ue
git config --global user.email "${GIT_EMAIL}"
git config --global user.name "${GIT_USERNAME}"
- name: Checkout corresponding release branch
run: |
set -ue
echo "Switching to release branch '${TARGET_BRANCH}'"
if ! git checkout ${TARGET_BRANCH}; then
echo "::error::Checking out release branch '${TARGET_BRANCH}' for target version '${TARGET_VERSION}' (tagged '${RELEASE_TAG}') failed. Does it exist in repo?"
exit 1
fi
- name: Create VERSION information
run: |
set -ue
echo "Bumping version from $(cat VERSION) to ${TARGET_VERSION}"
echo "${TARGET_VERSION}" > VERSION
git commit -m "Bump version to ${TARGET_VERSION}" VERSION
- name: Generate new set of manifests
run: |
set -ue
make install-codegen-tools-local
helm2 init --client-only
make manifests-local VERSION=${TARGET_VERSION}
git diff
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"
- name: Create the release tag
run: |
set -ue
echo "Creating release ${RELEASE_TAG}"
git tag ${RELEASE_TAG}
- name: Build Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
make image IMAGE_TAG="${TARGET_VERSION}" DOCKER_PUSH=false
make release-cli
chmod +x ./dist/argocd-linux-amd64
./dist/argocd-linux-amd64 version --client
if: ${{ env.DRY_RUN != 'true' }}
- name: Push docker image to repository
env:
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}
QUAY_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
run: |
set -ue
docker login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_TOKEN}"
docker push ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION}
# Remove the following when Docker Hub is gone
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker tag ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} argoproj/argocd:v${TARGET_VERSION}
docker push argoproj/argocd:v${TARGET_VERSION}
make release-cli
make checksums
chmod +x ./dist/argocd-linux-amd64
./dist/argocd-linux-amd64 version --client
if: ${{ env.DRY_RUN != 'true' }}
- name: Install cosign
uses: sigstore/cosign-installer@main
with:
cosign-release: 'v1.13.0'
- name: Sign Argo CD container images and assets
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION}
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Read release notes file
id: release-notes
uses: juliangruber/read-file-action@v1
with:
path: ${{ env.RELEASE_NOTES }}
- name: Push changes to release branch
run: |
set -ue
git push origin ${TARGET_BRANCH}
git push origin ${RELEASE_TAG}
- name: Dry run GitHub release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
id: create_release
with:
tag_name: ${{ env.RELEASE_TAG }}
release_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }}
if: ${{ env.DRY_RUN == 'true' }}
- name: Generate SBOM (spdx)
id: spdx-builder
env:
# defines the spdx/spdx-sbom-generator version to use.
SPDX_GEN_VERSION: v0.0.13
# defines the sigs.k8s.io/bom version to use.
SIGS_BOM_VERSION: v0.2.1
# comma delimited list of project relative folders to inspect for package
# managers (gomod, yarn, npm).
PROJECT_FOLDERS: ".,./ui"
# full qualified name of the docker image to be inspected
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
run: |
yarn install --cwd ./ui
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
go install sigs.k8s.io/bom/cmd/bom@$SIGS_BOM_VERSION
# Generate SPDX for project dependencies analyzing package managers
for folder in $(echo $PROJECT_FOLDERS | sed "s/,/ /g")
do
generator -p $folder -o /tmp
done
# Generate SPDX for binaries analyzing the docker image
if [[ ! -z $DOCKER_IMAGE ]]; then
bom generate -o /tmp/bom-docker-image.spdx -i $DOCKER_IMAGE
fi
cd /tmp && tar -zcf sbom.tar.gz *.spdx
if: ${{ env.DRY_RUN != 'true' }}
- name: Sign sbom
run: |
cosign sign-blob --key env://COSIGN_PRIVATE_KEY /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Create GitHub release
uses: softprops/action-gh-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: ${{ env.RELEASE_TAG }}
tag_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }}
files: |
dist/argocd-*
/tmp/sbom.tar.gz
/tmp/sbom.tar.gz.sig
if: ${{ env.DRY_RUN != 'true' }}
- name: Update homebrew formula
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
uses: dawidd6/action-homebrew-bump-formula@v3
with:
token: ${{env.HOMEBREW_TOKEN}}
formula: argocd
if: ${{ env.HOMEBREW_TOKEN != '' && env.UPDATE_HOMEBREW == 'true' && env.PRE_RELEASE != 'true' }}
- name: Delete original request tag from repository
run: |
set -ue
git push --delete origin ${SOURCE_TAG}
if: ${{ always() }}

17 .gitignore vendored

@@ -2,23 +2,8 @@
.idea/
.DS_Store
vendor/
dist/*
ui/dist/app/*
!ui/dist/app/gitkeep
site/
dist/
*.iml
# delve debug binaries
cmd/**/debug
debug.test
coverage.out
test-results
.scannerwork
.scratch
node_modules/
.kube/
# ignore built binaries
cmd/argocd/argocd
cmd/argocd-application-controller/argocd-application-controller
cmd/argocd-repo-server/argocd-repo-server
cmd/argocd-server/argocd-server

17 .gitpod.Dockerfile vendored

@@ -1,17 +0,0 @@
FROM gitpod/workspace-full
USER root
RUN curl -o /usr/local/bin/kubectl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
chmod +x /usr/local/bin/kubectl
RUN curl -L https://go.kubebuilder.io/dl/2.3.1/$(go env GOOS)/$(go env GOARCH) | \
tar -xz -C /tmp/ && mv /tmp/kubebuilder_2.3.1_$(go env GOOS)_$(go env GOARCH) /usr/local/kubebuilder
RUN apt-get install redis-server -y
RUN go get github.com/mattn/goreman
USER gitpod
ENV ARGOCD_REDIS_LOCAL=true
ENV KUBECONFIG=/tmp/kubeconfig

@@ -1,6 +0,0 @@
image:
file: .gitpod.Dockerfile
tasks:
- init: make mod-download-local dep-ui-local && GO111MODULE=off go get github.com/mattn/goreman
command: make start-test-k8s

@@ -1,7 +0,0 @@
version: 2
formats: all
mkdocs:
fail_on_warning: false
python:
install:
- requirements: docs/requirements.txt

File diff suppressed because it is too large

76 CONTRIBUTING.md Normal file

@@ -0,0 +1,76 @@
## Requirements
Make sure you have the following tools installed:
* [docker](https://docs.docker.com/install/#supported-platforms)
* [golang](https://golang.org/)
* [dep](https://github.com/golang/dep)
* [protobuf](https://developers.google.com/protocol-buffers/)
* [ksonnet](https://github.com/ksonnet/ksonnet#install)
* [helm](https://github.com/helm/helm/releases)
* [go-swagger](https://github.com/go-swagger/go-swagger/blob/master/docs/install.md)
* [jq](https://stedolan.github.io/jq/)
* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
```
$ brew tap go-swagger/go-swagger
$ brew install go dep protobuf kubectl ksonnet/tap/ks kubernetes-helm jq go-swagger
$ go get -u github.com/golang/protobuf/protoc-gen-go
$ go get -u github.com/go-swagger/go-swagger/cmd/swagger
$ go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
$ go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
```
Nice to have [gometalinter](https://github.com/alecthomas/gometalinter) and [goreman](https://github.com/mattn/goreman):
```
$ go get -u gopkg.in/alecthomas/gometalinter.v2 github.com/mattn/goreman && gometalinter.v2 --install
```
## Building
```
$ go get -u github.com/argoproj/argo-cd
$ dep ensure
$ make
```
NOTE: The make command can take a while, and we recommend building the specific component you are working on
* `make cli` - Make the argocd CLI tool
* `make server` - Make the API/repo/controller server
* `make codegen` - Builds protobuf and swagger files
* `make argocd-util` - Make the administrator's utility, used for certain tasks such as import/export
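For example, when iterating only on the CLI you can build and sanity-check just that binary (a minimal sketch; it assumes the Makefile's default `./dist` output directory):
```
$ make cli
$ ./dist/argocd --help
```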
## Generating ArgoCD manifests for a specific image repository/tag
During development, the `update-manifests.sh` script can be used to conveniently regenerate the
ArgoCD installation manifests with a customized image namespace and tag. This enables developers
to easily apply manifests which are using the images that they pushed into their personal container
repository.
```
$ IMAGE_NAMESPACE=jessesuen IMAGE_TAG=latest ./hack/update-manifests.sh
$ kubectl apply -n argocd -f ./manifests/install.yaml
```
## Running locally
You need access to a Kubernetes cluster (such as [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) or [docker edge](https://docs.docker.com/docker-for-mac/install/)) in order to run Argo CD on your laptop:
* install kubectl: `brew install kubectl`
* make sure `kubectl` is connected to your cluster (e.g. `kubectl get pods` should work).
* install the Application CRD using the following command:
```
$ kubectl create -f install/manifests/01_application-crd.yaml
```
* start Argo CD services using [goreman](https://github.com/mattn/goreman):
```
$ goreman start
```
## Troubleshooting
* Ensure argocd is installed: `./dist/argocd install`
* Ensure you're logged in: `./dist/argocd login --username admin --password <whatever password you set at install> localhost:8080`
* Ensure that roles are configured: `kubectl create -f install/manifests/02c_argocd-rbac-cm.yaml`
* Ensure minikube is running: `minikube stop && minikube start`
* Ensure Argo CD is aware of minikube: `./dist/argocd cluster add minikube`

@@ -1,137 +0,0 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
####################################################################################################
# Builder image
# Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.16.11 as builder
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
RUN apt-get update && apt-get install -y \
openssh-server \
nginx \
fcgiwrap \
git \
git-lfs \
make \
wget \
gcc \
sudo \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
WORKDIR /tmp
ADD hack/install.sh .
ADD hack/installers installers
ADD hack/tool-versions.sh .
RUN ./install.sh ksonnet-linux
RUN ./install.sh helm2-linux
RUN ./install.sh helm-linux
RUN ./install.sh kustomize-linux
####################################################################################################
# Argo CD Base - used as the base for both the release and dev argocd images
####################################################################################################
FROM $BASE_IMAGE as argocd-base
USER root
ENV DEBIAN_FRONTEND=noninteractive
RUN groupadd -g 999 argocd && \
useradd -r -u 999 -g argocd argocd && \
mkdir -p /home/argocd && \
chown argocd:0 /home/argocd && \
chmod g=u /home/argocd && \
apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y git git-lfs python3-pip tini gpg tzdata && \
apt-get clean && \
pip3 install awscli==1.18.80 && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh
COPY hack/gpg-wrapper.sh /usr/local/bin/gpg-wrapper.sh
COPY hack/git-verify-wrapper.sh /usr/local/bin/git-verify-wrapper.sh
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
COPY --from=builder /usr/local/bin/helm2 /usr/local/bin/helm2
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
# keep uid_entrypoint.sh for backward compatibility
RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh
# support for mounting configuration from a configmap
RUN mkdir -p /app/config/ssh && \
touch /app/config/ssh/ssh_known_hosts && \
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
RUN mkdir -p /app/config/tls
RUN mkdir -p /app/config/gpg/source && \
mkdir -p /app/config/gpg/keys && \
chown argocd /app/config/gpg/keys && \
chmod 0700 /app/config/gpg/keys
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=argocd
USER 999
WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM docker.io/library/node:12.18.4 as argocd-ui
WORKDIR /src
ADD ["ui/package.json", "ui/yarn.lock", "./"]
RUN yarn install
ADD ["ui/", "."]
ARG ARGO_VERSION=latest
ENV ARGO_VERSION=$ARGO_VERSION
RUN NODE_ENV='production' NODE_ONLINE_ENV='online' yarn build
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM docker.io/library/golang:1.16.11 as argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd
COPY go.mod go.mod
COPY go.sum go.sum
RUN go mod download
# Perform the build
COPY . .
COPY --from=argocd-ui /src/dist/app /go/src/github.com/argoproj/argo-cd/ui/dist/app
RUN make argocd-all
ARG BUILD_ALL_CLIS=true
RUN if [ "$BUILD_ALL_CLIS" = "true" ] ; then \
make BIN_NAME=argocd-darwin-amd64 GOOS=darwin argocd-all && \
make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all \
; fi
####################################################################################################
# Final image
####################################################################################################
FROM argocd-base
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/
USER root
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex
USER 999

88 Dockerfile-argocd Normal file

@@ -0,0 +1,88 @@
FROM debian:9.4 as builder
RUN apt-get update && apt-get install -y \
git \
make \
wget \
gcc \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Install go
ENV GO_VERSION 1.10.3
ENV GO_ARCH amd64
ENV GOPATH /root/go
ENV PATH ${GOPATH}/bin:/usr/local/go/bin:${PATH}
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \
rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz
# Install protoc, dep, packr
ENV PROTOBUF_VERSION 3.5.1
RUN cd /usr/local && \
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-*.zip && \
wget https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -O /usr/local/bin/dep && \
chmod +x /usr/local/bin/dep && \
wget https://github.com/gobuffalo/packr/releases/download/v1.11.0/packr_1.11.0_linux_amd64.tar.gz && \
tar -vxf packr*.tar.gz -C /tmp/ && \
mv /tmp/packr /usr/local/bin/packr
# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep
# to install all the packages of our dep lock file
COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml
COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock
RUN cd ${GOPATH}/src/dummy && \
dep ensure -vendor-only && \
mv vendor/* ${GOPATH}/src/ && \
rmdir vendor
# Perform the build
WORKDIR /root/go/src/github.com/argoproj/argo-cd
COPY . .
ARG MAKE_TARGET="cli server controller repo-server argocd-util"
RUN make ${MAKE_TARGET}
##############################################################
# This stage will pull in or build any CLI tooling we need for our final image
FROM golang:1.10 as cli-tooling
# NOTE: we frequently switch between tip of master ksonnet vs. official builds. Comment/uncomment
# the corresponding section to switch between the two options:
# Option 1: build ksonnet ourselves
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /ks
# Option 2: use official tagged ksonnet release
ENV KSONNET_VERSION=0.11.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /ks
RUN curl -o /kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /kubectl
ENV HELM_VERSION=2.9.1
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /helm
##############################################################
FROM debian:9.3
RUN apt-get update && apt-get install -y git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=cli-tooling /ks /usr/local/bin/ks
COPY --from=cli-tooling /helm /usr/local/bin/helm
COPY --from=cli-tooling /kubectl /usr/local/bin/kubectl
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=root
COPY --from=builder /root/go/src/github.com/argoproj/argo-cd/dist/* /
ARG BINARY
CMD /${BINARY}

28 Dockerfile-ci-builder Normal file

@@ -0,0 +1,28 @@
FROM golang:1.10.3
WORKDIR /tmp
RUN curl -O https://get.docker.com/builds/Linux/x86_64/docker-1.13.1.tgz && \
tar -xzf docker-1.13.1.tgz && \
mv docker/docker /usr/local/bin/docker && \
rm -rf ./docker && \
go get -u github.com/golang/dep/cmd/dep && \
go get -u gopkg.in/alecthomas/gometalinter.v2 && \
gometalinter.v2 --install
# Install kubectl
RUN curl -o /kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /kubectl && mv /kubectl /usr/local/bin/kubectl
# Install ksonnet
ENV KSONNET_VERSION=0.11.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks && \
rm -rf /tmp/ks_${KSONNET_VERSION}
# Install helm
ENV HELM_VERSION=2.9.1
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /usr/local/bin/helm

@@ -1,14 +0,0 @@
####################################################################################################
# argocd-dev
####################################################################################################
FROM argocd-base
COPY argocd /usr/local/bin/
COPY argocd-darwin-amd64 /usr/local/bin/
COPY argocd-windows-amd64.exe /usr/local/bin/
USER root
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex
USER 999

1325 Gopkg.lock generated Normal file

File diff suppressed because it is too large

55 Gopkg.toml Normal file

@@ -0,0 +1,55 @@
required = [
"github.com/gogo/protobuf/protoc-gen-gofast",
"github.com/gogo/protobuf/protoc-gen-gogofast",
"golang.org/x/sync/errgroup",
"k8s.io/code-generator/cmd/go-to-protobuf",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger",
"github.com/golang/protobuf/protoc-gen-go",
]
[[constraint]]
name = "google.golang.org/grpc"
version = "1.9.2"
[[constraint]]
name = "github.com/grpc-ecosystem/grpc-gateway"
version = "v1.3.1"
# override argo outdated dependency
[[override]]
branch = "release-1.10"
name = "k8s.io/api"
[[override]]
branch = "release-1.10"
name = "k8s.io/apimachinery"
[[constraint]]
name = "k8s.io/apiextensions-apiserver"
branch = "release-1.10"
[[constraint]]
branch = "release-1.10"
name = "k8s.io/code-generator"
[[override]]
branch = "release-7.0"
name = "k8s.io/client-go"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.1"
[[constraint]]
name = "github.com/ksonnet/ksonnet"
version = "v0.11.0"
[[constraint]]
name = "github.com/gobuffalo/packr"
version = "v1.11.0"
# override ksonnet's logrus dependency
[[override]]
name = "github.com/sirupsen/logrus"
revision = "ea8897e79973357ba785ac2533559a6297e83c44"

535 Makefile

@@ -1,150 +1,33 @@
PACKAGE=github.com/argoproj/argo-cd/v2/common
PACKAGE=github.com/argoproj/argo-cd
CURRENT_DIR=$(shell pwd)
DIST_DIR=${CURRENT_DIR}/dist
CLI_NAME=argocd
BIN_NAME=argocd
HOST_OS:=$(shell go env GOOS)
HOST_ARCH:=$(shell go env GOARCH)
VERSION=$(shell cat ${CURRENT_DIR}/VERSION)
BUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
GIT_COMMIT=$(shell git rev-parse HEAD)
GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
VOLUME_MOUNT=$(shell if test "$(go env GOOS)" = "darwin"; then echo ":delegated"; elif test selinuxenabled; then echo ":delegated"; else echo ""; fi)
KUBECTL_VERSION=$(shell go list -m all | grep k8s.io/client-go | cut -d ' ' -f5)
GOPATH?=$(shell if test -x `which go`; then go env GOPATH; else echo "$(HOME)/go"; fi)
GOCACHE?=$(HOME)/.cache/go-build
DOCKER_SRCDIR?=$(GOPATH)/src
DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
ARGOCD_PROCFILE?=Procfile
# Strict mode has been disabled in the latest versions of mkdocs-material.
# Thus we point to an older image of mkdocs-material matching the version used by argo-cd.
MKDOCS_DOCKER_IMAGE?=squidfunk/mkdocs-material:4.1.1
MKDOCS_RUN_ARGS?=
# Configuration for building argocd-test-tools image
TEST_TOOLS_NAMESPACE?=
TEST_TOOLS_IMAGE=argocd-test-tools
TEST_TOOLS_TAG?=latest
ifdef TEST_TOOLS_NAMESPACE
TEST_TOOLS_PREFIX=${TEST_TOOLS_NAMESPACE}/
endif
# You can change the ports where ArgoCD components will be listening on by
# setting the appropriate environment variables before running make.
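# For example (illustrative only), to run the local e2e API server on a different port,
# assuming the start-e2e-local target defined elsewhere in this Makefile:
#   ARGOCD_E2E_APISERVER_PORT=8888 make start-e2e-local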
ARGOCD_E2E_APISERVER_PORT?=8080
ARGOCD_E2E_REPOSERVER_PORT?=8081
ARGOCD_E2E_REDIS_PORT?=6379
ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=
ARGOCD_E2E_TEST_TIMEOUT?=20m
ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
ARGOCD_LINT_GOGC?=20
# Depending on where we are (legacy or non-legacy pwd), we need to use
# different Docker volume mounts for our source tree
LEGACY_PATH=$(GOPATH)/src/github.com/argoproj/argo-cd
ifeq ("$(PWD)","$(LEGACY_PATH)")
DOCKER_SRC_MOUNT="$(DOCKER_SRCDIR):/go/src$(VOLUME_MOUNT)"
else
DOCKER_SRC_MOUNT="$(PWD):/go/src/github.com/argoproj/argo-cd$(VOLUME_MOUNT)"
endif
# Runs any command in the argocd-test-utils container in server mode
# Server mode container will start with uid 0 and drop privileges during runtime
define run-in-test-server
docker run --rm -it \
--name argocd-test-server \
-u $(shell id -u):$(shell id -g) \
-e USER_ID=$(shell id -u) \
-e HOME=/home/user \
-e GOPATH=/go \
-e GOCACHE=/tmp/go-build-cache \
-e ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
-e ARGOCD_E2E_TEST=$(ARGOCD_E2E_TEST) \
-e ARGOCD_E2E_YARN_HOST=$(ARGOCD_E2E_YARN_HOST) \
-e ARGOCD_E2E_DISABLE_AUTH=$(ARGOCD_E2E_DISABLE_AUTH) \
-e ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} \
-e ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} \
-e ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} \
-v ${DOCKER_SRC_MOUNT} \
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
-v /tmp:/tmp${VOLUME_MOUNT} \
-w ${DOCKER_WORKDIR} \
-p ${ARGOCD_E2E_APISERVER_PORT}:8080 \
-p 4000:4000 \
-p 5000:5000 \
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
bash -c "$(1)"
endef
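# Illustrative usage from a recipe (mirrors how run-in-test-client is invoked further below):
#   $(call run-in-test-server,make test-local)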
# Runs any command in the argocd-test-utils container in client mode
define run-in-test-client
docker run --rm -it \
--name argocd-test-client \
-u $(shell id -u):$(shell id -g) \
-e HOME=/home/user \
-e GOPATH=/go \
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
-e GOCACHE=/tmp/go-build-cache \
-e ARGOCD_LINT_GOGC=$(ARGOCD_LINT_GOGC) \
-v ${DOCKER_SRC_MOUNT} \
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
-v /tmp:/tmp${VOLUME_MOUNT} \
-w ${DOCKER_WORKDIR} \
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
bash -c "$(1)"
endef
#
define exec-in-test-server
docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
endef
PATH:=$(PATH):$(PWD)/hack
# docker image publishing options
DOCKER_PUSH?=false
IMAGE_NAMESPACE?=
# perform static compilation
STATIC_BUILD?=true
# build development images
DEV_IMAGE?=false
ARGOCD_GPG_ENABLED?=true
ARGOCD_E2E_APISERVER_PORT?=8080
PACKR_CMD=$(shell if [ "`which packr`" ]; then echo "packr"; else echo "go run vendor/github.com/gobuffalo/packr/packr/main.go"; fi)
override LDFLAGS += \
-X ${PACKAGE}.version=${VERSION} \
-X ${PACKAGE}.buildDate=${BUILD_DATE} \
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
-X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}
ifeq (${STATIC_BUILD}, true)
override LDFLAGS += -extldflags "-static"
endif
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}
# docker image publishing options
DOCKER_PUSH=false
IMAGE_TAG=latest
ifneq (${GIT_TAG},)
IMAGE_TAG=${GIT_TAG}
LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
else
IMAGE_TAG?=latest
endif
ifneq (${IMAGE_NAMESPACE},)
override LDFLAGS += -X ${PACKAGE}/install.imageNamespace=${IMAGE_NAMESPACE}
endif
ifneq (${IMAGE_TAG},)
override LDFLAGS += -X ${PACKAGE}/install.imageTag=${IMAGE_TAG}
endif
ifeq (${DOCKER_PUSH},true)
@@ -158,273 +41,97 @@ IMAGE_PREFIX=${IMAGE_NAMESPACE}/
endif
.PHONY: all
all: cli image
# We have some legacy requirements for being checked out within $GOPATH.
# The ensure-gopath target can be used as a dependency to ensure we are running
# within these boundaries.
.PHONY: ensure-gopath
ensure-gopath:
ifneq ("$(PWD)","$(LEGACY_PATH)")
@echo "Due to legacy requirements for codegen, repository needs to be checked out within \$$GOPATH"
@echo "Location of this repo should be '$(LEGACY_PATH)' but is '$(PWD)'"
@exit 1
endif
.PHONY: gogen
gogen: ensure-gopath
export GO111MODULE=off
go generate ./util/argo/...
all: cli server-image controller-image repo-server-image argocd-util
.PHONY: protogen
protogen: ensure-gopath
export GO111MODULE=off
protogen:
./hack/generate-proto.sh
.PHONY: openapigen
openapigen: ensure-gopath
export GO111MODULE=off
./hack/update-openapi.sh
.PHONY: clientgen
clientgen: ensure-gopath
export GO111MODULE=off
clientgen:
./hack/update-codegen.sh
.PHONY: clidocsgen
clidocsgen: ensure-gopath
go run tools/cmd-docs/main.go
.PHONY: codegen-local
codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local
rm -rf vendor/
.PHONY: codegen
codegen: test-tools-image
$(call run-in-test-client,make codegen-local)
codegen: protogen clientgen
# NOTE: we use packr to do the build instead of go, since we embed .yaml files into the go binary.
# This enables ease of maintenance of the yaml files.
.PHONY: cli
cli: test-tools-image
$(call run-in-test-client, GOOS=${HOST_OS} GOARCH=${HOST_ARCH} make cli-local)
cli: clean-debug
${PACKR_CMD} build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
.PHONY: cli-local
cli-local: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
.PHONY: release-cli
release-cli: clean-debug image
docker create --name tmp-argocd-linux $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)
docker cp tmp-argocd-linux:/usr/local/bin/argocd ${DIST_DIR}/argocd-linux-amd64
docker cp tmp-argocd-linux:/usr/local/bin/argocd-darwin-amd64 ${DIST_DIR}/argocd-darwin-amd64
docker cp tmp-argocd-linux:/usr/local/bin/argocd-windows-amd64.exe ${DIST_DIR}/argocd-windows-amd64.exe
.PHONY: cli-linux
cli-linux: clean-debug
docker build --iidfile /tmp/argocd-linux-id --target builder --build-arg MAKE_TARGET="cli IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-linux-amd64" -f Dockerfile-argocd .
docker create --name tmp-argocd-linux `cat /tmp/argocd-linux-id`
docker cp tmp-argocd-linux:/root/go/src/github.com/argoproj/argo-cd/dist/argocd-linux-amd64 dist/
docker rm tmp-argocd-linux
.PHONY: test-tools-image
test-tools-image:
docker build --build-arg UID=$(shell id -u) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
.PHONY: cli-darwin
cli-darwin: clean-debug
docker build --iidfile /tmp/argocd-darwin-id --target builder --build-arg MAKE_TARGET="cli GOOS=darwin IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-darwin-amd64" -f Dockerfile-argocd .
docker create --name tmp-argocd-darwin `cat /tmp/argocd-darwin-id`
docker cp tmp-argocd-darwin:/root/go/src/github.com/argoproj/argo-cd/dist/argocd-darwin-amd64 dist/
docker rm tmp-argocd-darwin
.PHONY: manifests-local
manifests-local:
.PHONY: argocd-util
argocd-util: clean-debug
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
.PHONY: install-manifest
install-manifest:
if [ "${IMAGE_NAMESPACE}" = "" ] ; then echo "IMAGE_NAMESPACE must be set to build install manifest" ; exit 1 ; fi
./hack/update-manifests.sh
.PHONY: manifests
manifests: test-tools-image
$(call run-in-test-client,make manifests-local IMAGE_NAMESPACE='${IMAGE_NAMESPACE}' IMAGE_TAG='${IMAGE_TAG}')
# consolidated binary for cli, util, server, repo-server, controller
.PHONY: argocd-all
argocd-all: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
.PHONY: server
server: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
.PHONY: server-image
server-image:
docker build --build-arg BINARY=argocd-server -t $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) ; fi
.PHONY: repo-server
repo-server:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
.PHONY: repo-server-image
repo-server-image:
docker build --build-arg BINARY=argocd-repo-server -t $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) ; fi
.PHONY: controller
controller:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
.PHONY: image
ifeq ($(DEV_IMAGE), true)
# The "dev" image builds the binaries from the user's desktop environment (instead of in Docker),
# which speeds up builds. Dockerfile.dev needs to be copied into dist to perform the build, since
# the dist directory is under .dockerignore. An example invocation follows this target.
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
image:
docker build -t argocd-base --target argocd-base .
docker build -t argocd-ui --target argocd-ui .
find ./ui/dist -type f -not -name gitkeep -delete
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-darwin-amd64 ./cmd
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-windows-amd64.exe ./cmd
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex
cp Dockerfile.dev dist
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
else
image:
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
endif
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
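A hedged example of the faster developer flow described above; the namespace is illustrative, and IMAGE_TAG is derived by the Makefile itself in dev mode:
```shell
# Sketch: build the image from locally compiled binaries instead of the full
# in-Docker build. DEV_IMAGE and IMAGE_NAMESPACE values are illustrative.
make image DEV_IMAGE=true IMAGE_NAMESPACE=mydockerid DOCKER_PUSH=false
```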
.PHONY: controller-image
controller-image:
docker build --build-arg BINARY=argocd-application-controller -t $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) ; fi
.PHONY: armimage
# The "BUILD_ALL_CLIS" argument is to skip building the CLIs for darwin and windows
# which would take a really long time.
armimage:
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm . --build-arg BUILD_ALL_CLIS="false"
.PHONY: cli-image
cli-image:
docker build --build-arg BINARY=argocd -t $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) ; fi
.PHONY: builder-image
builder-image:
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) --target builder .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) ; fi
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) -f Dockerfile-ci-builder .
.PHONY: mod-download
mod-download: test-tools-image
$(call run-in-test-client,go mod download)
.PHONY: mod-download-local
mod-download-local:
go mod download && go mod tidy # go mod download changes go.sum https://github.com/golang/go/issues/42970
.PHONY: mod-vendor
mod-vendor: test-tools-image
$(call run-in-test-client,go mod vendor)
.PHONY: mod-vendor-local
mod-vendor-local: mod-download-local
go mod vendor
# Deprecated - replaced by install-local-tools
.PHONY: install-lint-tools
install-lint-tools:
./hack/install.sh lint-tools
# Run linter on the code
.PHONY: lint
lint: test-tools-image
$(call run-in-test-client,make lint-local)
lint:
gometalinter.v2 --config gometalinter.json ./...
# Run linter on the code (local version)
.PHONY: lint-local
lint-local:
golangci-lint --version
# NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC
# See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose --timeout 300s
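If the linter is OOM-killed as noted above, the GC target can be lowered; the value below is only an example and assumes ARGOCD_LINT_GOGC can be overridden from the make command line:
```shell
# Sketch: run the local linter with a more aggressive GC to reduce peak memory.
make lint-local ARGOCD_LINT_GOGC=20
```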
.PHONY: lint-ui
lint-ui: test-tools-image
$(call run-in-test-client,make lint-ui-local)
.PHONY: lint-ui-local
lint-ui-local:
cd ui && yarn lint
# Build all Go code
.PHONY: build
build: test-tools-image
mkdir -p $(GOCACHE)
$(call run-in-test-client, make build-local)
# Build all Go code (local version)
.PHONY: build-local
build-local:
go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
# Run all unit tests
#
# If TEST_MODULE is set (to a fully qualified module name), only this specific
# module will be tested.
.PHONY: test
test: test-tools-image
mkdir -p $(GOCACHE)
$(call run-in-test-client,make TEST_MODULE=$(TEST_MODULE) test-local)
test:
go test -v `go list ./... | grep -v "github.com/argoproj/argo-cd/test/e2e"`
# Run all unit tests (local version)
.PHONY: test-local
test-local:
if test "$(TEST_MODULE)" = ""; then \
./hack/test.sh -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi
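For example, to limit the run to a single module as described above (the module path is illustrative):
```shell
# Sketch: run only one package's unit tests, containerized or locally.
make test TEST_MODULE=github.com/argoproj/argo-cd/v2/util/argo
make test-local TEST_MODULE=github.com/argoproj/argo-cd/v2/util/argo
```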
.PHONY: test-race
test-race: test-tools-image
mkdir -p $(GOCACHE)
$(call run-in-test-client,make TEST_MODULE=$(TEST_MODULE) test-race-local)
# Run all unit tests, with data race detection, skipping known failures (local version)
.PHONY: test-race-local
test-race-local:
if test "$(TEST_MODULE)" = ""; then \
./hack/test.sh -race -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
./hack/test.sh -race -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi
# Run the E2E test suite. E2E test servers (see start-e2e target) must be
# started before.
.PHONY: test-e2e
test-e2e:
$(call exec-in-test-server,make test-e2e-local)
go test -v -failfast -timeout 20m ./test/e2e
# Run the E2E test suite (local version)
.PHONY: test-e2e-local
test-e2e-local: cli-local
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
export GO111MODULE=off
ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v ./test/e2e
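The intended two-step flow, sketched below: bring the e2e servers up first and leave them running, then execute the suite from a second shell:
```shell
# Terminal 1: start the e2e fixture servers (API server, repo server, redis, ...)
make start-e2e

# Terminal 2: run the end-to-end suite against the running servers
make test-e2e
```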
# Spawns a shell in the test server container for debugging purposes
debug-test-server: test-tools-image
$(call run-in-test-server,/bin/bash)
# Spawns a shell in the test client container for debugging purposes
debug-test-client: test-tools-image
$(call run-in-test-client,/bin/bash)
# Starts e2e server in a container
.PHONY: start-e2e
start-e2e: test-tools-image
docker version
mkdir -p ${GOCACHE}
$(call run-in-test-server,make ARGOCD_PROCFILE=test/container/Procfile start-e2e-local)
# Starts e2e server locally (or within a container)
.PHONY: start-e2e-local
start-e2e-local:
kubectl create ns argocd-e2e || true
kubectl config set-context --current --namespace=argocd-e2e
kustomize build test/manifests/base | kubectl apply -f -
# Create GPG keys and source directories
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source
mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin
# set paths for locally managed ssh known hosts and tls certs data
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_E2E_DISABLE_AUTH=false \
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in the binary by golang embed
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes
.PHONY: clean-debug
clean-debug:
-find ${CURRENT_DIR} -name debug.test | xargs rm -f
@@ -433,115 +140,13 @@ clean-debug:
clean: clean-debug
-rm -rf ${CURRENT_DIR}/dist
.PHONY: start
start: test-tools-image
docker version
$(call run-in-test-server,make ARGOCD_PROCFILE=test/container/Procfile start-local ARGOCD_START=${ARGOCD_START})
# Starts a local instance of ArgoCD
.PHONY: start-local
start-local: mod-vendor-local dep-ui-local
# check we can connect to Docker to start Redis
killall goreman || true
kubectl create ns argocd || true
rm -rf /tmp/argocd-local
mkdir -p /tmp/argocd-local
mkdir -p /tmp/argocd-local/gpg/keys && chmod 0700 /tmp/argocd-local/gpg/keys
mkdir -p /tmp/argocd-local/gpg/source
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=false \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
ARGOCD_E2E_TEST=false \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
# Run goreman start with the exclude option; provide the exclude env variable with a list of services
.PHONY: run
run:
bash ./hack/goreman-start.sh
# Runs pre-commit validation with the virtualized toolchain
.PHONY: pre-commit
pre-commit: codegen build lint test
# Runs pre-commit validation with the local toolchain
.PHONY: pre-commit-local
pre-commit-local: codegen-local build-local lint-local test-local
.PHONY: precheckin
precheckin: test lint
.PHONY: release-precheck
release-precheck: manifests
release-precheck: install-manifest
@if [ "$(GIT_TREE_STATE)" != "clean" ]; then echo 'git tree state is $(GIT_TREE_STATE)' ; exit 1; fi
@if [ -z "$(GIT_TAG)" ]; then echo 'commit must be tagged to perform release' ; exit 1; fi
@if [ "$(GIT_TAG)" != "v`cat VERSION`" ]; then echo 'VERSION does not match git tag'; exit 1; fi
.PHONY: release
release: pre-commit release-precheck image release-cli
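A hedged sketch of the preconditions checked by release-precheck: a clean working tree and a checked-out commit tagged to match the VERSION file (namespace below is illustrative):
```shell
# Sketch: tag the commit to match VERSION, then run the release target.
git tag "v$(cat VERSION)"
make release IMAGE_NAMESPACE=mydockerid DOCKER_PUSH=true
```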
.PHONY: build-docs-local
build-docs-local:
mkdocs build
.PHONY: build-docs
build-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} build
.PHONY: serve-docs-local
serve-docs-local:
mkdocs serve
.PHONY: serve-docs
serve-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} serve -a 0.0.0.0:8000
.PHONY: lint-docs
lint-docs:
# https://github.com/dkhamsing/awesome_bot
find docs -name '*.md' -exec grep -l http {} + | xargs docker run --rm -v $(PWD):/mnt:ro dkhamsing/awesome_bot -t 3 --allow-dupe --allow-redirect --white-list `cat white-list | grep -v "#" | tr "\n" ','` --skip-save-results --
# Verify that kubectl can connect to your K8s cluster from Docker
.PHONY: verify-kube-connect
verify-kube-connect: test-tools-image
$(call run-in-test-client,kubectl version)
# Show the Go version of local and virtualized environments
.PHONY: show-go-version
show-go-version: test-tools-image
@echo -n "Local Go version: "
@go version
@echo -n "Docker Go version: "
$(call run-in-test-client,go version)
# Installs all tools required to build and test ArgoCD locally
.PHONY: install-tools-local
install-tools-local: install-test-tools-local install-codegen-tools-local install-go-tools-local
# Installs all tools required for running unit & end-to-end tests (Linux packages)
.PHONY: install-test-tools-local
install-test-tools-local:
./hack/install.sh kustomize-linux
./hack/install.sh ksonnet-linux
./hack/install.sh helm2-linux
./hack/install.sh helm-linux
# Installs all tools required for running codegen (Linux packages)
.PHONY: install-codegen-tools-local
install-codegen-tools-local:
./hack/install.sh codegen-tools
# Installs all tools required for running codegen (Go packages)
.PHONY: install-go-tools-local
install-go-tools-local:
./hack/install.sh codegen-go-tools
.PHONY: dep-ui
dep-ui: test-tools-image
$(call run-in-test-client,make dep-ui-local)
dep-ui-local:
cd ui && yarn install
start-test-k8s:
go run ./hack/k8s
checksums:
sha256sum ./dist/$(BIN_NAME)-* | awk -F './dist/' '{print $$1 $$2}' > ./dist/$(BIN_NAME)-$(TARGET_VERSION)-checksums.txt
release: release-precheck precheckin cli-darwin cli-linux server-image controller-image repo-server-image cli-image

OWNERS

@@ -3,21 +3,6 @@ owners:
- jessesuen
approvers:
- alexec
- alexmt
- jannfis
- jessesuen
- jgwest
- mayzhang2000
- rbreeze
reviewers:
- dthomson25
- tetchel
- wtam2018
- ishitasequeira
- reginapizza
- hblixt
- chetan-rns
- wanghong230
- pasha-codefresh
- merenbach


@@ -1,9 +1,4 @@
controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} "
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.2 dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.7-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-/tmp/argo-e2e/app/config/plugin} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} go run ./cmd/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
git-server: test/fixture/testrepos/start-git.sh
helm-registry: test/fixture/testrepos/start-helm-registry.sh
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
controller: go run ./cmd/argocd-application-controller/main.go
api-server: go run ./cmd/argocd-server/main.go --insecure --disable-auth
repo-server: go run ./cmd/argocd-repo-server/main.go --loglevel debug
dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p 5556:5556 -p 5557:5557 -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/coreos/dex:v2.10.0 serve /dex.yaml"


@@ -1,8 +1,3 @@
[![Integration tests](https://github.com/argoproj/argo-cd/workflows/Integration%20tests/badge.svg?branch=master)](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22)
[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
[![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd)
[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-cd?label=argo-cd)](https://github.com/argoproj/argo-cd/releases/latest)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4486/badge)](https://bestpractices.coreinfrastructure.org/projects/4486)
# Argo CD - Declarative Continuous Delivery for Kubernetes
@@ -10,65 +5,65 @@
Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.
![Argo CD UI](docs/assets/argocd-ui.gif)
[![Argo CD Demo](https://img.youtube.com/vi/0WAm0y2vLIo/0.jpg)](https://youtu.be/0WAm0y2vLIo)
![Argo CD UI](docs/argocd-ui.gif)
## Why Argo CD?
1. Application definitions, configurations, and environments should be declarative and version controlled.
1. Application deployment and lifecycle management should be automated, auditable, and easy to understand.
Application definitions, configurations, and environments should be declarative and version controlled.
Application deployment and lifecycle management should be automated, auditable, and easy to understand.
## Who uses Argo CD?
## Getting Started
[Official Argo CD user list](USERS.md)
Follow our [getting started guide](docs/getting_started.md). Further [documentation](docs/)
is provided for additional features.
## Documentation
## How it works
To learn more about Argo CD [go to the complete documentation](https://argo-cd.readthedocs.io/).
Check live demo at https://cd.apps.argoproj.io/.
Argo CD follows the **GitOps** pattern of using git repositories as the source of truth for defining
the desired application state. Kubernetes manifests can be specified in several ways:
* [ksonnet](https://ksonnet.io) applications
* [helm](https://helm.sh) charts
* Simple directory of YAML/JSON manifests
## Community
Argo CD automates the deployment of the desired application states in the specified target environments.
Application deployments can track updates to branches, tags, or be pinned to a specific version of
manifests at a git commit. See [tracking strategies](docs/tracking_strategies.md) for additional
details about the different tracking strategies available.
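As a hedged illustration of those tracking options, an application can be pointed at a branch, tag, or commit through the CLI (repository and values below are illustrative):
```shell
# Sketch: create an application that tracks a particular revision of a repo.
argocd app create guestbook \
  --repo https://github.com/argoproj/argocd-example-apps.git \
  --path guestbook \
  --dest-server https://kubernetes.default.svc \
  --dest-namespace default \
  --revision HEAD   # could equally be a branch, tag, or commit SHA
```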
### Contribution, Discussion and Support
## Architecture
You can reach the Argo CD community and developers via the following channels:
![Argo CD Architecture](docs/argocd_architecture.png)
* Q & A : [Github Discussions](https://github.com/argoproj/argo-cd/discussions)
* Chat : [The #argo-cd Slack channel](https://argoproj.github.io/community/join-slack)
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)
* User Community meeting: [Every other Wednesday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
Argo CD is implemented as a kubernetes controller which continuously monitors running applications
and compares the current, live state against the desired target state (as specified in the git repo).
A deployed application whose live state deviates from the target state is considered `OutOfSync`.
Argo CD reports & visualizes the differences, while providing facilities to automatically or
manually sync the live state back to the desired target state. Any modifications made to the desired
target state in the git repo can be automatically applied and reflected in the specified target
environments.
For additional details, see [architecture overview](docs/architecture.md).
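A short sketch of how that reconciliation surfaces through the CLI (the application name is illustrative):
```shell
# Sketch: inspect drift between live and desired state, then sync it back.
argocd app get guestbook      # shows sync status, e.g. OutOfSync
argocd app diff guestbook     # prints live vs. target differences
argocd app sync guestbook     # applies the target state from git
```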
Participation in the Argo CD project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)
## Features
* Automated deployment of applications to specified target environments
* Continuous monitoring of deployed applications
* Automated or manual syncing of applications to its desired state
* Web and CLI based visualization of applications and differences between live vs. desired state
* Rollback/Roll-anywhere to any application state committed in the git repository
* Health assessment statuses on all components of the application
* SSO Integration (OIDC, OAuth2, LDAP, SAML 2.0, GitLab, Microsoft, LinkedIn)
* Webhook Integration (GitHub, BitBucket, GitLab)
* PreSync, Sync, PostSync hooks to support complex application rollouts (e.g. blue/green & canary upgrades)
* Audit trails for application events and API calls
* Parameter overrides for overriding ksonnet/helm parameters in git
### Blogs and Presentations
## Development Status
* Argo CD is being used in production to deploy SaaS services at Intuit
1. [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU)
1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM)
1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ)
1. [Couchbase - How To Run a Database Cluster in Kubernetes Using Argo CD](https://youtu.be/nkPoPaVzExY)
1. [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY)
1. [Environments Based On Pull Requests (PRs): Using Argo CD To Apply GitOps Principles On Previews](https://youtu.be/cpAaI8p4R60)
1. [Argo CD: Applying GitOps Principles To Manage Production Environment In Kubernetes](https://youtu.be/vpWQeoaiRM4)
1. [Creating Temporary Preview Environments Based On Pull Requests With Argo CD And Codefresh](https://codefresh.io/continuous-deployment/creating-temporary-preview-environments-based-pull-requests-argo-cd-codefresh/)
1. [Tutorial: Everything You Need To Become A GitOps Ninja](https://www.youtube.com/watch?v=r50tRQjisxw) 90m tutorial on GitOps and Argo CD.
1. [Comparison of Argo CD, Spinnaker, Jenkins X, and Tekton](https://www.inovex.de/blog/spinnaker-vs-argo-cd-vs-tekton-vs-jenkins-x/)
1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager 3.1.2](https://www.ibm.com/cloud/blog/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2)
1. [GitOps for Kubeflow using Argo CD](https://v0-6.kubeflow.org/docs/use-cases/gitops-for-kubeflow/)
1. [GitOps Toolsets on Kubernetes with CircleCI and Argo CD](https://www.digitalocean.com/community/tutorials/webinar-series-gitops-tool-sets-on-kubernetes-with-circleci-and-argo-cd)
1. [CI/CD in Light Speed with K8s and Argo CD](https://www.youtube.com/watch?v=OdzH82VpMwI&feature=youtu.be)
1. [Machine Learning as Code](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU). Among other things, describes how Kubeflow uses Argo CD to implement GitOps for ML
1. [Argo CD - GitOps Continuous Delivery for Kubernetes](https://www.youtube.com/watch?v=aWDIQMbp1cc&feature=youtu.be&t=1m4s)
1. [Introduction to Argo CD : Kubernetes DevOps CI/CD](https://www.youtube.com/watch?v=2WSJF7d8dUg&feature=youtu.be)
1. [GitOps Deployment and Kubernetes - using Argo CD](https://medium.com/riskified-technology/gitops-deployment-and-kubernetes-f1ab289efa4b)
1. [Deploy Argo CD with Ingress and TLS in Three Steps: No YAML Yak Shaving Required](https://itnext.io/deploy-argo-cd-with-ingress-and-tls-in-three-steps-no-yaml-yak-shaving-required-bc536d401491)
1. [GitOps Continuous Delivery with Argo and Codefresh](https://codefresh.io/events/cncf-member-webinar-gitops-continuous-delivery-argo-codefresh/)
1. [Stay up to date with Argo CD and Renovate](https://mjpitz.com/blog/2020/12/03/renovate-your-gitops/)
1. [Setting up Argo CD with Helm](https://www.arthurkoziel.com/setting-up-argocd-with-helm/)
1. [Applied GitOps with Argo CD](https://thenewstack.io/applied-gitops-with-argocd/)
1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)
1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)
1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)
## Roadmap
* Auto-sync toggle to directly apply git state changes to live state
* Service account/access key management for CI pipelines
* Support for additional config management tools (Kustomize?)
* Revamped UI, and feature parity with CLI
* Customizable application actions


@@ -1,66 +0,0 @@
# Security Policy for Argo CD
Version: **v1.2 (2020-08-07)**
## Preface
As a deployment tool, Argo CD needs to have production access, which makes
security a very important topic. The Argoproj team takes security very
seriously and is continuously working on improving it.
## A word about security scanners
Many organisations these days employ security scanners to validate their
container images before letting them on their clusters, and that is a good
thing. However, the quality and results of these scanners vary greatly;
many of them produce false positives and require people to look at the
issues reported and validate them for correctness. A great example is
that some scanners report kernel vulnerabilities for container images
just because they are derived from some distribution.
We kindly ask you to not raise issues or contact us regarding any issues
that are found by your security scanner. Many of those produce a lot of false
positives, and many of these issues don't affect Argo CD. We do have scanners
in place for our code, dependencies and container images that we publish. We
are well aware of the issues that may affect Argo CD and are constantly
working on the remediation of those that affect Argo CD and our users.
If you believe that we might have missed an issue that we should take a look
at (that can happen), then please discuss it with us. But please validate
that assumption at least roughly before reaching out.
## Supported Versions
We currently support the most recent release (`N`, e.g. `1.8`) and the release
previous to the most recent one (`N-1`, e.g. `1.7`). With the release of
`N+1`, `N-1` drops out of support and `N` becomes `N-1`.
We regularly perform patch releases (e.g. `1.8.5` and `1.7.12`) for the
supported versions, which will contain fixes for security vulnerabilities and
important bugs. Prior releases might receive critical security fixes on a
best-effort basis; however, it cannot be guaranteed that security fixes get
back-ported to these unsupported versions.
In rare cases where a security fix needs a complex re-design of a feature or is
otherwise very intrusive, and there's a workaround available, we may decide to
provide a forward-fix only, e.g. one released with the next minor release, instead
of releasing it within a patch branch for the currently supported releases.
## Reporting a Vulnerability
If you find a security related bug in ArgoCD, we kindly ask you for responsible
disclosure and for giving us appropriate time to react, analyze and develop a
fix to mitigate the found security vulnerability.
We will do our best to react quickly to your inquiry, and to coordinate a fix
and disclosure with you. Sometimes it might take a little longer for us to
react (e.g. out-of-office conditions), so please bear with us in these cases.
We will publish security advisories using the
[GitHub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
feature to keep our community well informed, and will credit you for your
findings (unless you prefer to stay anonymous, of course).
Please report vulnerabilities by e-mail to the following address:
* cncf-argo-security@lists.cncf.io


@@ -1,8 +0,0 @@
# Defined below are the security contacts for this repo.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://argo-cd.readthedocs.io/en/latest/security_considerations/#reporting-vulnerabilities
alexmt
edlee2121
jessesuen

USERS.md

@@ -1,180 +0,0 @@
## Who uses Argo CD?
As the Argo Community grows, we'd like to keep track of our users. Please send a PR with your organization name if you are using Argo CD.
Currently, the following organizations are **officially** using Argo CD:
1. [127Labs](https://127labs.com/)
1. [3Rein](https://www.3rein.com/)
1. [7shifts](https://www.7shifts.com/)
1. [Adevinta](https://www.adevinta.com/)
1. [Adventure](https://jp.adventurekk.com/)
1. [Akuity](https://akuity.io/)
1. [Alibaba Group](https://www.alibabagroup.com/)
1. [Ambassador Labs](https://www.getambassador.io/)
1. [Ant Group](https://www.antgroup.com/)
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [AppDirect](https://www.appdirect.com)
1. [Arctiq Inc.](https://www.arctiq.ca)
1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/)
1. [Axual B.V.](https://axual.com)
1. [Baloise](https://www.baloise.com)
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
1. [Beat](https://thebeat.co/en/)
1. [Beez Innovation Labs](https://www.beezlabs.com/)
1. [BioBox Analytics](https://biobox.io)
1. [BigPanda](https://bigpanda.io)
1. [BMW Group](https://www.bmwgroup.com/)
1. [Camptocamp](https://camptocamp.com)
1. [Capital One](https://www.capitalone.com)
1. [CARFAX](https://www.carfax.com)
1. [Celonis](https://www.celonis.com/)
1. [Chime](https://www.chime.com)
1. [Codefresh](https://www.codefresh.io/)
1. [Codility](https://www.codility.com/)
1. [Commonbond](https://commonbond.co/)
1. [Crédit Agricole CIB](https://www.ca-cib.com)
1. [CROZ d.o.o.](https://croz.net/)
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
1. [Cybozu](https://cybozu-global.com)
1. [Chargetrip](https://chargetrip.com)
1. [D2iQ](https://www.d2iq.com)
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [EDF Renewables](https://www.edf-re.com/)
1. [edX](https://edx.org)
1. [Electronic Arts Inc. ](https://www.ea.com)
1. [Elium](https://www.elium.com)
1. [END.](https://www.endclothing.com/)
1. [Energisme](https://energisme.com/)
1. [Fave](https://myfave.com)
1. [Future PLC](https://www.futureplc.com/)
1. [Garner](https://www.garnercorp.com)
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
1. [Generali Deutschland AG](https://www.generali.de/)
1. [Gitpod](https://www.gitpod.io)
1. [Glovo](https://www.glovoapp.com)
1. [GMETRI](https://gmetri.com/)
1. [Gojek](https://www.gojek.io/)
1. [Greenpass](https://www.greenpass.com.br/)
1. [Handelsbanken](https://www.handelsbanken.se)
1. [Healy](https://www.healyworld.net)
1. [hipages](https://hipages.com.au/)
1. [Hiya](https://hiya.com)
1. [Honestbank](https://honestbank.com)
1. [IBM](https://www.ibm.com/)
1. [IITS-Consulting](https://iits-consulting.de)
1. [Index Exchange](https://www.indexexchange.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Intuit](https://www.intuit.com/)
1. [Joblift](https://joblift.com/)
1. [JovianX](https://www.jovianx.com/)
1. [Karrot](https://www.daangn.com/)
1. [KarrotPay](https://www.daangnpay.com/)
1. [Kasa](https://kasa.co.kr/)
1. [Keptn](https://keptn.sh)
1. [Kinguin](https://www.kinguin.net/)
1. [KintoHub](https://www.kintohub.com/)
1. [KompiTech GmbH](https://www.kompitech.com/)
1. [LexisNexis](https://www.lexisnexis.com/)
1. [LINE](https://linecorp.com/en/)
1. [Lytt](https://www.lytt.co/)
1. [Major League Baseball](https://mlb.com)
1. [Mambu](https://www.mambu.com/)
1. [Mattermost](https://www.mattermost.com)
1. [Max Kelsen](https://www.maxkelsen.com/)
1. [MindSpore](https://mindspore.cn)
1. [Mirantis](https://mirantis.com/)
1. [mixi Group](https://mixi.co.jp/)
1. [Moengage](https://www.moengage.com/)
1. [Money Forward](https://corp.moneyforward.com/en/)
1. [MOO Print](https://www.moo.com/)
1. [MTN Group](https://www.mtn.com/)
1. [Natura &Co](https://naturaeco.com/)
1. [New Relic](https://newrelic.com/)
1. [Nextdoor](https://nextdoor.com/)
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
1. [Octadesk](https://octadesk.com)
1. [openEuler](https://openeuler.org)
1. [openGauss](https://opengauss.org/)
1. [openLooKeng](https://openlookeng.io)
1. [OpenSaaS Studio](https://opensaas.studio)
1. [Opensurvey](https://www.opensurvey.co.kr/)
1. [Optoro](https://www.optoro.com/)
1. [Orbital Insight](https://orbitalinsight.com/)
1. [Packlink](https://www.packlink.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [Pipefy](https://www.pipefy.com/)
1. [Polarpoint.io](https://polarpoint.io)
1. [Preferred Networks](https://preferred.jp/en/)
1. [Prudential](https://prudential.com.sg)
1. [PUBG](https://www.pubg.com)
1. [Qonto](https://qonto.com)
1. [QuintoAndar](https://quintoandar.com.br)
1. [Quipper](https://www.quipper.com/)
1. [Recreation.gov](https://www.recreation.gov/)
1. [Red Hat](https://www.redhat.com/)
1. [Riskified](https://www.riskified.com/)
1. [Robotinfra](https://www.robotinfra.com)
1. [Saildrone](https://www.saildrone.com/)
1. [Saloodo! GmbH](https://www.saloodo.com)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [Snyk](https://snyk.io/)
1. [Speee](https://speee.jp/)
1. [Spendesk](https://spendesk.com/)
1. [Sumo Logic](https://sumologic.com/)
1. [Sutpc](http://www.sutpc.com/)
1. [Swisscom](https://www.swisscom.ch)
1. [Swissquote](https://github.com/swissquote)
1. [Syncier](https://syncier.com/)
1. [TableCheck](https://tablecheck.com/)
1. [Tailor Brands](https://www.tailorbrands.com)
1. [Tesla](https://tesla.com/)
1. [ThousandEyes](https://www.thousandeyes.com/)
1. [Ticketmaster](https://ticketmaster.com)
1. [Tiger Analytics](https://www.tigeranalytics.com/)
1. [Tigera](https://www.tigera.io/)
1. [Toss](https://toss.im/en)
1. [tru.ID](https://tru.id)
1. [Twilio SendGrid](https://sendgrid.com)
1. [tZERO](https://www.tzero.com/)
1. [ungleich.ch](https://ungleich.ch/)
1. [UBIO](https://ub.io/)
1. [UFirstGroup](https://www.ufirstgroup.com/en/)
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
1. [Viaduct](https://www.viaduct.ai/)
1. [Virtuo](https://www.govirtuo.com/)
1. [VISITS Technologies](https://visits.world/en)
1. [Volvo Cars](https://www.volvocars.com/)
1. [VSHN - The DevOps Company](https://vshn.ch/)
1. [Walkbase](https://www.walkbase.com/)
1. [Wehkamp](https://www.wehkamp.nl/)
1. [WeMo Scooter](https://www.wemoscooter.com/)
1. [Webstores](https://www.webstores.nl)
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
1. [Witick](https://witick.io/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [WSpot](https://www.wspot.com.br/)
1. [Yieldlab](https://www.yieldlab.de/)
1. [Zimpler](https://www.zimpler.com/)
1. [Sap Labs](http://sap.com)
1. [Smilee.io](https://smilee.io)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [Unifonic Inc](https://www.unifonic.com/)
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [Kaltura](https://corp.kaltura.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
1. [MariaDB](https://mariadb.com)
1. [Lightricks](https://www.lightricks.com/)
1. [RightRev](https://rightrev.com/)
1. [MeDirect](https://medirect.com.mt/)
1. [Snapp](https://snapp.ir/)
1. [Technacy](https://www.technacy.it/)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [Youverify](https://youverify.co/)
1. [Keeeb](https://www.keeeb.com/)
1. [p3r](https://www.p3r.one/)
1. [Faro](https://www.faro.com/)
1. [Rise](https://www.risecard.eu/)


@@ -1 +1 @@
2.2.16
0.7.2

File diff suppressed because one or more lines are too long



@@ -1,40 +0,0 @@
# Built-in policy which defines two roles: role:readonly and role:admin,
# and additionally assigns the admin user to the role:admin role.
# There are two policy formats:
# 1. Applications (which belong to a project):
# p, <user/group>, <resource>, <action>, <project>/<object>
# 2. All other resources:
# p, <user/group>, <resource>, <action>, <object>
p, role:readonly, applications, get, */*, allow
p, role:readonly, certificates, get, *, allow
p, role:readonly, clusters, get, *, allow
p, role:readonly, repositories, get, *, allow
p, role:readonly, projects, get, *, allow
p, role:readonly, accounts, get, *, allow
p, role:readonly, gpgkeys, get, *, allow
p, role:admin, applications, create, */*, allow
p, role:admin, applications, update, */*, allow
p, role:admin, applications, delete, */*, allow
p, role:admin, applications, sync, */*, allow
p, role:admin, applications, override, */*, allow
p, role:admin, applications, action/*, */*, allow
p, role:admin, certificates, create, *, allow
p, role:admin, certificates, update, *, allow
p, role:admin, certificates, delete, *, allow
p, role:admin, clusters, create, *, allow
p, role:admin, clusters, update, *, allow
p, role:admin, clusters, delete, *, allow
p, role:admin, repositories, create, *, allow
p, role:admin, repositories, update, *, allow
p, role:admin, repositories, delete, *, allow
p, role:admin, projects, create, *, allow
p, role:admin, projects, update, *, allow
p, role:admin, projects, delete, *, allow
p, role:admin, accounts, update, *, allow
p, role:admin, gpgkeys, create, *, allow
p, role:admin, gpgkeys, delete, *, allow
g, role:admin, role:readonly
g, admin, role:admin


@@ -1,7 +0,0 @@
package assets
import "embed"
// Embedded contains embedded assets
//go:embed *
var Embedded embed.FS


@@ -1,14 +0,0 @@
[request_definition]
r = sub, res, act, obj
[policy_definition]
p = sub, res, act, obj, eft
[role_definition]
g = _, _
[policy_effect]
e = some(where (p.eft == allow)) && !some(where (p.eft == deny))
[matchers]
m = g(r.sub, p.sub) && globOrRegexMatch(r.res, p.res) && globOrRegexMatch(r.act, p.act) && globOrRegexMatch(r.obj, p.obj)

File diff suppressed because it is too large


@@ -1,190 +0,0 @@
package commands
import (
"context"
"fmt"
"math"
"time"
"github.com/argoproj/pkg/stats"
"github.com/go-redis/redis/v8"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/controller"
"github.com/argoproj/argo-cd/v2/controller/sharding"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/env"
"github.com/argoproj/argo-cd/v2/util/errors"
kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
"github.com/argoproj/argo-cd/v2/util/settings"
"github.com/argoproj/argo-cd/v2/util/tls"
)
const (
// CLIName is the name of the CLI
cliName = "argocd-application-controller"
// Default time in seconds for application resync period
defaultAppResyncPeriod = 180
)
func NewCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
repoServerAddress string
repoServerTimeoutSeconds int
selfHealTimeoutSeconds int
statusProcessors int
operationProcessors int
glogLevel int
metricsPort int
metricsCacheExpiration time.Duration
metricsAplicationLabels []string
kubectlParallelismLimit int64
cacheSrc func() (*appstatecache.Cache, error)
redisClient *redis.Client
repoServerPlaintext bool
repoServerStrictTLS bool
)
var command = cobra.Command{
Use: cliName,
Short: "Run ArgoCD Application Controller",
Long: "ArgoCD application controller is a Kubernetes controller that continuously monitors running applications and compares the current, live state against the desired target state (as specified in the repo). This command runs Application Controller in the foreground. It can be configured by following options.",
DisableAutoGenTag: true,
RunE: func(c *cobra.Command, args []string) error {
cli.SetLogFormat(cmdutil.LogFormat)
cli.SetLogLevel(cmdutil.LogLevel)
cli.SetGLogLevel(glogLevel)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
kubeClient := kubernetes.NewForConfigOrDie(config)
appClient := appclientset.NewForConfigOrDie(config)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
var resyncDuration time.Duration
if appResyncPeriod == 0 {
// Re-sync should be disabled if period is 0. Set duration to a very long duration
resyncDuration = time.Hour * 24 * 365 * 100
} else {
resyncDuration = time.Duration(appResyncPeriod) * time.Second
}
tlsConfig := apiclient.TLSConfiguration{
DisableTLS: repoServerPlaintext,
StrictValidation: repoServerStrictTLS,
}
// Load CA information to use for validating connections to the
// repository server, if strict TLS validation was requested.
if !repoServerPlaintext && repoServerStrictTLS {
pool, err := tls.LoadX509CertPool(
fmt.Sprintf("%s/controller/tls/tls.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)),
fmt.Sprintf("%s/controller/tls/ca.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)),
)
if err != nil {
log.Fatalf("%v", err)
}
tlsConfig.Certificates = pool
}
repoClientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds, tlsConfig)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cache, err := cacheSrc()
errors.CheckError(err)
cache.Cache.SetClient(cacheutil.NewTwoLevelClient(cache.Cache.GetClient(), 10*time.Minute))
var appController *controller.ApplicationController
settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace, settings.WithRepoOrClusterChangedHandler(func() {
appController.InvalidateProjectsCache()
}))
kubectl := kubeutil.NewKubectl()
clusterFilter := getClusterFilter()
appController, err = controller.NewApplicationController(
namespace,
settingsMgr,
kubeClient,
appClient,
repoClientset,
cache,
kubectl,
resyncDuration,
time.Duration(selfHealTimeoutSeconds)*time.Second,
metricsPort,
metricsCacheExpiration,
metricsAplicationLabels,
kubectlParallelismLimit,
clusterFilter)
errors.CheckError(err)
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
vers := common.GetVersion()
log.Infof("Application Controller (version: %s, built: %s) starting (namespace: %s)", vers.Version, vers.BuildDate, namespace)
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
go appController.Run(ctx, statusProcessors, operationProcessors)
// Wait forever
select {}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application resync.")
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
command.Flags().IntVar(&statusProcessors, "status-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS", 20, 0, math.MaxInt32), "Number of application status processors")
command.Flags().IntVar(&operationProcessors, "operation-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_OPERATION_PROCESSORS", 10, 0, math.MaxInt32), "Number of application operation processors")
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt64), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 5, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", 20, "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
command.Flags().StringSliceVar(&metricsAplicationLabels, "metrics-application-labels", []string{}, "List of Application labels that will be added to the argocd_application_labels metric")
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
redisClient = client
})
return &command
}
func getClusterFilter() func(cluster *v1alpha1.Cluster) bool {
replicas := env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
var clusterFilter func(cluster *v1alpha1.Cluster) bool
if replicas > 1 {
if shard < 0 {
var err error
shard, err = sharding.InferShard()
errors.CheckError(err)
}
log.Infof("Processing clusters from shard %d", shard)
clusterFilter = sharding.GetClusterFilter(replicas, shard)
} else {
log.Info("Processing all cluster shards")
}
return clusterFilter
}


@@ -0,0 +1,120 @@
package main
import (
"context"
"flag"
"fmt"
"os"
"strconv"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/stats"
)
const (
// CLIName is the name of the CLI
cliName = "argocd-application-controller"
// Default time in seconds for application resync period
defaultAppResyncPeriod = 180
)
func newCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
repoServerAddress string
statusProcessors int
operationProcessors int
logLevel string
glogLevel int
)
var command = cobra.Command{
Use: cliName,
Short: "application-controller is a controller to operate on applications CRD",
RunE: func(c *cobra.Command, args []string) error {
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
// Set the glog level for the k8s go-client
_ = flag.CommandLine.Parse([]string{})
_ = flag.Lookup("logtostderr").Value.Set("true")
_ = flag.Lookup("v").Value.Set(strconv.Itoa(glogLevel))
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
kubeClient := kubernetes.NewForConfigOrDie(config)
appClient := appclientset.NewForConfigOrDie(config)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
// TODO (amatyushentsev): Use config map to store controller configuration
controllerConfig := controller.ApplicationControllerConfig{
Namespace: namespace,
InstanceID: "",
}
db := db.NewDB(namespace, kubeClient)
resyncDuration := time.Duration(appResyncPeriod) * time.Second
repoClientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
appStateManager := controller.NewAppStateManager(db, appClient, repoClientset, namespace)
appController := controller.NewApplicationController(
namespace,
kubeClient,
appClient,
repoClientset,
db,
appStateManager,
resyncDuration,
&controllerConfig)
secretController := controller.NewSecretController(kubeClient, repoClientset, resyncDuration, namespace)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
log.Infof("Application Controller (version: %s) starting (namespace: %s)", argocd.GetVersion(), namespace)
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
go secretController.Run(ctx)
go appController.Run(ctx, statusProcessors, operationProcessors)
// Wait forever
select {}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", defaultAppResyncPeriod, "Time period in seconds for application resync.")
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().IntVar(&statusProcessors, "status-processors", 1, "Number of application status processors")
command.Flags().IntVar(&operationProcessors, "operation-processors", 1, "Number of application operation processors")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
return &command
}
func main() {
if err := newCommand().Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}


@@ -1,58 +0,0 @@
package commands
import (
"time"
"github.com/argoproj/pkg/stats"
"github.com/spf13/cobra"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/cmpserver"
"github.com/argoproj/argo-cd/v2/cmpserver/plugin"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
)
const (
// CLIName is the name of the CLI
cliName = "argocd-cmp-server"
)
func NewCommand() *cobra.Command {
var (
configFilePath string
)
var command = cobra.Command{
Use: cliName,
Short: "Run ArgoCD ConfigManagementPlugin Server",
Long: "ArgoCD ConfigManagementPlugin Server is an internal service which runs as sidecar container in reposerver deployment. It can be configured by following options.",
DisableAutoGenTag: true,
RunE: func(c *cobra.Command, args []string) error {
cli.SetLogFormat(cmdutil.LogFormat)
cli.SetLogLevel(cmdutil.LogLevel)
config, err := plugin.ReadPluginConfig(configFilePath)
errors.CheckError(err)
server, err := cmpserver.NewServer(plugin.CMPServerInitConstants{
PluginConfig: *config,
})
errors.CheckError(err)
// register dumper
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
// run argocd-cmp-server server
server.Run()
return nil
},
}
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().StringVar(&configFilePath, "config-dir-path", common.DefaultPluginConfigFilePath, "Config management plugin configuration file location, Default is '/home/argocd/cmp-server/config/'")
return &command
}


@@ -1,202 +0,0 @@
package commands
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"syscall"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/dex"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/settings"
)
const (
cliName = "argocd-dex"
)
func NewCommand() *cobra.Command {
var command = &cobra.Command{
Use: cliName,
Short: "argocd-dex tools used by Argo CD",
Long: "argocd-dex has internal utility tools used by Argo CD",
DisableAutoGenTag: true,
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewRunDexCommand())
command.AddCommand(NewGenDexConfigCommand())
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
return command
}
func NewRunDexCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
)
var command = cobra.Command{
Use: "rundex",
Short: "Runs dex generating a config using settings from the Argo CD configmap and secret",
RunE: func(c *cobra.Command, args []string) error {
_, err := exec.LookPath("dex")
errors.CheckError(err)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
prevSettings, err := settingsMgr.GetSettings()
errors.CheckError(err)
updateCh := make(chan *settings.ArgoCDSettings, 1)
settingsMgr.Subscribe(updateCh)
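// Each outer iteration below (re)starts dex with the current config; the inner loop waits for a settings update that changes the generated config before restarting it.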
for {
var cmd *exec.Cmd
dexCfgBytes, err := dex.GenerateDexConfigYAML(prevSettings)
errors.CheckError(err)
if len(dexCfgBytes) == 0 {
log.Infof("dex is not configured")
} else {
err = ioutil.WriteFile("/tmp/dex.yaml", dexCfgBytes, 0644)
errors.CheckError(err)
log.Debug(redactor(string(dexCfgBytes)))
cmd = exec.Command("dex", "serve", "/tmp/dex.yaml")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Start()
errors.CheckError(err)
}
// loop until the dex config changes
for {
newSettings := <-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(newSettings)
errors.CheckError(err)
if string(newDexCfgBytes) != string(dexCfgBytes) {
prevSettings = newSettings
log.Infof("dex config modified. restarting dex")
if cmd != nil && cmd.Process != nil {
err = cmd.Process.Signal(syscall.SIGTERM)
errors.CheckError(err)
_, err = cmd.Process.Wait()
errors.CheckError(err)
}
break
} else {
log.Infof("dex config unmodified")
}
}
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
return &command
}
func NewGenDexConfigCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
out string
)
var command = cobra.Command{
Use: "gendexcfg",
Short: "Generates a dex config from Argo CD settings",
RunE: func(c *cobra.Command, args []string) error {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
errors.CheckError(err)
if len(dexCfgBytes) == 0 {
log.Infof("dex is not configured")
return nil
}
if out == "" {
dexCfg := make(map[string]interface{})
err := yaml.Unmarshal(dexCfgBytes, &dexCfg)
errors.CheckError(err)
if staticClientsInterface, ok := dexCfg["staticClients"]; ok {
if staticClients, ok := staticClientsInterface.([]interface{}); ok {
for i := range staticClients {
staticClient := staticClients[i]
if mappings, ok := staticClient.(map[string]interface{}); ok {
for key := range mappings {
if key == "secret" {
mappings[key] = "******"
}
}
staticClients[i] = mappings
}
}
dexCfg["staticClients"] = staticClients
}
}
errors.CheckError(err)
maskedDexCfgBytes, err := yaml.Marshal(dexCfg)
errors.CheckError(err)
fmt.Print(string(maskedDexCfgBytes))
} else {
err = ioutil.WriteFile(out, dexCfgBytes, 0644)
errors.CheckError(err)
}
return nil
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().StringVarP(&out, "out", "o", "", "Output to the specified file instead of stdout")
return &command
}
func iterateStringFields(obj interface{}, callback func(name string, val string) string) {
if mapField, ok := obj.(map[string]interface{}); ok {
for field, val := range mapField {
if strVal, ok := val.(string); ok {
mapField[field] = callback(field, strVal)
} else {
iterateStringFields(val, callback)
}
}
} else if arrayField, ok := obj.([]interface{}); ok {
for i := range arrayField {
iterateStringFields(arrayField[i], callback)
}
}
}
func redactor(dirtyString string) string {
config := make(map[string]interface{})
err := yaml.Unmarshal([]byte(dirtyString), &config)
errors.CheckError(err)
iterateStringFields(config, func(name string, val string) string {
if name == "clientSecret" || name == "secret" || name == "bindPW" {
return "********"
} else {
return val
}
})
data, err := yaml.Marshal(config)
errors.CheckError(err)
return string(data)
}

View File

@@ -1,176 +0,0 @@
package commands
import (
"fmt"
"math"
"net"
"net/http"
"os"
"time"
"github.com/argoproj/pkg/stats"
"github.com/go-redis/redis/v8"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/health/grpc_health_v1"
"k8s.io/apimachinery/pkg/api/resource"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/reposerver"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache"
"github.com/argoproj/argo-cd/v2/reposerver/metrics"
"github.com/argoproj/argo-cd/v2/reposerver/repository"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/env"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/gpg"
"github.com/argoproj/argo-cd/v2/util/healthz"
ioutil "github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/tls"
)
const (
// CLIName is the name of the CLI
cliName = "argocd-repo-server"
gnuPGSourcePath = "/app/config/gpg/source"
defaultPauseGenerationAfterFailedGenerationAttempts = 3
defaultPauseGenerationOnFailureForMinutes = 60
defaultPauseGenerationOnFailureForRequests = 0
)
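// getGnuPGSourcePath returns the directory from which GPG keys are loaded, honoring an ARGOCD_GPG_DATA_PATH override.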
func getGnuPGSourcePath() string {
if path := os.Getenv("ARGOCD_GPG_DATA_PATH"); path != "" {
return path
} else {
return gnuPGSourcePath
}
}
func getPauseGenerationAfterFailedGenerationAttempts() int {
return env.ParseNumFromEnv(common.EnvPauseGenerationAfterFailedAttempts, defaultPauseGenerationAfterFailedGenerationAttempts, 0, math.MaxInt32)
}
func getPauseGenerationOnFailureForMinutes() int {
return env.ParseNumFromEnv(common.EnvPauseGenerationMinutes, defaultPauseGenerationOnFailureForMinutes, 0, math.MaxInt32)
}
func getPauseGenerationOnFailureForRequests() int {
return env.ParseNumFromEnv(common.EnvPauseGenerationRequests, defaultPauseGenerationOnFailureForRequests, 0, math.MaxInt32)
}
func NewCommand() *cobra.Command {
var (
parallelismLimit int64
listenPort int
metricsPort int
cacheSrc func() (*reposervercache.Cache, error)
tlsConfigCustomizer tls.ConfigCustomizer
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
redisClient *redis.Client
disableTLS bool
maxCombinedDirectoryManifestsSize string
)
var command = cobra.Command{
Use: cliName,
Short: "Run ArgoCD Repository Server",
Long: "ArgoCD Repository Server is an internal service which maintains a local cache of the Git repository holding the application manifests, and is responsible for generating and returning the Kubernetes manifests. This command runs the Repository Server in the foreground. It can be configured with the following options.",
DisableAutoGenTag: true,
RunE: func(c *cobra.Command, args []string) error {
cli.SetLogFormat(cmdutil.LogFormat)
cli.SetLogLevel(cmdutil.LogLevel)
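// Unless TLS is disabled, resolve the TLS configuration customizer registered via tls.AddTLSFlagsToCmd before creating the gRPC server.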
if !disableTLS {
var err error
tlsConfigCustomizer, err = tlsConfigCustomizerSrc()
errors.CheckError(err)
}
cache, err := cacheSrc()
errors.CheckError(err)
maxCombinedDirectoryManifestsQuantity, err := resource.ParseQuantity(maxCombinedDirectoryManifestsSize)
errors.CheckError(err)
metricsServer := metrics.NewMetricsServer()
cacheutil.CollectMetrics(redisClient, metricsServer)
server, err := reposerver.NewServer(metricsServer, cache, tlsConfigCustomizer, repository.RepoServerInitConstants{
ParallelismLimit: parallelismLimit,
PauseGenerationAfterFailedGenerationAttempts: getPauseGenerationAfterFailedGenerationAttempts(),
PauseGenerationOnFailureForMinutes: getPauseGenerationOnFailureForMinutes(),
PauseGenerationOnFailureForRequests: getPauseGenerationOnFailureForRequests(),
MaxCombinedDirectoryManifestsSize: maxCombinedDirectoryManifestsQuantity,
})
errors.CheckError(err)
grpc := server.CreateGRPC()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", listenPort))
errors.CheckError(err)
healthz.ServeHealthCheck(http.DefaultServeMux, func(r *http.Request) error {
if val, ok := r.URL.Query()["full"]; ok && len(val) > 0 && val[0] == "true" {
// connect to itself to make sure repo server is able to serve connection
// used by liveness probe to auto restart repo server
// see https://github.com/argoproj/argo-cd/issues/5110 for more information
conn, err := apiclient.NewConnection(fmt.Sprintf("localhost:%d", listenPort), 60, &apiclient.TLSConfiguration{DisableTLS: disableTLS})
if err != nil {
return err
}
defer ioutil.Close(conn)
client := grpc_health_v1.NewHealthClient(conn)
res, err := client.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{})
if err != nil {
return err
}
if res.Status != grpc_health_v1.HealthCheckResponse_SERVING {
return fmt.Errorf("grpc health check status is '%v'", res.Status)
}
return nil
}
return nil
})
http.Handle("/metrics", metricsServer.GetHandler())
go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf(":%d", metricsPort), nil)) }()
if gpg.IsGPGEnabled() {
log.Infof("Initializing GnuPG keyring at %s", common.GetGnuPGHomePath())
err = gpg.InitializeGnuPG()
errors.CheckError(err)
log.Infof("Populating GnuPG keyring with keys from %s", getGnuPGSourcePath())
added, removed, err := gpg.SyncKeyRingFromDirectory(getGnuPGSourcePath())
errors.CheckError(err)
log.Infof("Loaded %d (and removed %d) keys from keyring", len(added), len(removed))
go func() { errors.CheckError(reposerver.StartGPGWatcher(getGnuPGSourcePath())) }()
}
log.Infof("argocd-repo-server %s serving on %s", common.GetVersion(), listener.Addr())
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
err = grpc.Serve(listener)
errors.CheckError(err)
return nil
},
}
if cmdutil.LogFormat == "" {
cmdutil.LogFormat = os.Getenv("ARGOCD_REPO_SERVER_LOGFORMAT")
}
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_REPO_SERVER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_REPO_SERVER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
command.Flags().Int64Var(&parallelismLimit, "parallelismlimit", int64(env.ParseNumFromEnv("ARGOCD_REPO_SERVER_PARALLELISM_LIMIT", 0, 0, math.MaxInt32)), "Limit on number of concurrent manifest generation requests. Any value less than 1 means no limit.")
command.Flags().IntVar(&listenPort, "port", common.DefaultPortRepoServer, "Listen on given port for incoming connections")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortRepoServerMetrics, "Start metrics server on given port")
command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_TLS", false), "Disable TLS on the gRPC endpoint")
command.Flags().StringVar(&maxCombinedDirectoryManifestsSize, "max-combined-directory-manifests-size", env.StringFromEnv("ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE", "10M"), "Max combined size of manifest files in a directory-type Application")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
redisClient = client
})
return &command
}

View File

@@ -0,0 +1,78 @@
package main
import (
"fmt"
"net"
"os"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/ksonnet"
"github.com/argoproj/argo-cd/util/stats"
)
const (
// CLIName is the name of the CLI
cliName = "argocd-repo-server"
port = 8081
)
func newCommand() *cobra.Command {
var (
logLevel string
)
var command = cobra.Command{
Use: cliName,
Short: "Run argocd-repo-server",
RunE: func(c *cobra.Command, args []string) error {
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
server := reposerver.NewServer(git.NewFactory(), newCache())
grpc := server.CreateGRPC()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
errors.CheckError(err)
ksVers, err := ksonnet.KsonnetVersion()
errors.CheckError(err)
log.Infof("argocd-repo-server %s serving on %s", argocd.GetVersion(), listener.Addr())
log.Infof("ksonnet version: %s", ksVers)
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
err = grpc.Serve(listener)
errors.CheckError(err)
return nil
},
}
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
return &command
}
func newCache() cache.Cache {
return cache.NewInMemoryCache(repository.DefaultRepoCacheExpiration)
// client := redis.NewClient(&redis.Options{
// Addr: "localhost:6379",
// Password: "",
// DB: 0,
// })
// return cache.NewRedisCache(client, repository.DefaultRepoCacheExpiration)
}
func main() {
if err := newCommand().Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}

View File

@@ -1,184 +0,0 @@
package commands
import (
"context"
"fmt"
"math"
"time"
"github.com/argoproj/pkg/stats"
"github.com/go-redis/redis/v8"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/server"
servercache "github.com/argoproj/argo-cd/v2/server/cache"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/env"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/kube"
"github.com/argoproj/argo-cd/v2/util/tls"
)
const (
failureRetryCountEnv = "ARGOCD_K8S_RETRY_COUNT"
failureRetryPeriodMilliSecondsEnv = "ARGOCD_K8S_RETRY_DURATION_MILLISECONDS"
)
var (
failureRetryCount = 0
failureRetryPeriodMilliSeconds = 100
)
func init() {
failureRetryCount = env.ParseNumFromEnv(failureRetryCountEnv, failureRetryCount, 0, 10)
failureRetryPeriodMilliSeconds = env.ParseNumFromEnv(failureRetryPeriodMilliSecondsEnv, failureRetryPeriodMilliSeconds, 0, 1000)
}
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
redisClient *redis.Client
insecure bool
listenPort int
metricsPort int
glogLevel int
clientConfig clientcmd.ClientConfig
repoServerTimeoutSeconds int
baseHRef string
rootPath string
repoServerAddress string
dexServerAddress string
disableAuth bool
enableGZip bool
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
cacheSrc func() (*servercache.Cache, error)
frameOptions string
repoServerPlaintext bool
repoServerStrictTLS bool
staticAssetsDir string
)
var command = &cobra.Command{
Use: cliName,
Short: "Run the ArgoCD API server",
Long: "The API server is a gRPC/REST server which exposes the API consumed by the Web UI, CLI, and CI/CD systems. This command runs the API server in the foreground. It can be configured with the following options.",
DisableAutoGenTag: true,
Run: func(c *cobra.Command, args []string) {
cli.SetLogFormat(cmdutil.LogFormat)
cli.SetLogLevel(cmdutil.LogLevel)
cli.SetGLogLevel(glogLevel)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
tlsConfigCustomizer, err := tlsConfigCustomizerSrc()
errors.CheckError(err)
cache, err := cacheSrc()
errors.CheckError(err)
kubeclientset := kubernetes.NewForConfigOrDie(config)
appclientsetConfig, err := clientConfig.ClientConfig()
errors.CheckError(err)
errors.CheckError(v1alpha1.SetK8SConfigDefaults(appclientsetConfig))
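// Optionally wrap the REST config so that failed Kubernetes API calls are retried; the retry count and delay come from the ARGOCD_K8S_RETRY_* environment variables parsed in init().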
if failureRetryCount > 0 {
appclientsetConfig = kube.AddFailureRetryWrapper(appclientsetConfig, failureRetryCount, failureRetryPeriodMilliSeconds)
}
appclientset := appclientset.NewForConfigOrDie(appclientsetConfig)
tlsConfig := apiclient.TLSConfiguration{
DisableTLS: repoServerPlaintext,
StrictValidation: repoServerStrictTLS,
}
// Load CA information to use for validating connections to the
// repository server, if strict TLS validation was requested.
if !repoServerPlaintext && repoServerStrictTLS {
pool, err := tls.LoadX509CertPool(
fmt.Sprintf("%s/server/tls/tls.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)),
fmt.Sprintf("%s/server/tls/ca.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)),
)
if err != nil {
log.Fatalf("%v", err)
}
tlsConfig.Certificates = pool
}
repoclientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds, tlsConfig)
if rootPath != "" {
if baseHRef != "" && baseHRef != rootPath {
log.Warnf("--basehref and --rootpath are in conflict: basehref: %s rootpath: %s", baseHRef, rootPath)
}
baseHRef = rootPath
}
argoCDOpts := server.ArgoCDServerOpts{
Insecure: insecure,
ListenPort: listenPort,
MetricsPort: metricsPort,
Namespace: namespace,
BaseHRef: baseHRef,
RootPath: rootPath,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
DexServerAddr: dexServerAddress,
DisableAuth: disableAuth,
EnableGZip: enableGZip,
TLSConfigCustomizer: tlsConfigCustomizer,
Cache: cache,
XFrameOptions: frameOptions,
RedisClient: redisClient,
StaticAssetsDir: staticAssetsDir,
}
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
for {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
argocd := server.NewServer(ctx, argoCDOpts)
argocd.Run(ctx, listenPort, metricsPort)
cancel()
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().BoolVar(&insecure, "insecure", env.ParseBoolFromEnv("ARGOCD_SERVER_INSECURE", false), "Run server without TLS")
command.Flags().StringVar(&staticAssetsDir, "staticassets", env.StringFromEnv("ARGOCD_SERVER_STATIC_ASSETS", "/shared/app"), "Directory path that contains additional static assets")
command.Flags().StringVar(&baseHRef, "basehref", env.StringFromEnv("ARGOCD_SERVER_BASEHREF", "/"), "Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /")
command.Flags().StringVar(&rootPath, "rootpath", env.StringFromEnv("ARGOCD_SERVER_ROOTPATH", ""), "Used if Argo CD is running behind reverse proxy under subpath different from /")
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_SERVER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_SERVER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address")
command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address")
command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", false), "Enable GZIP compression")
command.AddCommand(cli.NewVersionCmd(cliName))
command.Flags().IntVar(&listenPort, "port", common.DefaultPortAPIServer, "Listen on given port")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDAPIServerMetrics, "Start metrics on given port")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
command.Flags().StringVar(&frameOptions, "x-frame-options", env.StringFromEnv("ARGOCD_SERVER_X_FRAME_OPTIONS", "sameorigin"), "Set X-Frame-Options header in HTTP responses to `value`. To disable, set to \"\".")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_SERVER_REPO_SERVER_PLAINTEXT", false), "Use a plaintext client (non-TLS) to connect to repository server")
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_SERVER_REPO_SERVER_STRICT_TLS", false), "Perform strict validation of TLS certificates when connecting to repo server")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
cacheSrc = servercache.AddCacheFlagsToCmd(command, func(client *redis.Client) {
redisClient = client
})
return command
}

View File

@@ -0,0 +1,90 @@
package commands
import (
"context"
"flag"
"strconv"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/server"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/stats"
)
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
insecure bool
logLevel string
glogLevel int
clientConfig clientcmd.ClientConfig
staticAssetsDir string
repoServerAddress string
disableAuth bool
)
var command = &cobra.Command{
Use: cliName,
Short: "Run the argocd API server",
Long: "Run the argocd API server",
Run: func(c *cobra.Command, args []string) {
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
// Set the glog level for the k8s go-client
_ = flag.CommandLine.Parse([]string{})
_ = flag.Lookup("logtostderr").Value.Set("true")
_ = flag.Lookup("v").Value.Set(strconv.Itoa(glogLevel))
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeclientset := kubernetes.NewForConfigOrDie(config)
appclientset := appclientset.NewForConfigOrDie(config)
repoclientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
argoCDOpts := server.ArgoCDServerOpts{
Insecure: insecure,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
DisableAuth: disableAuth,
}
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
for {
argocd := server.NewServer(argoCDOpts)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
argocd.Run(ctx, 8080)
cancel()
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().BoolVar(&insecure, "insecure", false, "Run server without TLS")
command.Flags().StringVar(&staticAssetsDir, "staticassets", "", "Static assets directory path")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().BoolVar(&disableAuth, "disable-auth", false, "Disable client authentication")
command.AddCommand(cli.NewVersionCmd(cliName))
return command
}

16
cmd/argocd-server/main.go Normal file
View File

@@ -0,0 +1,16 @@
package main
import (
commands "github.com/argoproj/argo-cd/cmd/argocd-server/commands"
"github.com/argoproj/argo-cd/errors"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
func main() {
err := commands.NewCommand().Execute()
errors.CheckError(err)
}

384
cmd/argocd-util/main.go Normal file
View File

@@ -0,0 +1,384 @@
package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
"syscall"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/dex"
"github.com/argoproj/argo-cd/util/settings"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
const (
// CLIName is the name of the CLI
cliName = "argocd-util"
// YamlSeparator separates sections of a YAML file
yamlSeparator = "\n---\n"
)
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
logLevel string
)
var command = &cobra.Command{
Use: cliName,
Short: "argocd-util has internal tools used by ArgoCD",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(cli.NewVersionCmd(cliName))
command.AddCommand(NewRunDexCommand())
command.AddCommand(NewGenDexConfigCommand())
command.AddCommand(NewImportCommand())
command.AddCommand(NewExportCommand())
command.AddCommand(NewSettingsCommand())
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
return command
}
func NewRunDexCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
)
var command = cobra.Command{
Use: "rundex",
Short: "Runs dex, generating a config using settings from the ArgoCD configmap and secret",
RunE: func(c *cobra.Command, args []string) error {
_, err := exec.LookPath("dex")
errors.CheckError(err)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
ctx := context.Background()
settingsMgr.StartNotifier(ctx, settings)
updateCh := make(chan struct{}, 1)
settingsMgr.Subscribe(updateCh)
for {
var cmd *exec.Cmd
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
errors.CheckError(err)
if len(dexCfgBytes) == 0 {
log.Infof("dex is not configured")
} else {
err = ioutil.WriteFile("/tmp/dex.yaml", dexCfgBytes, 0644)
errors.CheckError(err)
log.Info(string(dexCfgBytes))
cmd = exec.Command("dex", "serve", "/tmp/dex.yaml")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Start()
errors.CheckError(err)
}
// loop until the dex config changes
for {
<-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
errors.CheckError(err)
if string(newDexCfgBytes) != string(dexCfgBytes) {
log.Infof("dex config modified. restarting dex")
if cmd != nil && cmd.Process != nil {
err = cmd.Process.Signal(syscall.SIGTERM)
errors.CheckError(err)
_, err = cmd.Process.Wait()
errors.CheckError(err)
}
break
} else {
log.Infof("dex config unmodified")
}
}
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
return &command
}
func NewGenDexConfigCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
out string
)
var command = cobra.Command{
Use: "gendexcfg",
Short: "Generates a dex config from ArgoCD settings",
RunE: func(c *cobra.Command, args []string) error {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
errors.CheckError(err)
if len(dexCfgBytes) == 0 {
log.Infof("dex is not configured")
return nil
}
if out == "" {
fmt.Print(string(dexCfgBytes))
} else {
err = ioutil.WriteFile(out, dexCfgBytes, 0644)
errors.CheckError(err)
}
return nil
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().StringVarP(&out, "out", "o", "", "Output to the specified file instead of stdout")
return &command
}
// NewImportCommand defines a new command for importing Kubernetes and Argo CD resources.
func NewImportCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
)
var command = cobra.Command{
Use: "import SOURCE",
Short: "Import Argo CD data from stdin (specify `-') or a file",
RunE: func(c *cobra.Command, args []string) error {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
var (
input []byte
err error
newSettings *settings.ArgoCDSettings
newRepos []*v1alpha1.Repository
newClusters []*v1alpha1.Cluster
newApps []*v1alpha1.Application
newRBACCM *apiv1.ConfigMap
)
if in := args[0]; in == "-" {
input, err = ioutil.ReadAll(os.Stdin)
errors.CheckError(err)
} else {
input, err = ioutil.ReadFile(in)
errors.CheckError(err)
}
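// The import payload is a fixed sequence of YAML documents: settings, repositories, clusters, applications, and the RBAC configmap.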
inputStrings := strings.Split(string(input), yamlSeparator)
err = yaml.Unmarshal([]byte(inputStrings[0]), &newSettings)
errors.CheckError(err)
err = yaml.Unmarshal([]byte(inputStrings[1]), &newRepos)
errors.CheckError(err)
err = yaml.Unmarshal([]byte(inputStrings[2]), &newClusters)
errors.CheckError(err)
err = yaml.Unmarshal([]byte(inputStrings[3]), &newApps)
errors.CheckError(err)
err = yaml.Unmarshal([]byte(inputStrings[4]), &newRBACCM)
errors.CheckError(err)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
err = settingsMgr.SaveSettings(newSettings)
errors.CheckError(err)
db := db.NewDB(namespace, kubeClientset)
_, err = kubeClientset.CoreV1().ConfigMaps(namespace).Create(newRBACCM)
errors.CheckError(err)
for _, repo := range newRepos {
_, err := db.CreateRepository(context.Background(), repo)
if err != nil {
log.Warn(err)
}
}
for _, cluster := range newClusters {
_, err := db.CreateCluster(context.Background(), cluster)
if err != nil {
log.Warn(err)
}
}
appClientset := appclientset.NewForConfigOrDie(config)
for _, app := range newApps {
out, err := appClientset.ArgoprojV1alpha1().Applications(namespace).Create(app)
errors.CheckError(err)
log.Println(out)
}
return nil
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
return &command
}
// NewExportCommand defines a new command for exporting Kubernetes and Argo CD resources.
func NewExportCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
out string
)
var command = cobra.Command{
Use: "export",
Short: "Export all Argo CD data to stdout (default) or a file",
RunE: func(c *cobra.Command, args []string) error {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
// certificate data is included in the secrets that are exported alongside the settings
settings.Certificate = nil
db := db.NewDB(namespace, kubeClientset)
clusters, err := db.ListClusters(context.Background())
errors.CheckError(err)
repos, err := db.ListRepositories(context.Background())
errors.CheckError(err)
appClientset := appclientset.NewForConfigOrDie(config)
apps, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(metav1.ListOptions{})
errors.CheckError(err)
rbacCM, err := kubeClientset.CoreV1().ConfigMaps(namespace).Get(common.ArgoCDRBACConfigMapName, metav1.GetOptions{})
errors.CheckError(err)
// remove extraneous cruft from output
rbacCM.ObjectMeta = metav1.ObjectMeta{
Name: rbacCM.ObjectMeta.Name,
}
// remove extraneous cruft from output
for idx, app := range apps.Items {
apps.Items[idx].ObjectMeta = metav1.ObjectMeta{
Name: app.ObjectMeta.Name,
Finalizers: app.ObjectMeta.Finalizers,
}
apps.Items[idx].Status = v1alpha1.ApplicationStatus{
History: app.Status.History,
}
apps.Items[idx].Operation = nil
}
// take a list of exportable objects, marshal them to YAML,
// and return a string joined by a delimiter
output := func(delimiter string, oo ...interface{}) string {
out := make([]string, 0)
for _, o := range oo {
data, err := yaml.Marshal(o)
errors.CheckError(err)
out = append(out, string(data))
}
return strings.Join(out, delimiter)
}(yamlSeparator, settings, repos.Items, clusters.Items, apps.Items, rbacCM)
if out == "-" {
fmt.Println(output)
} else {
err = ioutil.WriteFile(out, []byte(output), 0644)
errors.CheckError(err)
}
return nil
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout")
return &command
}
// NewSettingsCommand returns a new instance of `argocd-util settings` command
func NewSettingsCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
updateSuperuser bool
superuserPassword string
updateSignature bool
)
var command = &cobra.Command{
Use: "settings",
Short: "Creates or updates ArgoCD settings",
Long: "Creates or updates ArgoCD settings",
Run: func(c *cobra.Command, args []string) {
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, wasSpecified, err := clientConfig.Namespace()
errors.CheckError(err)
if !(wasSpecified) {
namespace = "argocd"
}
kubeclientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
settingsMgr := settings.NewSettingsManager(kubeclientset, namespace)
_ = settings.UpdateSettings(superuserPassword, settingsMgr, updateSignature, updateSuperuser, namespace)
},
}
command.Flags().BoolVar(&updateSuperuser, "update-superuser", false, "force updating the superuser password")
command.Flags().StringVar(&superuserPassword, "superuser-password", "", "password for super user")
command.Flags().BoolVar(&updateSignature, "update-signature", false, "force updating the server-side token signing signature")
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}
func main() {
if err := NewCommand().Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}

View File

@@ -2,29 +2,17 @@ package commands
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"text/tabwriter"
"time"
"syscall"
timeutil "github.com/argoproj/pkg/time"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/server/account"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/settings"
"github.com/spf13/cobra"
"golang.org/x/term"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
accountpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/account"
"github.com/argoproj/argo-cd/v2/pkg/apiclient/session"
"github.com/argoproj/argo-cd/v2/server/rbacpolicy"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/localconfig"
sessionutil "github.com/argoproj/argo-cd/v2/util/session"
"golang.org/x/crypto/ssh/terminal"
)
func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
@@ -37,380 +25,48 @@ func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
},
}
command.AddCommand(NewAccountUpdatePasswordCommand(clientOpts))
command.AddCommand(NewAccountGetUserInfoCommand(clientOpts))
command.AddCommand(NewAccountCanICommand(clientOpts))
command.AddCommand(NewAccountListCommand(clientOpts))
command.AddCommand(NewAccountGenerateTokenCommand(clientOpts))
command.AddCommand(NewAccountGetCommand(clientOpts))
command.AddCommand(NewAccountDeleteTokenCommand(clientOpts))
return command
}
func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
account string
currentPassword string
newPassword string
)
var command = &cobra.Command{
Use: "update-password",
Short: "Update an account's password",
Long: `
This command can be used to update the password of the currently logged on
user, or an arbitrary local user account when the currently logged on user
has appropriate RBAC permissions to change other accounts.
`,
Example: `
# Update the current user's password
argocd account update-password
# Update the password for user foobar
argocd account update-password --account foobar
`,
Short: "Update password",
Run: func(c *cobra.Command, args []string) {
if len(args) != 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
acdClient := argocdclient.NewClientOrDie(clientOpts)
conn, usrIf := acdClient.NewAccountClientOrDie()
defer io.Close(conn)
userInfo := getCurrentAccount(acdClient)
if userInfo.Iss == sessionutil.SessionManagerClaimsIssuer && currentPassword == "" {
fmt.Printf("*** Enter password of currently logged in user (%s): ", userInfo.Username)
password, err := term.ReadPassword(int(os.Stdin.Fd()))
if currentPassword == "" {
fmt.Print("*** Enter current password: ")
password, err := terminal.ReadPassword(syscall.Stdin)
errors.CheckError(err)
currentPassword = string(password)
fmt.Print("\n")
}
if account == "" {
account = userInfo.Username
}
if newPassword == "" {
var err error
newPassword, err = cli.ReadAndConfirmPassword(account)
errors.CheckError(err)
newPassword = settings.ReadAndConfirmPassword()
}
updatePasswordRequest := accountpkg.UpdatePasswordRequest{
updatePasswordRequest := account.UpdatePasswordRequest{
NewPassword: newPassword,
CurrentPassword: currentPassword,
Name: account,
}
ctx := context.Background()
_, err := usrIf.UpdatePassword(ctx, &updatePasswordRequest)
conn, usrIf := argocdclient.NewClientOrDie(clientOpts).NewAccountClientOrDie()
defer util.Close(conn)
_, err := usrIf.UpdatePassword(context.Background(), &updatePasswordRequest)
errors.CheckError(err)
fmt.Printf("Password updated\n")
if account == "" || account == userInfo.Username {
// Get a new JWT token after updating the password
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
errors.CheckError(err)
configCtx, err := localCfg.ResolveContext(clientOpts.Context)
errors.CheckError(err)
claims, err := configCtx.User.Claims()
errors.CheckError(err)
tokenString := passwordLogin(acdClient, localconfig.GetUsername(claims.Subject), newPassword)
localCfg.UpsertUser(localconfig.User{
Name: localCfg.CurrentContext,
AuthToken: tokenString,
})
err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath)
errors.CheckError(err)
fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext)
}
},
}
command.Flags().StringVar(&currentPassword, "current-password", "", "password of the currently logged on user")
command.Flags().StringVar(&currentPassword, "current-password", "", "current password you wish to change")
command.Flags().StringVar(&newPassword, "new-password", "", "new password you want to update to")
command.Flags().StringVar(&account, "account", "", "an account name that should be updated. Defaults to current user account")
return command
}
func NewAccountGetUserInfoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "get-user-info",
Short: "Get user info",
Run: func(c *cobra.Command, args []string) {
if len(args) != 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, client := argocdclient.NewClientOrDie(clientOpts).NewSessionClientOrDie()
defer io.Close(conn)
ctx := context.Background()
response, err := client.GetUserInfo(ctx, &session.GetUserInfoRequest{})
errors.CheckError(err)
switch output {
case "yaml":
yamlBytes, err := yaml.Marshal(response)
errors.CheckError(err)
fmt.Println(string(yamlBytes))
case "json":
jsonBytes, err := json.MarshalIndent(response, "", " ")
errors.CheckError(err)
fmt.Println(string(jsonBytes))
case "":
fmt.Printf("Logged In: %v\n", response.LoggedIn)
if response.LoggedIn {
fmt.Printf("Username: %s\n", response.Username)
fmt.Printf("Issuer: %s\n", response.Iss)
fmt.Printf("Groups: %v\n", strings.Join(response.Groups, ","))
}
default:
log.Fatalf("Unknown output format: %s", output)
}
},
}
command.Flags().StringVarP(&output, "output", "o", "", "Output format. One of: yaml, json")
return command
}
func NewAccountCanICommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
return &cobra.Command{
Use: "can-i ACTION RESOURCE SUBRESOURCE",
Short: "Can I",
Example: fmt.Sprintf(`
# Can I sync any app?
argocd account can-i sync applications '*'
# Can I update a project?
argocd account can-i update projects 'default'
# Can I create a cluster?
argocd account can-i create clusters '*'
Actions: %v
Resources: %v
`, rbacpolicy.Actions, rbacpolicy.Resources),
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, client := argocdclient.NewClientOrDie(clientOpts).NewAccountClientOrDie()
defer io.Close(conn)
ctx := context.Background()
response, err := client.CanI(ctx, &accountpkg.CanIRequest{
Action: args[0],
Resource: args[1],
Subresource: args[2],
})
errors.CheckError(err)
fmt.Println(response.Value)
},
}
}
func printAccountNames(accounts []*accountpkg.Account) {
for _, p := range accounts {
fmt.Println(p.Name)
}
}
func printAccountsTable(items []*accountpkg.Account) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tENABLED\tCAPABILITIES\n")
for _, a := range items {
fmt.Fprintf(w, "%s\t%v\t%s\n", a.Name, a.Enabled, strings.Join(a.Capabilities, ", "))
}
_ = w.Flush()
}
func NewAccountListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
cmd := &cobra.Command{
Use: "list",
Short: "List accounts",
Example: "argocd account list",
Run: func(c *cobra.Command, args []string) {
conn, client := argocdclient.NewClientOrDie(clientOpts).NewAccountClientOrDie()
defer io.Close(conn)
ctx := context.Background()
response, err := client.ListAccounts(ctx, &accountpkg.ListAccountRequest{})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(response.Items, output, false)
errors.CheckError(err)
case "name":
printAccountNames(response.Items)
case "wide", "":
printAccountsTable(response.Items)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
cmd.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|name")
return cmd
}
func getCurrentAccount(clientset argocdclient.Client) session.GetUserInfoResponse {
conn, client := clientset.NewSessionClientOrDie()
defer io.Close(conn)
userInfo, err := client.GetUserInfo(context.Background(), &session.GetUserInfoRequest{})
errors.CheckError(err)
return *userInfo
}
func NewAccountGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
account string
)
cmd := &cobra.Command{
Use: "get",
Short: "Get account details",
Example: `# Get the currently logged in account details
argocd account get
# Get details for an account by name
argocd account get --account <account-name>`,
Run: func(c *cobra.Command, args []string) {
clientset := argocdclient.NewClientOrDie(clientOpts)
if account == "" {
account = getCurrentAccount(clientset).Username
}
conn, client := clientset.NewAccountClientOrDie()
defer io.Close(conn)
acc, err := client.GetAccount(context.Background(), &accountpkg.GetAccountRequest{Name: account})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(acc, output, true)
errors.CheckError(err)
case "name":
fmt.Println(acc.Name)
case "wide", "":
printAccountDetails(acc)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
cmd.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|name")
cmd.Flags().StringVarP(&account, "account", "a", "", "Account name. Defaults to the current account.")
return cmd
}
func printAccountDetails(acc *accountpkg.Account) {
fmt.Printf(printOpFmtStr, "Name:", acc.Name)
fmt.Printf(printOpFmtStr, "Enabled:", strconv.FormatBool(acc.Enabled))
fmt.Printf(printOpFmtStr, "Capabilities:", strings.Join(acc.Capabilities, ", "))
fmt.Println("\nTokens:")
if len(acc.Tokens) == 0 {
fmt.Println("NONE")
} else {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ID\tISSUED AT\tEXPIRING AT\n")
for _, t := range acc.Tokens {
expiresAtFormatted := "never"
if t.ExpiresAt > 0 {
expiresAt := time.Unix(t.ExpiresAt, 0)
expiresAtFormatted = expiresAt.Format(time.RFC3339)
if expiresAt.Before(time.Now()) {
expiresAtFormatted = fmt.Sprintf("%s (expired)", expiresAtFormatted)
}
}
fmt.Fprintf(w, "%s\t%s\t%s\n", t.Id, time.Unix(t.IssuedAt, 0).Format(time.RFC3339), expiresAtFormatted)
}
_ = w.Flush()
}
}
func NewAccountGenerateTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
account string
expiresIn string
id string
)
cmd := &cobra.Command{
Use: "generate-token",
Short: "Generate account token",
Example: `# Generate token for the currently logged in account
argocd account generate-token
# Generate token for the account with the specified name
argocd account generate-token --account <account-name>`,
Run: func(c *cobra.Command, args []string) {
clientset := argocdclient.NewClientOrDie(clientOpts)
conn, client := clientset.NewAccountClientOrDie()
defer io.Close(conn)
if account == "" {
account = getCurrentAccount(clientset).Username
}
expiresIn, err := timeutil.ParseDuration(expiresIn)
errors.CheckError(err)
response, err := client.CreateToken(context.Background(), &accountpkg.CreateTokenRequest{
Name: account,
ExpiresIn: int64(expiresIn.Seconds()),
Id: id,
})
errors.CheckError(err)
fmt.Println(response.Token)
},
}
cmd.Flags().StringVarP(&account, "account", "a", "", "Account name. Defaults to the current account.")
cmd.Flags().StringVarP(&expiresIn, "expires-in", "e", "0s", "Duration before the token will expire. (Default: No expiration)")
cmd.Flags().StringVar(&id, "id", "", "Optional token id. Falls back to a uuid if no value is specified.")
return cmd
}
func NewAccountDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
account string
)
cmd := &cobra.Command{
Use: "delete-token",
Short: "Deletes account token",
Example: `# Delete token of the currently logged in account
argocd account delete-token ID
# Delete token of the account with the specified name
argocd account delete-token --account <account-name> ID`,
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
id := args[0]
clientset := argocdclient.NewClientOrDie(clientOpts)
conn, client := clientset.NewAccountClientOrDie()
defer io.Close(conn)
if account == "" {
account = getCurrentAccount(clientset).Username
}
_, err := client.DeleteToken(context.Background(), &accountpkg.DeleteTokenRequest{Name: account, Id: id})
errors.CheckError(err)
},
}
cmd.Flags().StringVarP(&account, "account", "a", "", "Account name. Defaults to the current account.")
return cmd
}

View File

@@ -1,243 +0,0 @@
package admin
import (
"reflect"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/settings"
)
const (
// YamlSeparator separates sections of a YAML file
yamlSeparator = "---\n"
)
var (
configMapResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}
secretResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
applicationsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applications"}
appprojectsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "appprojects"}
appplicationSetResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applicationsets"}
)
// NewAdminCommand returns a new instance of the argocd admin command
func NewAdminCommand() *cobra.Command {
var (
pathOpts = clientcmd.NewDefaultPathOptions()
)
var command = &cobra.Command{
Use: "admin",
Short: "Contains a set of commands useful for Argo CD administrators and requires direct Kubernetes access",
DisableAutoGenTag: true,
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewClusterCommand(pathOpts))
command.AddCommand(NewProjectsCommand())
command.AddCommand(NewSettingsCommand())
command.AddCommand(NewAppCommand())
command.AddCommand(NewRepoCommand())
command.AddCommand(NewImportCommand())
command.AddCommand(NewExportCommand())
command.AddCommand(NewDashboardCommand())
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
return command
}
type argoCDClientsets struct {
configMaps dynamic.ResourceInterface
secrets dynamic.ResourceInterface
applications dynamic.ResourceInterface
projects dynamic.ResourceInterface
applicationSets dynamic.ResourceInterface
}
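// newArgoCDClientsets returns namespaced dynamic-client interfaces for each Argo CD resource type that the admin commands operate on.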
func newArgoCDClientsets(config *rest.Config, namespace string) *argoCDClientsets {
dynamicIf, err := dynamic.NewForConfig(config)
errors.CheckError(err)
return &argoCDClientsets{
configMaps: dynamicIf.Resource(configMapResource).Namespace(namespace),
secrets: dynamicIf.Resource(secretResource).Namespace(namespace),
applications: dynamicIf.Resource(applicationsResource).Namespace(namespace),
projects: dynamicIf.Resource(appprojectsResource).Namespace(namespace),
applicationSets: dynamicIf.Resource(appplicationSetResource).Namespace(namespace),
}
}
// getReferencedSecrets examines the argocd-cm config for any referenced repo secrets and returns a
// map of all referenced secrets.
func getReferencedSecrets(un unstructured.Unstructured) map[string]bool {
var cm apiv1.ConfigMap
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &cm)
errors.CheckError(err)
referencedSecrets := make(map[string]bool)
// Referenced repository secrets
if reposRAW, ok := cm.Data["repositories"]; ok {
repos := make([]settings.Repository, 0)
err := yaml.Unmarshal([]byte(reposRAW), &repos)
errors.CheckError(err)
for _, cred := range repos {
if cred.PasswordSecret != nil {
referencedSecrets[cred.PasswordSecret.Name] = true
}
if cred.SSHPrivateKeySecret != nil {
referencedSecrets[cred.SSHPrivateKeySecret.Name] = true
}
if cred.UsernameSecret != nil {
referencedSecrets[cred.UsernameSecret.Name] = true
}
if cred.TLSClientCertDataSecret != nil {
referencedSecrets[cred.TLSClientCertDataSecret.Name] = true
}
if cred.TLSClientCertKeySecret != nil {
referencedSecrets[cred.TLSClientCertKeySecret.Name] = true
}
}
}
// Referenced repository credentials secrets
if reposRAW, ok := cm.Data["repository.credentials"]; ok {
creds := make([]settings.RepositoryCredentials, 0)
err := yaml.Unmarshal([]byte(reposRAW), &creds)
errors.CheckError(err)
for _, cred := range creds {
if cred.PasswordSecret != nil {
referencedSecrets[cred.PasswordSecret.Name] = true
}
if cred.SSHPrivateKeySecret != nil {
referencedSecrets[cred.SSHPrivateKeySecret.Name] = true
}
if cred.UsernameSecret != nil {
referencedSecrets[cred.UsernameSecret.Name] = true
}
if cred.TLSClientCertDataSecret != nil {
referencedSecrets[cred.TLSClientCertDataSecret.Name] = true
}
if cred.TLSClientCertKeySecret != nil {
referencedSecrets[cred.TLSClientCertKeySecret.Name] = true
}
}
}
return referencedSecrets
}
// isArgoCDSecret returns whether or not the given secret is a part of Argo CD configuration
// (e.g. argocd-secret, repo credentials, or cluster credentials)
func isArgoCDSecret(repoSecretRefs map[string]bool, un unstructured.Unstructured) bool {
secretName := un.GetName()
if secretName == common.ArgoCDSecretName {
return true
}
if repoSecretRefs != nil {
if _, ok := repoSecretRefs[secretName]; ok {
return true
}
}
if labels := un.GetLabels(); labels != nil {
if _, ok := labels[common.LabelKeySecretType]; ok {
return true
}
}
if annotations := un.GetAnnotations(); annotations != nil {
if annotations[common.AnnotationKeyManagedBy] == common.AnnotationValueManagedByArgoCD {
return true
}
}
return false
}
// isArgoCDConfigMap returns true if the configmap name is one of Argo CD's well-known configmaps
func isArgoCDConfigMap(name string) bool {
switch name {
case common.ArgoCDConfigMapName, common.ArgoCDRBACConfigMapName, common.ArgoCDKnownHostsConfigMapName, common.ArgoCDTLSCertsConfigMapName:
return true
}
return false
}
// specsEqual returns whether the spec, data, labels, annotations, and finalizers of the two
// supplied objects are equal, indicating that no update is necessary during importing
func specsEqual(left, right unstructured.Unstructured) bool {
if !reflect.DeepEqual(left.GetAnnotations(), right.GetAnnotations()) {
return false
}
if !reflect.DeepEqual(left.GetLabels(), right.GetLabels()) {
return false
}
if !reflect.DeepEqual(left.GetFinalizers(), right.GetFinalizers()) {
return false
}
switch left.GetKind() {
case "Secret", "ConfigMap":
leftData, _, _ := unstructured.NestedMap(left.Object, "data")
rightData, _, _ := unstructured.NestedMap(right.Object, "data")
return reflect.DeepEqual(leftData, rightData)
case "AppProject":
leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec")
rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec")
return reflect.DeepEqual(leftSpec, rightSpec)
case "Application":
leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec")
rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec")
leftStatus, _, _ := unstructured.NestedMap(left.Object, "status")
rightStatus, _, _ := unstructured.NestedMap(right.Object, "status")
// reconciledAt and observedAt are constantly changing and we ignore any diff there
delete(leftStatus, "reconciledAt")
delete(rightStatus, "reconciledAt")
delete(leftStatus, "observedAt")
delete(rightStatus, "observedAt")
return reflect.DeepEqual(leftSpec, rightSpec) && reflect.DeepEqual(leftStatus, rightStatus)
}
return false
}
func iterateStringFields(obj interface{}, callback func(name string, val string) string) {
if mapField, ok := obj.(map[string]interface{}); ok {
for field, val := range mapField {
if strVal, ok := val.(string); ok {
mapField[field] = callback(field, strVal)
} else {
iterateStringFields(val, callback)
}
}
} else if arrayField, ok := obj.([]interface{}); ok {
for i := range arrayField {
iterateStringFields(arrayField[i], callback)
}
}
}
func redactor(dirtyString string) string {
config := make(map[string]interface{})
err := yaml.Unmarshal([]byte(dirtyString), &config)
errors.CheckError(err)
iterateStringFields(config, func(name string, val string) string {
if name == "clientSecret" || name == "secret" || name == "bindPW" {
return "********"
} else {
return val
}
})
data, err := yaml.Marshal(config)
errors.CheckError(err)
return string(data)
}

View File

@@ -1,417 +0,0 @@
package admin
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"sort"
"time"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
kubecache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/controller"
"github.com/argoproj/argo-cd/v2/controller/cache"
"github.com/argoproj/argo-cd/v2/controller/metrics"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
appinformers "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/config"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
"github.com/argoproj/argo-cd/v2/util/settings"
)
func NewAppCommand() *cobra.Command {
var command = &cobra.Command{
Use: "app",
Short: "Manage applications configuration",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewGenAppSpecCommand())
command.AddCommand(NewReconcileCommand())
command.AddCommand(NewDiffReconcileResults())
return command
}
// NewGenAppSpecCommand generates a declarative configuration file for the given application
func NewGenAppSpecCommand() *cobra.Command {
var (
appOpts cmdutil.AppOptions
fileURL string
appName string
labels []string
outputFormat string
annotations []string
inline bool
)
var command = &cobra.Command{
Use: "generate-spec APPNAME",
Short: "Generate declarative config for an application",
Example: `
# Generate declarative config for a directory app
argocd admin app generate-spec guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --directory-recurse
# Generate declarative config for a Jsonnet app
argocd admin app generate-spec jsonnet-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path jsonnet-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --jsonnet-ext-str replicas=2
# Generate declarative config for a Helm app
argocd admin app generate-spec helm-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path helm-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --helm-set replicaCount=2
# Generate declarative config for a Helm app from a Helm repo
argocd admin app generate-spec nginx-ingress --repo https://charts.helm.sh/stable --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc
# Generate declarative config for a Kustomize app
argocd admin app generate-spec kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image gcr.io/heptio-images/ks-guestbook-demo:0.1
# Generate declarative config for an app using a custom tool:
argocd admin app generate-spec ksane --repo https://github.com/argoproj/argocd-example-apps.git --path plugins/kasane --dest-namespace default --dest-server https://kubernetes.default.svc --config-management-plugin kasane
`,
Run: func(c *cobra.Command, args []string) {
apps, err := cmdutil.ConstructApps(fileURL, appName, labels, annotations, args, appOpts, c.Flags())
errors.CheckError(err)
if len(apps) > 1 {
errors.CheckError(fmt.Errorf("failed to generate spec, more than one application is not supported"))
}
app := apps[0]
if app.Name == "" {
c.HelpFunc()(c, args)
os.Exit(1)
}
out, closer, err := getOutWriter(inline, fileURL)
errors.CheckError(err)
defer io.Close(closer)
errors.CheckError(PrintResources(outputFormat, out, app))
},
}
command.Flags().StringVar(&appName, "name", "", "A name for the app, ignored if a file is set (DEPRECATED)")
command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the app")
command.Flags().StringArrayVarP(&labels, "label", "l", []string{}, "Labels to apply to the app")
command.Flags().StringArrayVarP(&annotations, "annotations", "", []string{}, "Set metadata annotations (e.g. example=value)")
command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml")
command.Flags().BoolVarP(&inline, "inline", "i", false, "If set then generated resource is written back to the file specified in --file flag")
// Only complete files with appropriate extension.
err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"})
errors.CheckError(err)
cmdutil.AddAppFlags(command, &appOpts)
return command
}
type appReconcileResult struct {
Name string `json:"name"`
Health *v1alpha1.HealthStatus `json:"health"`
Sync *v1alpha1.SyncStatus `json:"sync"`
Conditions []v1alpha1.ApplicationCondition `json:"conditions"`
}
type reconcileResults struct {
Applications []appReconcileResult `json:"applications"`
}
func (r *reconcileResults) getAppsMap() map[string]appReconcileResult {
res := map[string]appReconcileResult{}
for i := range r.Applications {
res[r.Applications[i].Name] = r.Applications[i]
}
return res
}
func printLine(format string, a ...interface{}) {
_, _ = fmt.Printf(format+"\n", a...)
}
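// NewDiffReconcileResults returns a command that compares two stored reconciliation result files and prints the differences per application.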
func NewDiffReconcileResults() *cobra.Command {
var command = &cobra.Command{
Use: "diff-reconcile-results PATH1 PATH2",
Short: "Compare results of two reconciliations and print diff.",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
path1 := args[0]
path2 := args[1]
var res1 reconcileResults
var res2 reconcileResults
errors.CheckError(config.UnmarshalLocalFile(path1, &res1))
errors.CheckError(config.UnmarshalLocalFile(path2, &res2))
errors.CheckError(diffReconcileResults(res1, res2))
},
}
return command
}
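// toUnstructured converts an arbitrary value into an Unstructured object by round-tripping it through JSON.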
func toUnstructured(val interface{}) (*unstructured.Unstructured, error) {
data, err := json.Marshal(val)
if err != nil {
return nil, err
}
res := make(map[string]interface{})
err = json.Unmarshal(data, &res)
if err != nil {
return nil, err
}
return &unstructured.Unstructured{Object: res}, nil
}
type diffPair struct {
name string
first *unstructured.Unstructured
second *unstructured.Unstructured
}
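// diffReconcileResults pairs the applications of both result sets by name, sorts the pairs, and prints a diff for each application.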
func diffReconcileResults(res1 reconcileResults, res2 reconcileResults) error {
var pairs []diffPair
resMap1 := res1.getAppsMap()
resMap2 := res2.getAppsMap()
for k, v := range resMap1 {
firstUn, err := toUnstructured(v)
if err != nil {
return err
}
var secondUn *unstructured.Unstructured
second, ok := resMap2[k]
if ok {
secondUn, err = toUnstructured(second)
if err != nil {
return err
}
delete(resMap2, k)
}
pairs = append(pairs, diffPair{name: k, first: firstUn, second: secondUn})
}
for k, v := range resMap2 {
secondUn, err := toUnstructured(v)
if err != nil {
return err
}
pairs = append(pairs, diffPair{name: k, first: nil, second: secondUn})
}
sort.Slice(pairs, func(i, j int) bool {
return pairs[i].name < pairs[j].name
})
for _, item := range pairs {
printLine(item.name)
_ = cli.PrintDiff(item.name, item.first, item.second)
}
return nil
}
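// NewReconcileCommand returns a command that collects (or, with --refresh, recalculates) the reconciliation state of all applications and stores the summary in a file.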
func NewReconcileCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
selector string
repoServerAddress string
outputFormat string
refresh bool
)
var command = &cobra.Command{
Use: "get-reconcile-results PATH",
Short: "Reconcile all applications and stores reconciliation summary in the specified file.",
Run: func(c *cobra.Command, args []string) {
// get rid of logging error handler
runtime.ErrorHandlers = runtime.ErrorHandlers[1:]
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
outputPath := args[0]
errors.CheckError(os.Setenv(v1alpha1.EnvVarFakeInClusterConfig, "true"))
cfg, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
var result []appReconcileResult
if refresh {
if repoServerAddress == "" {
printLine("Repo server is not provided, trying to port-forward to argocd-repo-server pod.")
overrides := clientcmd.ConfigOverrides{}
repoServerPort, err := kubeutil.PortForward(8081, namespace, &overrides, "app.kubernetes.io/name=argocd-repo-server")
errors.CheckError(err)
repoServerAddress = fmt.Sprintf("localhost:%d", repoServerPort)
}
repoServerClient := apiclient.NewRepoServerClientset(repoServerAddress, 60, apiclient.TLSConfiguration{DisableTLS: false, StrictValidation: false})
appClientset := appclientset.NewForConfigOrDie(cfg)
kubeClientset := kubernetes.NewForConfigOrDie(cfg)
result, err = reconcileApplications(kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache)
errors.CheckError(err)
} else {
appClientset := appclientset.NewForConfigOrDie(cfg)
result, err = getReconcileResults(appClientset, namespace, selector)
}
errors.CheckError(saveToFile(err, outputFormat, reconcileResults{Applications: result}, outputPath))
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().StringVar(&repoServerAddress, "repo-server", "", "Repo server address.")
command.Flags().StringVar(&selector, "l", "", "Label selector")
command.Flags().StringVar(&outputFormat, "o", "yaml", "Output format (yaml|json)")
command.Flags().BoolVar(&refresh, "refresh", false, "If set to true then recalculates apps reconciliation")
return command
}
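// saveToFile marshals the reconciliation results to YAML or JSON and writes them to outputPath.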
func saveToFile(err error, outputFormat string, result reconcileResults, outputPath string) error {
errors.CheckError(err)
var data []byte
switch outputFormat {
case "yaml":
if data, err = yaml.Marshal(result); err != nil {
return err
}
case "json":
if data, err = json.Marshal(result); err != nil {
return err
}
default:
return fmt.Errorf("format %s is not supported", outputFormat)
}
return ioutil.WriteFile(outputPath, data, 0644)
}
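// getReconcileResults returns the currently stored health, sync, and condition status of every application matching the selector.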
func getReconcileResults(appClientset appclientset.Interface, namespace string, selector string) ([]appReconcileResult, error) {
appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(context.Background(), v1.ListOptions{LabelSelector: selector})
if err != nil {
return nil, err
}
var items []appReconcileResult
for _, app := range appsList.Items {
items = append(items, appReconcileResult{
Name: app.Name,
Conditions: app.Status.Conditions,
Health: &app.Status.Health,
Sync: &app.Status.Sync,
})
}
return items, nil
}
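// reconcileApplications re-runs application comparison against the repo server using a live state cache and returns the resulting health and sync status per application.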
func reconcileApplications(
kubeClientset kubernetes.Interface,
appClientset appclientset.Interface,
namespace string,
repoServerClient apiclient.Clientset,
selector string,
createLiveStateCache func(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache,
) ([]appReconcileResult, error) {
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
argoDB := db.NewDB(namespace, settingsMgr, kubeClientset)
appInformerFactory := appinformers.NewSharedInformerFactoryWithOptions(
appClientset,
1*time.Hour,
appinformers.WithNamespace(namespace),
appinformers.WithTweakListOptions(func(options *v1.ListOptions) {}),
)
appInformer := appInformerFactory.Argoproj().V1alpha1().Applications().Informer()
projInformer := appInformerFactory.Argoproj().V1alpha1().AppProjects().Informer()
go appInformer.Run(context.Background().Done())
go projInformer.Run(context.Background().Done())
if !kubecache.WaitForCacheSync(context.Background().Done(), appInformer.HasSynced, projInformer.HasSynced) {
return nil, fmt.Errorf("failed to sync cache")
}
appLister := appInformerFactory.Argoproj().V1alpha1().Applications().Lister()
projLister := appInformerFactory.Argoproj().V1alpha1().AppProjects().Lister()
server, err := metrics.NewMetricsServer("", appLister, func(obj interface{}) bool {
return true
}, func(r *http.Request) error {
return nil
}, []string{})
if err != nil {
return nil, err
}
stateCache := createLiveStateCache(argoDB, appInformer, settingsMgr, server)
if err := stateCache.Init(); err != nil {
return nil, err
}
cache := appstatecache.NewCache(
cacheutil.NewCache(cacheutil.NewInMemoryCache(1*time.Minute)),
1*time.Minute,
)
appStateManager := controller.NewAppStateManager(
argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking())
appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(context.Background(), v1.ListOptions{LabelSelector: selector})
if err != nil {
return nil, err
}
sort.Slice(appsList.Items, func(i, j int) bool {
return appsList.Items[i].Spec.Destination.Server < appsList.Items[j].Spec.Destination.Server
})
var items []appReconcileResult
prevServer := ""
for _, app := range appsList.Items {
if prevServer != app.Spec.Destination.Server {
if prevServer != "" {
if clusterCache, err := stateCache.GetClusterCache(prevServer); err == nil {
clusterCache.Invalidate()
}
}
printLine("Reconciling apps of %s", app.Spec.Destination.Server)
prevServer = app.Spec.Destination.Server
}
printLine(app.Name)
proj, err := projLister.AppProjects(namespace).Get(app.Spec.Project)
if err != nil {
return nil, err
}
res := appStateManager.CompareAppState(&app, proj, app.Spec.Source.TargetRevision, app.Spec.Source, false, false, nil)
items = append(items, appReconcileResult{
Name: app.Name,
Conditions: app.Status.Conditions,
Health: res.GetHealthStatus(),
Sync: res.GetSyncStatus(),
})
}
return items, nil
}
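// newLiveStateCache constructs the default controller live state cache used during reconciliation.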
func newLiveStateCache(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache {
return cache.NewLiveStateCache(argoDB, appInformer, settingsMgr, kubeutil.NewKubectl(), server, func(managedByApp map[string]bool, ref apiv1.ObjectReference) {}, nil, argo.NewResourceTracking())
}


@@ -1,181 +0,0 @@
package admin
import (
"testing"
"github.com/argoproj/argo-cd/v2/test"
clustermocks "github.com/argoproj/gitops-engine/pkg/cache/mocks"
"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
cachemocks "github.com/argoproj/argo-cd/v2/controller/cache/mocks"
"github.com/argoproj/argo-cd/v2/controller/metrics"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appfake "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/settings"
)
func TestGetReconcileResults(t *testing.T) {
appClientset := appfake.NewSimpleClientset(&v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Status: v1alpha1.ApplicationStatus{
Health: v1alpha1.HealthStatus{Status: health.HealthStatusHealthy},
Sync: v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync},
},
})
result, err := getReconcileResults(appClientset, "default", "")
if !assert.NoError(t, err) {
return
}
expectedResults := []appReconcileResult{{
Name: "test",
Health: &v1alpha1.HealthStatus{Status: health.HealthStatusHealthy},
Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync},
}}
assert.ElementsMatch(t, expectedResults, result)
}
func TestGetReconcileResults_Refresh(t *testing.T) {
cm := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-cm",
Namespace: "default",
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
}
proj := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: "default",
},
Spec: v1alpha1.AppProjectSpec{Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "*"}}},
}
app := &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Destination: v1alpha1.ApplicationDestination{
Server: v1alpha1.KubernetesInternalAPIServerAddr,
Namespace: "default",
},
},
}
appClientset := appfake.NewSimpleClientset(app, proj)
deployment := test.NewDeployment()
kubeClientset := kubefake.NewSimpleClientset(deployment, &cm)
clusterCache := clustermocks.ClusterCache{}
clusterCache.On("IsNamespaced", mock.Anything).Return(true, nil)
repoServerClient := mocks.RepoServerServiceClient{}
repoServerClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(&apiclient.ManifestResponse{
Manifests: []string{test.DeploymentManifest},
}, nil)
repoServerClientset := mocks.Clientset{RepoServerServiceClient: &repoServerClient}
liveStateCache := cachemocks.LiveStateCache{}
liveStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(deployment): deployment,
}, nil)
liveStateCache.On("GetVersionsInfo", mock.Anything).Return("v1.2.3", nil, nil)
liveStateCache.On("Init").Return(nil, nil)
liveStateCache.On("GetClusterCache", mock.Anything).Return(&clusterCache, nil)
liveStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil)
result, err := reconcileApplications(kubeClientset, appClientset, "default", &repoServerClientset, "",
func(argoDB db.ArgoDB, appInformer cache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) statecache.LiveStateCache {
return &liveStateCache
},
)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, result[0].Health.Status, health.HealthStatusMissing)
assert.Equal(t, result[0].Sync.Status, v1alpha1.SyncStatusCodeOutOfSync)
}
func TestDiffReconcileResults_NoDifferences(t *testing.T) {
logs, err := captureStdout(func() {
assert.NoError(t, diffReconcileResults(
reconcileResults{Applications: []appReconcileResult{{
Name: "app1",
Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync},
}}},
reconcileResults{Applications: []appReconcileResult{{
Name: "app1",
Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync},
}}},
))
})
assert.NoError(t, err)
assert.Equal(t, "app1\n", logs)
}
func TestDiffReconcileResults_DifferentApps(t *testing.T) {
logs, err := captureStdout(func() {
assert.NoError(t, diffReconcileResults(
reconcileResults{Applications: []appReconcileResult{{
Name: "app1",
Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync},
}, {
Name: "app2",
Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync},
}}},
reconcileResults{Applications: []appReconcileResult{{
Name: "app1",
Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync},
}, {
Name: "app3",
Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync},
}}},
))
})
assert.NoError(t, err)
assert.Equal(t, `app1
app2
1,9d0
< conditions: null
< health: null
< name: app2
< sync:
< comparedTo:
< destination: {}
< source:
< repoURL: ""
< status: OutOfSync
app3
0a1,9
> conditions: null
> health: null
> name: app3
> sync:
> comparedTo:
> destination: {}
> source:
> repoURL: ""
> status: OutOfSync
`, logs)
}


@@ -1,351 +0,0 @@
package admin
import (
"bufio"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apierr "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
)
// NewExportCommand defines a new command for exporting Kubernetes and Argo CD resources.
func NewExportCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
out string
)
var command = cobra.Command{
Use: "export",
Short: "Export all Argo CD data to stdout (default) or a file",
Run: func(c *cobra.Command, args []string) {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
var writer io.Writer
if out == "-" {
writer = os.Stdout
} else {
f, err := os.Create(out)
errors.CheckError(err)
bw := bufio.NewWriter(f)
writer = bw
defer func() {
err = bw.Flush()
errors.CheckError(err)
err = f.Close()
errors.CheckError(err)
}()
}
acdClients := newArgoCDClientsets(config, namespace)
acdConfigMap, err := acdClients.configMaps.Get(context.Background(), common.ArgoCDConfigMapName, v1.GetOptions{})
errors.CheckError(err)
export(writer, *acdConfigMap)
acdRBACConfigMap, err := acdClients.configMaps.Get(context.Background(), common.ArgoCDRBACConfigMapName, v1.GetOptions{})
errors.CheckError(err)
export(writer, *acdRBACConfigMap)
acdKnownHostsConfigMap, err := acdClients.configMaps.Get(context.Background(), common.ArgoCDKnownHostsConfigMapName, v1.GetOptions{})
errors.CheckError(err)
export(writer, *acdKnownHostsConfigMap)
acdTLSCertsConfigMap, err := acdClients.configMaps.Get(context.Background(), common.ArgoCDTLSCertsConfigMapName, v1.GetOptions{})
errors.CheckError(err)
export(writer, *acdTLSCertsConfigMap)
referencedSecrets := getReferencedSecrets(*acdConfigMap)
secrets, err := acdClients.secrets.List(context.Background(), v1.ListOptions{})
errors.CheckError(err)
for _, secret := range secrets.Items {
if isArgoCDSecret(referencedSecrets, secret) {
export(writer, secret)
}
}
projects, err := acdClients.projects.List(context.Background(), v1.ListOptions{})
errors.CheckError(err)
for _, proj := range projects.Items {
export(writer, proj)
}
applications, err := acdClients.applications.List(context.Background(), v1.ListOptions{})
errors.CheckError(err)
for _, app := range applications.Items {
export(writer, app)
}
applicationSets, err := acdClients.applicationSets.List(context.Background(), v1.ListOptions{})
if err != nil && !apierr.IsNotFound(err) {
if apierr.IsForbidden(err) {
log.Warn(err)
} else {
errors.CheckError(err)
}
}
if applicationSets != nil {
for _, appSet := range applicationSets.Items {
export(writer, appSet)
}
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout")
return &command
}
// NewImportCommand defines a new command for importing Kubernetes and Argo CD resources.
func NewImportCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
prune bool
dryRun bool
verbose bool
)
var command = cobra.Command{
Use: "import SOURCE",
Short: "Import Argo CD data from stdin (specify `-') or a file",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = 100
config.Burst = 50
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
acdClients := newArgoCDClientsets(config, namespace)
var input []byte
if in := args[0]; in == "-" {
input, err = ioutil.ReadAll(os.Stdin)
} else {
input, err = ioutil.ReadFile(in)
}
errors.CheckError(err)
var dryRunMsg string
if dryRun {
dryRunMsg = " (dry run)"
}
// pruneObjects tracks live objects and their current resource versions. Any remaining
// items in this map indicate resources that should be pruned since they no longer appear
// in the backup.
pruneObjects := make(map[kube.ResourceKey]unstructured.Unstructured)
configMaps, err := acdClients.configMaps.List(context.Background(), v1.ListOptions{})
errors.CheckError(err)
// referencedSecrets holds any secrets referenced in the argocd-cm configmap. These
// secrets need to be imported too
var referencedSecrets map[string]bool
for _, cm := range configMaps.Items {
if isArgoCDConfigMap(cm.GetName()) {
pruneObjects[kube.ResourceKey{Group: "", Kind: "ConfigMap", Name: cm.GetName()}] = cm
}
if cm.GetName() == common.ArgoCDConfigMapName {
referencedSecrets = getReferencedSecrets(cm)
}
}
secrets, err := acdClients.secrets.List(context.Background(), v1.ListOptions{})
errors.CheckError(err)
for _, secret := range secrets.Items {
if isArgoCDSecret(referencedSecrets, secret) {
pruneObjects[kube.ResourceKey{Group: "", Kind: "Secret", Name: secret.GetName()}] = secret
}
}
applications, err := acdClients.applications.List(context.Background(), v1.ListOptions{})
errors.CheckError(err)
for _, app := range applications.Items {
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "Application", Name: app.GetName()}] = app
}
projects, err := acdClients.projects.List(context.Background(), v1.ListOptions{})
errors.CheckError(err)
for _, proj := range projects.Items {
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "AppProject", Name: proj.GetName()}] = proj
}
applicationSets, err := acdClients.applicationSets.List(context.Background(), v1.ListOptions{})
if apierr.IsForbidden(err) || apierr.IsNotFound(err) {
log.Warnf("argoproj.io/ApplicationSet: %v\n", err)
} else {
errors.CheckError(err)
}
if applicationSets != nil {
for _, appSet := range applicationSets.Items {
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "ApplicationSet", Name: appSet.GetName()}] = appSet
}
}
// Create or replace existing object
backupObjects, err := kube.SplitYAML(input)
errors.CheckError(err)
for _, bakObj := range backupObjects {
gvk := bakObj.GroupVersionKind()
key := kube.ResourceKey{Group: gvk.Group, Kind: gvk.Kind, Name: bakObj.GetName()}
liveObj, exists := pruneObjects[key]
delete(pruneObjects, key)
var dynClient dynamic.ResourceInterface
switch bakObj.GetKind() {
case "Secret":
dynClient = acdClients.secrets
case "ConfigMap":
dynClient = acdClients.configMaps
case "AppProject":
dynClient = acdClients.projects
case "Application":
dynClient = acdClients.applications
case "ApplicationSet":
dynClient = acdClients.applicationSets
}
if !exists {
isForbidden := false
if !dryRun {
_, err = dynClient.Create(context.Background(), bakObj, v1.CreateOptions{})
if apierr.IsForbidden(err) || apierr.IsNotFound(err) {
isForbidden = true
log.Warnf("%s/%s %s: %v", gvk.Group, gvk.Kind, bakObj.GetName(), err)
} else {
errors.CheckError(err)
}
}
if !isForbidden {
fmt.Printf("%s/%s %s created%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg)
}
} else if specsEqual(*bakObj, liveObj) {
if verbose {
fmt.Printf("%s/%s %s unchanged%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg)
}
} else {
isForbidden := false
if !dryRun {
newLive := updateLive(bakObj, &liveObj)
_, err = dynClient.Update(context.Background(), newLive, v1.UpdateOptions{})
if apierr.IsForbidden(err) || apierr.IsNotFound(err) {
isForbidden = true
log.Warnf("%s/%s %s: %v", gvk.Group, gvk.Kind, bakObj.GetName(), err)
} else {
errors.CheckError(err)
}
}
if !isForbidden {
fmt.Printf("%s/%s %s updated%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg)
}
}
}
// Delete objects not in backup
for key, liveObj := range pruneObjects {
if prune {
var dynClient dynamic.ResourceInterface
switch key.Kind {
case "Secret":
dynClient = acdClients.secrets
case "AppProject":
dynClient = acdClients.projects
case "Application":
dynClient = acdClients.applications
if !dryRun {
if finalizers := liveObj.GetFinalizers(); len(finalizers) > 0 {
newLive := liveObj.DeepCopy()
newLive.SetFinalizers(nil)
_, err = dynClient.Update(context.Background(), newLive, v1.UpdateOptions{})
if err != nil && !apierr.IsNotFound(err) {
errors.CheckError(err)
}
}
}
case "ApplicationSet":
dynClient = acdClients.applicationSets
default:
log.Fatalf("Unexpected kind '%s' in prune list", key.Kind)
}
isForbidden := false
if !dryRun {
err = dynClient.Delete(context.Background(), key.Name, v1.DeleteOptions{})
if apierr.IsForbidden(err) || apierr.IsNotFound(err) {
isForbidden = true
log.Warnf("%s/%s %s: %v\n", key.Group, key.Kind, key.Name, err)
} else {
errors.CheckError(err)
}
}
if !isForbidden {
fmt.Printf("%s/%s %s pruned%s\n", key.Group, key.Kind, key.Name, dryRunMsg)
}
} else {
fmt.Printf("%s/%s %s needs pruning\n", key.Group, key.Kind, key.Name)
}
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().BoolVar(&dryRun, "dry-run", false, "Print what will be performed")
command.Flags().BoolVar(&prune, "prune", false, "Prune secrets, applications and projects which do not appear in the backup")
command.Flags().BoolVar(&verbose, "verbose", false, "Verbose output (versus only changed output)")
return &command
}
// export writes the unstructured object and removes extraneous cruft from output before writing
func export(w io.Writer, un unstructured.Unstructured) {
name := un.GetName()
finalizers := un.GetFinalizers()
apiVersion := un.GetAPIVersion()
kind := un.GetKind()
labels := un.GetLabels()
annotations := un.GetAnnotations()
unstructured.RemoveNestedField(un.Object, "metadata")
un.SetName(name)
un.SetFinalizers(finalizers)
un.SetAPIVersion(apiVersion)
un.SetKind(kind)
un.SetLabels(labels)
un.SetAnnotations(annotations)
data, err := yaml.Marshal(un.Object)
errors.CheckError(err)
_, err = w.Write(data)
errors.CheckError(err)
_, err = w.Write([]byte(yamlSeparator))
errors.CheckError(err)
}
// updateLive replaces the live object's finalizers, spec, annotations, labels, and data from the
// backup object but leaves all other fields intact (status, other metadata, etc...)
func updateLive(bak, live *unstructured.Unstructured) *unstructured.Unstructured {
newLive := live.DeepCopy()
newLive.SetAnnotations(bak.GetAnnotations())
newLive.SetLabels(bak.GetLabels())
newLive.SetFinalizers(bak.GetFinalizers())
switch live.GetKind() {
case "Secret", "ConfigMap":
newLive.Object["data"] = bak.Object["data"]
case "AppProject":
newLive.Object["spec"] = bak.Object["spec"]
case "Application":
newLive.Object["spec"] = bak.Object["spec"]
if _, ok := bak.Object["status"]; ok {
newLive.Object["status"] = bak.Object["status"]
}
case "ApplicationSet":
newLive.Object["spec"] = bak.Object["spec"]
}
return newLive
}


@@ -1,617 +0,0 @@
package admin
import (
"context"
"fmt"
"math"
"os"
"sort"
"strings"
"text/tabwriter"
"time"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/go-redis/redis/v8"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/controller/sharding"
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/util/argo"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/clusterauth"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/glob"
kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
"github.com/argoproj/argo-cd/v2/util/settings"
"github.com/argoproj/argo-cd/v2/util/text/label"
)
func NewClusterCommand(pathOpts *clientcmd.PathOptions) *cobra.Command {
var command = &cobra.Command{
Use: "cluster",
Short: "Manage clusters configuration",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewClusterConfig())
command.AddCommand(NewGenClusterConfigCommand(pathOpts))
command.AddCommand(NewClusterStatsCommand())
command.AddCommand(NewClusterShardsCommand())
namespacesCommand := NewClusterNamespacesCommand()
namespacesCommand.AddCommand(NewClusterEnableNamespacedMode())
namespacesCommand.AddCommand(NewClusterDisableNamespacedMode())
command.AddCommand(namespacesCommand)
return command
}
type ClusterWithInfo struct {
argoappv1.Cluster
// Shard holds controller shard number that handles the cluster
Shard int
// Namespaces holds list of namespaces managed by Argo CD in the cluster
Namespaces []string
}
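// loadClusters lists all configured clusters and enriches each one with its controller shard, managed namespaces, and cached cluster info.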
func loadClusters(kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int) ([]ClusterWithInfo, error) {
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, namespace)
argoDB := db.NewDB(namespace, settingsMgr, kubeClient)
clustersList, err := argoDB.ListClusters(context.Background())
if err != nil {
return nil, err
}
var cache *appstatecache.Cache
if portForwardRedis {
overrides := clientcmd.ConfigOverrides{}
port, err := kubeutil.PortForward(6379, namespace, &overrides,
"app.kubernetes.io/name=argocd-redis-ha-haproxy", "app.kubernetes.io/name=argocd-redis")
if err != nil {
return nil, err
}
client := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", port)})
cache = appstatecache.NewCache(cacheutil.NewCache(cacheutil.NewRedisCache(client, time.Hour)), time.Hour)
} else {
cache, err = cacheSrc()
if err != nil {
return nil, err
}
}
appItems, err := appClient.ArgoprojV1alpha1().Applications(namespace).List(context.Background(), v1.ListOptions{})
if err != nil {
return nil, err
}
apps := appItems.Items
for i, app := range apps {
err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, argoDB)
if err != nil {
return nil, err
}
apps[i] = app
}
clusters := make([]ClusterWithInfo, len(clustersList.Items))
batchSize := 10
batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize)))
for batchNum := 0; batchNum < batchesCount; batchNum++ {
batchStart := batchSize * batchNum
batchEnd := batchSize * (batchNum + 1)
if batchEnd > len(clustersList.Items) {
batchEnd = len(clustersList.Items)
}
batch := clustersList.Items[batchStart:batchEnd]
_ = kube.RunAllAsync(len(batch), func(i int) error {
cluster := batch[i]
clusterShard := 0
if replicas > 0 {
clusterShard = sharding.GetShardByID(cluster.ID, replicas)
}
if shard != -1 && clusterShard != shard {
return nil
}
nsSet := map[string]bool{}
for _, app := range apps {
if app.Spec.Destination.Server == cluster.Server {
nsSet[app.Spec.Destination.Namespace] = true
}
}
var namespaces []string
for ns := range nsSet {
namespaces = append(namespaces, ns)
}
_ = cache.GetClusterInfo(cluster.Server, &cluster.Info)
clusters[batchStart+i] = ClusterWithInfo{cluster, clusterShard, namespaces}
return nil
})
}
return clusters, nil
}
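// getControllerReplicas returns the number of running application controller pods in the given namespace.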
func getControllerReplicas(kubeClient *kubernetes.Clientset, namespace string) (int, error) {
controllerPods, err := kubeClient.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{
LabelSelector: "app.kubernetes.io/name=argocd-application-controller"})
if err != nil {
return 0, err
}
return len(controllerPods.Items), nil
}
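// NewClusterShardsCommand returns a command that prints the number of resources handled by each controller shard.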
func NewClusterShardsCommand() *cobra.Command {
var (
shard int
replicas int
clientConfig clientcmd.ClientConfig
cacheSrc func() (*appstatecache.Cache, error)
portForwardRedis bool
)
var command = cobra.Command{
Use: "shards",
Short: "Print information about each controller shard and portion of Kubernetes resources it is responsible for.",
Run: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.WarnLevel)
clientCfg, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClient := kubernetes.NewForConfigOrDie(clientCfg)
appClient := versioned.NewForConfigOrDie(clientCfg)
if replicas == 0 {
replicas, err = getControllerReplicas(kubeClient, namespace)
errors.CheckError(err)
}
if replicas == 0 {
return
}
clusters, err := loadClusters(kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard)
errors.CheckError(err)
if len(clusters) == 0 {
return
}
printStatsSummary(clusters)
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)
return &command
}
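// printStatsSummary prints the resource count of each shard and its percentage relative to the average shard load.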
func printStatsSummary(clusters []ClusterWithInfo) {
totalResourcesCount := int64(0)
resourcesCountByShard := map[int]int64{}
for _, c := range clusters {
totalResourcesCount += c.Info.CacheInfo.ResourcesCount
resourcesCountByShard[c.Shard] += c.Info.CacheInfo.ResourcesCount
}
avgResourcesByShard := totalResourcesCount / int64(len(resourcesCountByShard))
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "SHARD\tRESOURCES COUNT\n")
for shard := 0; shard < len(resourcesCountByShard); shard++ {
cnt := resourcesCountByShard[shard]
percent := (float64(cnt) / float64(avgResourcesByShard)) * 100.0
_, _ = fmt.Fprintf(w, "%d\t%s\n", shard, fmt.Sprintf("%d (%.0f%%)", cnt, percent))
}
_ = w.Flush()
}
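// runClusterNamespacesCommand builds a map of cluster server URL to the namespaces Argo CD manages in that cluster and passes it to the provided action.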
func runClusterNamespacesCommand(clientConfig clientcmd.ClientConfig, action func(appClient *versioned.Clientset, argoDB db.ArgoDB, clusters map[string][]string) error) error {
clientCfg, err := clientConfig.ClientConfig()
if err != nil {
return err
}
namespace, _, err := clientConfig.Namespace()
if err != nil {
return err
}
kubeClient := kubernetes.NewForConfigOrDie(clientCfg)
appClient := versioned.NewForConfigOrDie(clientCfg)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, namespace)
argoDB := db.NewDB(namespace, settingsMgr, kubeClient)
clustersList, err := argoDB.ListClusters(context.Background())
if err != nil {
return err
}
appItems, err := appClient.ArgoprojV1alpha1().Applications(namespace).List(context.Background(), v1.ListOptions{})
if err != nil {
return err
}
apps := appItems.Items
for i, app := range apps {
err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, argoDB)
if err != nil {
return err
}
apps[i] = app
}
clusters := map[string][]string{}
for _, cluster := range clustersList.Items {
nsSet := map[string]bool{}
for _, app := range apps {
if app.Spec.Destination.Server != cluster.Server {
continue
}
// Use namespaces of actually deployed resources, since some applications use a dummy target namespace
// If resources list is empty then use target namespace
if len(app.Status.Resources) != 0 {
for _, res := range app.Status.Resources {
if res.Namespace != "" {
nsSet[res.Namespace] = true
}
}
} else {
if app.Spec.Destination.Server == cluster.Server {
nsSet[app.Spec.Destination.Namespace] = true
}
}
}
var namespaces []string
for ns := range nsSet {
namespaces = append(namespaces, ns)
}
clusters[cluster.Server] = namespaces
}
return action(appClient, argoDB, clusters)
}
func NewClusterNamespacesCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
)
var command = cobra.Command{
Use: "namespaces",
Short: "Print information namespaces which Argo CD manages in each cluster.",
Run: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.WarnLevel)
err := runClusterNamespacesCommand(clientConfig, func(appClient *versioned.Clientset, _ db.ArgoDB, clusters map[string][]string) error {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "CLUSTER\tNAMESPACES\n")
for cluster, namespaces := range clusters {
// print shortest namespace names first
sort.Slice(namespaces, func(i, j int) bool {
return len(namespaces[j]) > len(namespaces[i])
})
namespacesStr := ""
if len(namespaces) > 4 {
namespacesStr = fmt.Sprintf("%s (total %d)", strings.Join(namespaces[:4], ","), len(namespaces))
} else {
namespacesStr = strings.Join(namespaces, ",")
}
_, _ = fmt.Fprintf(w, "%s\t%s\n", cluster, namespacesStr)
}
_ = w.Flush()
return nil
})
errors.CheckError(err)
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
return &command
}
func NewClusterEnableNamespacedMode() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
dryRun bool
clusterResources bool
namespacesCount int
)
var command = cobra.Command{
Use: "enable-namespaced-mode PATTERN",
Short: "Enable namespaced mode for clusters which name matches to the specified pattern.",
Run: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.WarnLevel)
if len(args) == 0 {
cmd.HelpFunc()(cmd, args)
os.Exit(1)
}
pattern := args[0]
errors.CheckError(runClusterNamespacesCommand(clientConfig, func(_ *versioned.Clientset, argoDB db.ArgoDB, clusters map[string][]string) error {
for server, namespaces := range clusters {
if len(namespaces) == 0 || len(namespaces) > namespacesCount || !glob.Match(pattern, server) {
continue
}
cluster, err := argoDB.GetCluster(context.Background(), server)
if err != nil {
return err
}
cluster.Namespaces = namespaces
cluster.ClusterResources = clusterResources
fmt.Printf("Setting cluster %s namespaces to %v...", server, namespaces)
if !dryRun {
_, err = argoDB.UpdateCluster(context.Background(), cluster)
if err != nil {
return err
}
fmt.Println("done")
} else {
fmt.Println("done (dry run)")
}
}
return nil
}))
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().BoolVar(&dryRun, "dry-run", true, "Print what will be performed")
command.Flags().BoolVar(&clusterResources, "cluster-resources", false, "Indicates if cluster level resources should be managed.")
command.Flags().IntVar(&namespacesCount, "max-namespace-count", 0, "Max number of namespaces that cluster should managed managed namespaces is less or equal to specified count")
return &command
}
func NewClusterDisableNamespacedMode() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
dryRun bool
)
var command = cobra.Command{
Use: "disable-namespaced-mode PATTERN",
Short: "Disable namespaced mode for clusters which name matches to the specified pattern.",
Run: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.WarnLevel)
if len(args) == 0 {
cmd.HelpFunc()(cmd, args)
os.Exit(1)
}
pattern := args[0]
errors.CheckError(runClusterNamespacesCommand(clientConfig, func(_ *versioned.Clientset, argoDB db.ArgoDB, clusters map[string][]string) error {
for server := range clusters {
if !glob.Match(pattern, server) {
continue
}
cluster, err := argoDB.GetCluster(context.Background(), server)
if err != nil {
return err
}
if len(cluster.Namespaces) == 0 {
continue
}
cluster.Namespaces = nil
fmt.Printf("Disabling namespaced mode for cluster %s...", server)
if !dryRun {
_, err = argoDB.UpdateCluster(context.Background(), cluster)
if err != nil {
return err
}
fmt.Println("done")
} else {
fmt.Println("done (dry run)")
}
}
return nil
}))
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().BoolVar(&dryRun, "dry-run", true, "Print what will be performed")
return &command
}
func NewClusterStatsCommand() *cobra.Command {
var (
shard int
replicas int
clientConfig clientcmd.ClientConfig
cacheSrc func() (*appstatecache.Cache, error)
portForwardRedis bool
)
var command = cobra.Command{
Use: "stats",
Short: "Prints information cluster statistics and inferred shard number",
Run: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.WarnLevel)
clientCfg, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClient := kubernetes.NewForConfigOrDie(clientCfg)
appClient := versioned.NewForConfigOrDie(clientCfg)
if replicas == 0 {
replicas, err = getControllerReplicas(kubeClient, namespace)
errors.CheckError(err)
}
clusters, err := loadClusters(kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard)
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "SERVER\tSHARD\tCONNECTION\tNAMESPACES COUNT\tAPPS COUNT\tRESOURCES COUNT\n")
for _, cluster := range clusters {
_, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%d\t%d\t%d\n", cluster.Server, cluster.Shard, cluster.Info.ConnectionState.Status, len(cluster.Namespaces), cluster.Info.ApplicationsCount, cluster.Info.CacheInfo.ResourcesCount)
}
_ = w.Flush()
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)
return &command
}
// NewClusterConfig returns a new instance of the `argocd admin cluster kubeconfig` command
func NewClusterConfig() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
)
var command = &cobra.Command{
Use: "kubeconfig CLUSTER_URL OUTPUT_PATH",
Short: "Generates kubeconfig for the specified cluster",
DisableAutoGenTag: true,
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
serverUrl := args[0]
output := args[1]
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeclientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
cluster, err := db.NewDB(namespace, settings.NewSettingsManager(context.Background(), kubeclientset, namespace), kubeclientset).GetCluster(context.Background(), serverUrl)
errors.CheckError(err)
err = kube.WriteKubeConfig(cluster.RawRestConfig(), namespace, output)
errors.CheckError(err)
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}
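// NewGenClusterConfigCommand returns a command that generates the declarative cluster secret for the given kubeconfig context.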
func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command {
var (
clusterOpts cmdutil.ClusterOptions
bearerToken string
generateToken bool
outputFormat string
labels []string
annotations []string
)
var command = &cobra.Command{
Use: "generate-spec CONTEXT",
Short: "Generate declarative config for a cluster",
Run: func(c *cobra.Command, args []string) {
log.SetLevel(log.WarnLevel)
var configAccess clientcmd.ConfigAccess = pathOpts
if len(args) == 0 {
log.Error("Choose a context name from:")
cmdutil.PrintKubeContexts(configAccess)
os.Exit(1)
}
cfgAccess, err := configAccess.GetStartingConfig()
errors.CheckError(err)
contextName := args[0]
clstContext := cfgAccess.Contexts[contextName]
if clstContext == nil {
log.Fatalf("Context %s does not exist in kubeconfig", contextName)
return
}
overrides := clientcmd.ConfigOverrides{
Context: *clstContext,
}
clientConfig := clientcmd.NewDefaultClientConfig(*cfgAccess, &overrides)
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
kubeClientset := fake.NewSimpleClientset()
var awsAuthConf *argoappv1.AWSAuthConfig
var execProviderConf *argoappv1.ExecProviderConfig
if clusterOpts.AwsClusterName != "" {
awsAuthConf = &argoappv1.AWSAuthConfig{
ClusterName: clusterOpts.AwsClusterName,
RoleARN: clusterOpts.AwsRoleArn,
}
} else if clusterOpts.ExecProviderCommand != "" {
execProviderConf = &argoappv1.ExecProviderConfig{
Command: clusterOpts.ExecProviderCommand,
Args: clusterOpts.ExecProviderArgs,
Env: clusterOpts.ExecProviderEnv,
APIVersion: clusterOpts.ExecProviderAPIVersion,
InstallHint: clusterOpts.ExecProviderInstallHint,
}
} else if generateToken {
bearerToken, err = GenerateToken(clusterOpts, conf)
errors.CheckError(err)
} else if bearerToken == "" {
bearerToken = "bearer-token"
}
if clusterOpts.Name != "" {
contextName = clusterOpts.Name
}
labelsMap, err := label.Parse(labels)
errors.CheckError(err)
annotationsMap, err := label.Parse(annotations)
errors.CheckError(err)
clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, clusterOpts.ClusterResources, conf, bearerToken, awsAuthConf, execProviderConf, labelsMap, annotationsMap)
if clusterOpts.InCluster {
clst.Server = argoappv1.KubernetesInternalAPIServerAddr
}
if clusterOpts.Shard >= 0 {
clst.Shard = &clusterOpts.Shard
}
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, ArgoCDNamespace)
argoDB := db.NewDB(ArgoCDNamespace, settingsMgr, kubeClientset)
_, err = argoDB.CreateCluster(context.Background(), clst)
errors.CheckError(err)
secName, err := db.URIToSecretName("cluster", clst.Server)
errors.CheckError(err)
secret, err := kubeClientset.CoreV1().Secrets(ArgoCDNamespace).Get(context.Background(), secName, v1.GetOptions{})
errors.CheckError(err)
errors.CheckError(PrintResources(outputFormat, os.Stdout, secret))
},
}
command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file")
command.Flags().StringVar(&bearerToken, "bearer-token", "", "Authentication token that should be used to access K8S API server")
command.Flags().BoolVar(&generateToken, "generate-bearer-token", false, "Generate authentication token that should be used to access K8S API server")
command.Flags().StringVar(&clusterOpts.ServiceAccount, "service-account", "argocd-manager", fmt.Sprintf("System namespace service account to use for kubernetes resource management. If not set then default \"%s\" SA will be used", clusterauth.ArgoCDManagerServiceAccount))
command.Flags().StringVar(&clusterOpts.SystemNamespace, "system-namespace", common.DefaultSystemNamespace, "Use different system namespace")
command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml")
command.Flags().StringArrayVar(&labels, "label", nil, "Set metadata labels (e.g. --label key=value)")
command.Flags().StringArrayVar(&annotations, "annotation", nil, "Set metadata annotations (e.g. --annotation key=value)")
cmdutil.AddClusterFlags(command, &clusterOpts)
return command
}
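// GenerateToken returns the bearer token of the configured service account in the system namespace.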
func GenerateToken(clusterOpts cmdutil.ClusterOptions, conf *rest.Config) (string, error) {
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
bearerToken, err := clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount, common.BearerTokenTimeout)
if err != nil {
return "", err
}
return bearerToken, nil
}


@@ -1,30 +0,0 @@
package admin
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/pkg/apiclient"
)
func NewDashboardCommand() *cobra.Command {
var (
port int
)
cmd := &cobra.Command{
Use: "dashboard",
Short: "Starts Argo CD Web UI locally",
Run: func(cmd *cobra.Command, args []string) {
println(fmt.Sprintf("Argo CD UI is available at http://localhost:%d", port))
<-context.Background().Done()
},
}
clientOpts := &apiclient.ClientOptions{Core: true}
headless.InitCommand(cmd, clientOpts, &port)
cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
return cmd
}


@@ -1,108 +0,0 @@
package admin
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/ghodss/yaml"
v1 "k8s.io/api/core/v1"
ioutil "github.com/argoproj/argo-cd/v2/util/io"
)
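// getOutWriter returns stdout unless inline editing is requested, in which case it backs up the target file to '<path>.back' and returns a writer for a newly created file at the original path.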
func getOutWriter(inline bool, filePath string) (io.Writer, io.Closer, error) {
if !inline {
return os.Stdout, ioutil.NopCloser, nil
}
if filePath == "" {
return nil, nil, errors.New("The file path must be specified using flag '--file'")
}
err := os.Rename(filePath, fmt.Sprintf("%s.back", filePath))
if err != nil {
return nil, nil, err
}
fileOut, err := os.Create(filePath)
if err != nil {
return nil, nil, err
}
return fileOut, fileOut, nil
}
// PrintResources prints the given resources in YAML or JSON format to the provided writer according to the output format
func PrintResources(output string, out io.Writer, resources ...interface{}) error {
for i, resource := range resources {
if secret, ok := resource.(*v1.Secret); ok {
convertSecretData(secret)
}
filteredResource, err := omitFields(resource)
if err != nil {
return err
}
resources[i] = filteredResource
}
var obj interface{} = resources
if len(resources) == 1 {
obj = resources[0]
}
switch output {
case "json":
jsonBytes, err := json.MarshalIndent(obj, "", " ")
if err != nil {
return err
}
_, _ = fmt.Fprintln(out, string(jsonBytes))
case "yaml":
yamlBytes, err := yaml.Marshal(obj)
if err != nil {
return err
}
// marshaled YAML already ends with the new line character
_, _ = fmt.Fprint(out, string(yamlBytes))
default:
return fmt.Errorf("unknown output format: %s", output)
}
return nil
}
// omit fields such as status, creationTimestamp and metadata.namespace in k8s objects
func omitFields(resource interface{}) (interface{}, error) {
jsonBytes, err := json.Marshal(resource)
if err != nil {
return nil, err
}
toMap := make(map[string]interface{})
err = json.Unmarshal(jsonBytes, &toMap)
if err != nil {
return nil, err
}
delete(toMap, "status")
if v, ok := toMap["metadata"]; ok {
if metadata, ok := v.(map[string]interface{}); ok {
delete(metadata, "creationTimestamp")
delete(metadata, "namespace")
}
}
return toMap, nil
}
// convertSecretData converts kubernetes secret's data to stringData
func convertSecretData(secret *v1.Secret) {
secret.Kind = kube.SecretKind
secret.APIVersion = "v1"
secret.StringData = map[string]string{}
for k, v := range secret.Data {
secret.StringData[k] = string(v)
}
secret.Data = map[string][]byte{}
}


@@ -1,58 +0,0 @@
package admin
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestGetOutWriter_InlineOff(t *testing.T) {
out, closer, err := getOutWriter(false, "")
require.NoError(t, err)
defer io.Close(closer)
assert.Equal(t, os.Stdout, out)
}
func TestGetOutWriter_InlineOn(t *testing.T) {
tmpFile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer func() {
_ = os.Remove(tmpFile.Name())
_ = os.Remove(fmt.Sprintf("%s.back", tmpFile.Name()))
}()
out, closer, err := getOutWriter(true, tmpFile.Name())
require.NoError(t, err)
defer io.Close(closer)
assert.Equal(t, tmpFile.Name(), out.(*os.File).Name())
_, err = os.Stat(fmt.Sprintf("%s.back", tmpFile.Name()))
assert.NoError(t, err, "Back file must be created")
}
func TestPrintResources_Secret_YAML(t *testing.T) {
out := bytes.Buffer{}
err := PrintResources("yaml", &out, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "my-secret"},
Data: map[string][]byte{"my-secret-key": []byte("my-secret-data")},
})
assert.NoError(t, err)
assert.Equal(t, `apiVersion: v1
kind: Secret
metadata:
name: my-secret
stringData:
my-secret-key: my-secret-data
`, out.String())
}


@@ -1,231 +0,0 @@
package admin
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
appclient "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/spf13/cobra"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
)
func NewProjectsCommand() *cobra.Command {
var command = &cobra.Command{
Use: "proj",
Short: "Manage projects configuration",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewGenProjectSpecCommand())
command.AddCommand(NewUpdatePolicyRuleCommand())
command.AddCommand(NewProjectAllowListGenCommand())
return command
}
// NewGenProjectSpecCommand generates a declarative configuration file for the given project
func NewGenProjectSpecCommand() *cobra.Command {
var (
opts cmdutil.ProjectOpts
fileURL string
outputFormat string
inline bool
)
var command = &cobra.Command{
Use: "generate-spec PROJECT",
Short: "Generate declarative config for a project",
Run: func(c *cobra.Command, args []string) {
proj, err := cmdutil.ConstructAppProj(fileURL, args, opts, c)
errors.CheckError(err)
out, closer, err := getOutWriter(inline, fileURL)
errors.CheckError(err)
defer io.Close(closer)
errors.CheckError(PrintResources(outputFormat, out, proj))
},
}
command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml")
command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the project")
command.Flags().BoolVarP(&inline, "inline", "i", false, "If set then generated resource is written back to the file specified in --file flag")
// Only complete files with appropriate extension.
err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"})
errors.CheckError(err)
cmdutil.AddProjFlags(command, &opts)
return command
}
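// globMatch reports whether val matches the glob pattern; the pattern "*" matches everything.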
func globMatch(pattern string, val string) bool {
if pattern == "*" {
return true
}
if ok, err := filepath.Match(pattern, val); ok && err == nil {
return true
}
return false
}
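// getModification returns a function that produces the new policy fragment: "set" builds a fully specified permission, "remove" returns an empty string so the matching policy is dropped.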
func getModification(modification string, resource string, scope string, permission string) (func(string, string) string, error) {
switch modification {
case "set":
if scope == "" {
return nil, fmt.Errorf("Flag --group cannot be empty if permission should be set in role")
}
if permission == "" {
return nil, fmt.Errorf("Flag --permission cannot be empty if permission should be set in role")
}
return func(proj string, action string) string {
return fmt.Sprintf("%s, %s, %s/%s, %s", resource, action, proj, scope, permission)
}, nil
case "remove":
return func(proj string, action string) string {
return ""
}, nil
}
return nil, fmt.Errorf("modification %s is not supported", modification)
}
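// saveProject prints a diff between the original and updated project and, unless dryRun is set, persists the update.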
func saveProject(updated v1alpha1.AppProject, orig v1alpha1.AppProject, projectsIf appclient.AppProjectInterface, dryRun bool) error {
fmt.Printf("===== %s ======\n", updated.Name)
target, err := kube.ToUnstructured(&updated)
errors.CheckError(err)
live, err := kube.ToUnstructured(&orig)
if err != nil {
return err
}
_ = cli.PrintDiff(updated.Name, target, live)
if !dryRun {
_, err = projectsIf.Update(context.Background(), &updated, v1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
func formatPolicy(proj string, role string, permission string) string {
return fmt.Sprintf("p, proj:%s:%s, %s", proj, role, permission)
}
func split(input string, delimiter string) []string {
parts := strings.Split(input, delimiter)
for i := range parts {
parts[i] = strings.TrimSpace(parts[i])
}
return parts
}
func NewUpdatePolicyRuleCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
resource string
scope string
rolePattern string
permission string
dryRun bool
)
var command = &cobra.Command{
Use: "update-role-policy PROJECT_GLOB MODIFICATION ACTION",
Short: "Implement bulk project role update. Useful to back-fill existing project policies or remove obsolete actions.",
Example: ` # Add a policy that allows executing any action (action/*) to roles whose name matches *deployer* in all projects
argocd admin projects update-role-policy '*' set 'action/*' --role '*deployer*' --resource applications --scope '*' --permission allow
# Remove the policy that manages the 'override' action from all roles whose name matches *deployer* in all projects
argocd admin projects update-role-policy '*' remove override --role '*deployer*'
`,
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projectGlob := args[0]
modificationType := args[1]
action := args[2]
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = 100
config.Burst = 50
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
appclients := appclientset.NewForConfigOrDie(config)
modification, err := getModification(modificationType, resource, scope, permission)
errors.CheckError(err)
projIf := appclients.ArgoprojV1alpha1().AppProjects(namespace)
err = updateProjects(projIf, projectGlob, rolePattern, action, modification, dryRun)
errors.CheckError(err)
},
}
command.Flags().StringVar(&resource, "resource", "", "Resource e.g. 'applications'")
command.Flags().StringVar(&scope, "scope", "", "Resource scope e.g. '*'")
command.Flags().StringVar(&rolePattern, "role", "*", "Role name pattern e.g. '*deployer*'")
command.Flags().StringVar(&permission, "permission", "", "Action permission")
command.Flags().BoolVar(&dryRun, "dry-run", true, "Dry run")
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}
func updateProjects(projIf appclient.AppProjectInterface, projectGlob string, rolePattern string, action string, modification func(string, string) string, dryRun bool) error {
projects, err := projIf.List(context.Background(), v1.ListOptions{})
if err != nil {
return err
}
for _, proj := range projects.Items {
if !globMatch(projectGlob, proj.Name) {
continue
}
origProj := proj.DeepCopy()
updated := false
for i, role := range proj.Spec.Roles {
if !globMatch(rolePattern, role.Name) {
continue
}
actionPolicyIndex := -1
for i := range role.Policies {
parts := split(role.Policies[i], ",")
if len(parts) != 6 || parts[3] != action {
continue
}
actionPolicyIndex = i
break
}
policyPermission := modification(proj.Name, action)
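// Depending on whether an existing policy row for this action was found and
// whether the modification produced a new permission, the policy is appended,
// removed, or replaced in place below.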
if actionPolicyIndex == -1 && policyPermission != "" {
updated = true
role.Policies = append(role.Policies, formatPolicy(proj.Name, role.Name, policyPermission))
} else if actionPolicyIndex > -1 && policyPermission == "" {
updated = true
role.Policies = append(role.Policies[:actionPolicyIndex], role.Policies[actionPolicyIndex+1:]...)
} else if actionPolicyIndex > -1 && policyPermission != "" {
updated = true
role.Policies[actionPolicyIndex] = formatPolicy(proj.Name, role.Name, policyPermission)
}
proj.Spec.Roles[i] = role
}
if updated {
err = saveProject(proj, *origProj, projIf, dryRun)
if err != nil {
return err
}
}
}
return nil
}
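
For orientation, the helpers above compose Casbin policy rows of the form "p, proj:&lt;project&gt;:&lt;role&gt;, &lt;resource&gt;, &lt;action&gt;, &lt;project&gt;/&lt;scope&gt;, &lt;permission&gt;". The snippet below is an illustrative sketch only; the example function is hypothetical and not part of the deleted file.

// Example_policyComposition is an illustrative sketch (not part of the original
// file) showing how getModification and formatPolicy combine into a policy row.
func Example_policyComposition() {
	modification, _ := getModification("set", "applications", "*", "allow")
	// The modification renders the resource/action/scope/permission part of the row.
	permission := modification("default", "action/*") // "applications, action/*, default/*, allow"
	// formatPolicy prefixes the project-role subject.
	fmt.Println(formatPolicy("default", "deployer", permission))
	// Output: p, proj:default:deployer, applications, action/*, default/*, allow
}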


@@ -1,163 +0,0 @@
package admin
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/cli"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
// load the azure plugin (required to authenticate with AKS clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
)
// NewProjectAllowListGenCommand generates a project from clusterRole
func NewProjectAllowListGenCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
out string
)
var command = &cobra.Command{
Use: "generate-allow-list CLUSTERROLE_PATH PROJ_NAME",
Short: "Generates project allow list from the specified clusterRole file",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
clusterRoleFileName := args[0]
projName := args[1]
var writer io.Writer
if out == "-" {
writer = os.Stdout
} else {
f, err := os.Create(out)
errors.CheckError(err)
bw := bufio.NewWriter(f)
writer = bw
defer func() {
err = bw.Flush()
errors.CheckError(err)
err = f.Close()
errors.CheckError(err)
}()
}
resourceList, err := getResourceList(clientConfig)
errors.CheckError(err)
globalProj, err := generateProjectAllowList(resourceList, clusterRoleFileName, projName)
errors.CheckError(err)
yamlBytes, err := yaml.Marshal(globalProj)
errors.CheckError(err)
_, err = writer.Write(yamlBytes)
errors.CheckError(err)
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout")
return command
}
func getResourceList(clientConfig clientcmd.ClientConfig) ([]*metav1.APIResourceList, error) {
config, err := clientConfig.ClientConfig()
if err != nil {
return nil, fmt.Errorf("error while creating client config: %s", err)
}
disco, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
return nil, fmt.Errorf("error while creating discovery client: %s", err)
}
serverResources, err := disco.ServerPreferredResources()
if err != nil {
return nil, fmt.Errorf("error while getting server resources: %s", err)
}
return serverResources, nil
}
func generateProjectAllowList(serverResources []*metav1.APIResourceList, clusterRoleFileName string, projName string) (*v1alpha1.AppProject, error) {
yamlBytes, err := ioutil.ReadFile(clusterRoleFileName)
if err != nil {
return nil, fmt.Errorf("error reading cluster role file: %s", err)
}
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
if err != nil {
return nil, fmt.Errorf("error unmarshalling cluster role file yaml: %s", err)
}
clusterRole := &rbacv1.ClusterRole{}
err = scheme.Scheme.Convert(&obj, clusterRole, nil)
if err != nil {
return nil, fmt.Errorf("error converting cluster role yaml into ClusterRole struct: %s", err)
}
resourceList := make([]metav1.GroupKind, 0)
for _, rule := range clusterRole.Rules {
if len(rule.APIGroups) <= 0 {
continue
}
canCreate := false
for _, verb := range rule.Verbs {
if strings.EqualFold(verb, "Create") {
canCreate = true
break
}
}
if !canCreate {
continue
}
ruleApiGroup := rule.APIGroups[0]
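// Cross-reference each resource named in the rule with the API resources
// advertised by the server, matching on API group, to recover the Kind that
// goes into the allow list.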
for _, ruleResource := range rule.Resources {
for _, apiResourcesList := range serverResources {
gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion)
if err != nil {
gv = schema.GroupVersion{}
}
if ruleApiGroup == gv.Group {
for _, apiResource := range apiResourcesList.APIResources {
if apiResource.Name == ruleResource {
resourceList = append(resourceList, metav1.GroupKind{Group: ruleApiGroup, Kind: apiResource.Kind})
}
}
}
}
}
}
globalProj := v1alpha1.AppProject{
TypeMeta: metav1.TypeMeta{
Kind: "AppProject",
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{Name: projName},
Spec: v1alpha1.AppProjectSpec{},
}
globalProj.Spec.NamespaceResourceWhitelist = resourceList
return &globalProj, nil
}


@@ -1,20 +0,0 @@
package admin
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestProjectAllowListGen(t *testing.T) {
res := metav1.APIResource{
Name: "services",
Kind: "Service",
}
resourceList := []*metav1.APIResourceList{{APIResources: []metav1.APIResource{res}}}
globalProj, err := generateProjectAllowList(resourceList, "testdata/test_clusterrole.yaml", "testproj")
assert.NoError(t, err)
assert.True(t, len(globalProj.Spec.NamespaceResourceWhitelist) > 0)
}


@@ -1,79 +0,0 @@
package admin
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
)
const (
namespace = "default"
)
func newProj(name string, roleNames ...string) *v1alpha1.AppProject {
var roles []v1alpha1.ProjectRole
for i := range roleNames {
roles = append(roles, v1alpha1.ProjectRole{Name: roleNames[i]})
}
return &v1alpha1.AppProject{ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: namespace,
}, Spec: v1alpha1.AppProjectSpec{
Roles: roles,
}}
}
func TestUpdateProjects_FindMatchingProject(t *testing.T) {
clientset := fake.NewSimpleClientset(newProj("foo", "test"), newProj("bar", "test"))
modification, err := getModification("set", "*", "*", "allow")
assert.NoError(t, err)
err = updateProjects(clientset.ArgoprojV1alpha1().AppProjects(namespace), "ba*", "*", "set", modification, false)
assert.NoError(t, err)
fooProj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(context.Background(), "foo", v1.GetOptions{})
assert.NoError(t, err)
assert.Len(t, fooProj.Spec.Roles[0].Policies, 0)
barProj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(context.Background(), "bar", v1.GetOptions{})
assert.NoError(t, err)
assert.EqualValues(t, barProj.Spec.Roles[0].Policies, []string{"p, proj:bar:test, *, set, bar/*, allow"})
}
func TestUpdateProjects_FindMatchingRole(t *testing.T) {
clientset := fake.NewSimpleClientset(newProj("proj", "foo", "bar"))
modification, err := getModification("set", "*", "*", "allow")
assert.NoError(t, err)
err = updateProjects(clientset.ArgoprojV1alpha1().AppProjects(namespace), "*", "fo*", "set", modification, false)
assert.NoError(t, err)
proj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(context.Background(), "proj", v1.GetOptions{})
assert.NoError(t, err)
assert.EqualValues(t, proj.Spec.Roles[0].Policies, []string{"p, proj:proj:foo, *, set, proj/*, allow"})
assert.Len(t, proj.Spec.Roles[1].Policies, 0)
}
func TestGetModification_SetPolicy(t *testing.T) {
modification, err := getModification("set", "*", "*", "allow")
assert.NoError(t, err)
policy := modification("proj", "myaction")
assert.Equal(t, "*, myaction, proj/*, allow", policy)
}
func TestGetModification_RemovePolicy(t *testing.T) {
modification, err := getModification("remove", "*", "*", "allow")
assert.NoError(t, err)
policy := modification("proj", "myaction")
assert.Equal(t, "", policy)
}
func TestGetModification_NotSupported(t *testing.T) {
_, err := getModification("bar", "*", "*", "allow")
assert.Errorf(t, err, "modification bar is not supported")
}


@@ -1,169 +0,0 @@
package admin
import (
"context"
"fmt"
"io/ioutil"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/git"
"github.com/argoproj/argo-cd/v2/util/settings"
)
const (
ArgoCDNamespace = "argocd"
repoSecretPrefix = "repo"
)
func NewRepoCommand() *cobra.Command {
var command = &cobra.Command{
Use: "repo",
Short: "Manage repositories configuration",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewGenRepoSpecCommand())
return command
}
func NewGenRepoSpecCommand() *cobra.Command {
var (
repoOpts cmdutil.RepoOptions
outputFormat string
)
// For better readability and easier formatting
var repoAddExamples = `
# Add a Git repository via SSH using a private key for authentication, ignoring the server's host key:
argocd admin repo generate-spec git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa
# Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here
argocd admin repo generate-spec ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa
# Add a private Git repository via HTTPS using username/password and TLS client certificates:
argocd admin repo generate-spec https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key
# Add a private Git repository via HTTPS using username/password without verifying the server's TLS certificate
argocd admin repo generate-spec https://git.example.com/repos/repo --username git --password secret --insecure-skip-server-verification
# Add a public Helm repository named 'stable' via HTTPS
argocd admin repo generate-spec https://charts.helm.sh/stable --type helm --name stable
# Add a private Helm repository named 'stable' via HTTPS
argocd admin repo generate-spec https://charts.helm.sh/stable --type helm --name stable --username test --password test
# Add a private Helm OCI-based repository named 'stable' via HTTPS
argocd admin repo generate-spec helm-oci-registry.cn-zhangjiakou.cr.aliyuncs.com --type helm --name stable --enable-oci --username test --password test
`
var command = &cobra.Command{
Use: "generate-spec REPOURL",
Short: "Generate declarative config for a repo",
Example: repoAddExamples,
Run: func(c *cobra.Command, args []string) {
log.SetLevel(log.WarnLevel)
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
// Repository URL
repoOpts.Repo.Repo = args[0]
// Specifying ssh-private-key-path is only valid for SSH repositories
if repoOpts.SshPrivateKeyPath != "" {
if ok, _ := git.IsSSHURL(repoOpts.Repo.Repo); ok {
keyData, err := ioutil.ReadFile(repoOpts.SshPrivateKeyPath)
if err != nil {
log.Fatal(err)
}
repoOpts.Repo.SSHPrivateKey = string(keyData)
} else {
err := fmt.Errorf("--ssh-private-key-path is only supported for SSH repositories.")
errors.CheckError(err)
}
}
// tls-client-cert-path and tls-client-cert-key-path must always be
// specified together
if (repoOpts.TlsClientCertPath != "" && repoOpts.TlsClientCertKeyPath == "") || (repoOpts.TlsClientCertPath == "" && repoOpts.TlsClientCertKeyPath != "") {
err := fmt.Errorf("--tls-client-cert-path and --tls-client-cert-key-path must be specified together")
errors.CheckError(err)
}
// Specifying tls-client-cert-path is only valid for HTTPS repositories
if repoOpts.TlsClientCertPath != "" {
if git.IsHTTPSURL(repoOpts.Repo.Repo) {
tlsCertData, err := ioutil.ReadFile(repoOpts.TlsClientCertPath)
errors.CheckError(err)
tlsCertKey, err := ioutil.ReadFile(repoOpts.TlsClientCertKeyPath)
errors.CheckError(err)
repoOpts.Repo.TLSClientCertData = string(tlsCertData)
repoOpts.Repo.TLSClientCertKey = string(tlsCertKey)
} else {
err := fmt.Errorf("--tls-client-cert-path is only supported for HTTPS repositories")
errors.CheckError(err)
}
}
// Set repository connection properties only when creating repository, not
// when creating repository credentials.
// InsecureIgnoreHostKey is deprecated and only here for backwards compat
repoOpts.Repo.InsecureIgnoreHostKey = repoOpts.InsecureIgnoreHostKey
repoOpts.Repo.Insecure = repoOpts.InsecureSkipServerVerification
repoOpts.Repo.EnableLFS = repoOpts.EnableLfs
repoOpts.Repo.EnableOCI = repoOpts.EnableOci
if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" {
errors.CheckError(fmt.Errorf("must specify --name for repos of type 'helm'"))
}
// If the user set a username, but didn't supply password via --password,
// then we prompt for it
if repoOpts.Repo.Username != "" && repoOpts.Repo.Password == "" {
repoOpts.Repo.Password = cli.PromptPassword(repoOpts.Repo.Password)
}
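// A fake in-memory clientset seeded with a minimal argocd-cm ConfigMap lets the
// regular settings and db layers create the repository Secret locally, so the
// declarative spec can be printed without contacting a real cluster.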
argoCDCM := &apiv1.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: ArgoCDNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
}
kubeClientset := fake.NewSimpleClientset(argoCDCM)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, ArgoCDNamespace)
argoDB := db.NewDB(ArgoCDNamespace, settingsMgr, kubeClientset)
_, err := argoDB.CreateRepository(context.Background(), &repoOpts.Repo)
errors.CheckError(err)
secret, err := kubeClientset.CoreV1().Secrets(ArgoCDNamespace).Get(context.Background(), db.RepoURLToSecretName(repoSecretPrefix, repoOpts.Repo.Repo), v1.GetOptions{})
errors.CheckError(err)
errors.CheckError(PrintResources(outputFormat, os.Stdout, secret))
},
}
command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml")
cmdutil.AddRepoFlags(command, &repoOpts)
return command
}


@@ -1,94 +0,0 @@
package admin
import (
"testing"
"github.com/stretchr/testify/assert"
)
var textToRedact = `
connectors:
- config:
    clientID: aabbccddeeff00112233
    clientSecret: |
      theSecret
    orgs:
    - name: your-github-org
    redirectURI: https://argocd.example.com/api/dex/callback
  id: github
  name: GitHub
  type: github
- config:
    bindDN: uid=serviceaccount,cn=users,dc=example,dc=com
    bindPW: theSecret
    host: ldap.example.com:636
  id: ldap
  name: LDAP
  type: ldap
grpc:
  addr: 0.0.0.0:5557
telemetry:
  http: 0.0.0.0:5558
issuer: https://argocd.example.com/api/dex
oauth2:
  skipApprovalScreen: true
staticClients:
- id: argo-cd
  name: Argo CD
  redirectURIs:
  - https://argocd.example.com/auth/callback
  secret: Dis9M-GA11oTwZVQQWdDklPQw-sWXZkWJFyyEhMs
- id: argo-cd-cli
  name: Argo CD CLI
  public: true
  redirectURIs:
  - http://localhost
storage:
  type: memory
web:
  http: 0.0.0.0:5556`
var expectedRedaction = `connectors:
- config:
    clientID: aabbccddeeff00112233
    clientSecret: '********'
    orgs:
    - name: your-github-org
    redirectURI: https://argocd.example.com/api/dex/callback
  id: github
  name: GitHub
  type: github
- config:
    bindDN: uid=serviceaccount,cn=users,dc=example,dc=com
    bindPW: '********'
    host: ldap.example.com:636
  id: ldap
  name: LDAP
  type: ldap
grpc:
  addr: 0.0.0.0:5557
issuer: https://argocd.example.com/api/dex
oauth2:
  skipApprovalScreen: true
staticClients:
- id: argo-cd
  name: Argo CD
  redirectURIs:
  - https://argocd.example.com/auth/callback
  secret: '********'
- id: argo-cd-cli
  name: Argo CD CLI
  public: true
  redirectURIs:
  - http://localhost
storage:
  type: memory
telemetry:
  http: 0.0.0.0:5558
web:
  http: 0.0.0.0:5556
`
func TestSecretsRedactor(t *testing.T) {
assert.Equal(t, expectedRedaction, redactor(textToRedact))
}


@@ -1,546 +0,0 @@
package admin
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"reflect"
"sort"
"strconv"
"strings"
"text/tabwriter"
healthutil "github.com/argoproj/gitops-engine/pkg/health"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/lua"
"github.com/argoproj/argo-cd/v2/util/settings"
)
type settingsOpts struct {
argocdCMPath string
argocdSecretPath string
loadClusterSettings bool
clientConfig clientcmd.ClientConfig
}
type commandContext interface {
createSettingsManager() (*settings.SettingsManager, error)
}
func collectLogs(callback func()) string {
log.SetLevel(log.DebugLevel)
out := bytes.Buffer{}
log.SetOutput(&out)
defer log.SetLevel(log.FatalLevel)
callback()
return out.String()
}
func setSettingsMeta(obj v1.Object) {
obj.SetNamespace("default")
labels := obj.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
labels["app.kubernetes.io/part-of"] = "argocd"
obj.SetLabels(labels)
}
func (opts *settingsOpts) createSettingsManager() (*settings.SettingsManager, error) {
var argocdCM *corev1.ConfigMap
if opts.argocdCMPath == "" && !opts.loadClusterSettings {
return nil, fmt.Errorf("either --argocd-cm-path must be provided or --load-cluster-settings must be set to true")
} else if opts.argocdCMPath == "" {
realClientset, ns, err := opts.getK8sClient()
if err != nil {
return nil, err
}
argocdCM, err = realClientset.CoreV1().ConfigMaps(ns).Get(context.Background(), common.ArgoCDConfigMapName, v1.GetOptions{})
if err != nil {
return nil, err
}
} else {
data, err := ioutil.ReadFile(opts.argocdCMPath)
if err != nil {
return nil, err
}
err = yaml.Unmarshal(data, &argocdCM)
if err != nil {
return nil, err
}
}
setSettingsMeta(argocdCM)
var argocdSecret *corev1.Secret
if opts.argocdSecretPath != "" {
data, err := ioutil.ReadFile(opts.argocdSecretPath)
if err != nil {
return nil, err
}
err = yaml.Unmarshal(data, &argocdSecret)
if err != nil {
return nil, err
}
setSettingsMeta(argocdSecret)
} else if opts.loadClusterSettings {
realClientset, ns, err := opts.getK8sClient()
if err != nil {
return nil, err
}
argocdSecret, err = realClientset.CoreV1().Secrets(ns).Get(context.Background(), common.ArgoCDSecretName, v1.GetOptions{})
if err != nil {
return nil, err
}
} else {
argocdSecret = &corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Name: common.ArgoCDSecretName,
},
Data: map[string][]byte{
"admin.password": []byte("test"),
"server.secretkey": []byte("test"),
},
}
}
setSettingsMeta(argocdSecret)
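// Whichever source was used, both objects are loaded into a fake in-memory
// clientset so the standard SettingsManager can run against local files exactly
// as it would against a live cluster.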
clientset := fake.NewSimpleClientset(argocdSecret, argocdCM)
manager := settings.NewSettingsManager(context.Background(), clientset, "default")
errors.CheckError(manager.ResyncInformers())
return manager, nil
}
func (opts *settingsOpts) getK8sClient() (*kubernetes.Clientset, string, error) {
namespace, _, err := opts.clientConfig.Namespace()
if err != nil {
return nil, "", err
}
restConfig, err := opts.clientConfig.ClientConfig()
if err != nil {
return nil, "", err
}
realClientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, "", err
}
return realClientset, namespace, nil
}
func NewSettingsCommand() *cobra.Command {
var (
opts settingsOpts
)
var command = &cobra.Command{
Use: "settings",
Short: "Provides set of commands for settings validation and troubleshooting",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
log.SetLevel(log.FatalLevel)
command.AddCommand(NewValidateSettingsCommand(&opts))
command.AddCommand(NewResourceOverridesCommand(&opts))
command.AddCommand(NewRBACCommand())
opts.clientConfig = cli.AddKubectlFlagsToCmd(command)
command.PersistentFlags().StringVar(&opts.argocdCMPath, "argocd-cm-path", "", "Path to local argocd-cm.yaml file")
command.PersistentFlags().StringVar(&opts.argocdSecretPath, "argocd-secret-path", "", "Path to local argocd-secret.yaml file")
command.PersistentFlags().BoolVar(&opts.loadClusterSettings, "load-cluster-settings", false,
"Indicates that config map and secret should be loaded from cluster unless local file path is provided")
return command
}
type settingValidator func(manager *settings.SettingsManager) (string, error)
func joinValidators(validators ...settingValidator) settingValidator {
return func(manager *settings.SettingsManager) (string, error) {
var errorStrs []string
var summaries []string
for i := range validators {
summary, err := validators[i](manager)
if err != nil {
errorStrs = append(errorStrs, err.Error())
}
if summary != "" {
summaries = append(summaries, summary)
}
}
if len(errorStrs) > 0 {
return "", fmt.Errorf("%s", strings.Join(errorStrs, "\n"))
}
return strings.Join(summaries, "\n"), nil
}
}
var validatorsByGroup = map[string]settingValidator{
"general": joinValidators(func(manager *settings.SettingsManager) (string, error) {
general, err := manager.GetSettings()
if err != nil {
return "", err
}
ssoProvider := ""
if general.DexConfig != "" {
if _, err := settings.UnmarshalDexConfig(general.DexConfig); err != nil {
return "", fmt.Errorf("invalid dex.config: %v", err)
}
ssoProvider = "Dex"
} else if general.OIDCConfigRAW != "" {
if _, err := settings.UnmarshalOIDCConfig(general.OIDCConfigRAW); err != nil {
return "", fmt.Errorf("invalid oidc.config: %v", err)
}
ssoProvider = "OIDC"
}
var summary string
if ssoProvider != "" {
summary = fmt.Sprintf("%s is configured", ssoProvider)
if general.URL == "" {
summary = summary + " ('url' field is missing)"
}
} else {
summary = "SSO is not configured"
}
return summary, nil
}, func(manager *settings.SettingsManager) (string, error) {
_, err := manager.GetAppInstanceLabelKey()
return "", err
}, func(manager *settings.SettingsManager) (string, error) {
_, err := manager.GetHelp()
return "", err
}, func(manager *settings.SettingsManager) (string, error) {
_, err := manager.GetGoogleAnalytics()
return "", err
}),
"plugins": func(manager *settings.SettingsManager) (string, error) {
plugins, err := manager.GetConfigManagementPlugins()
if err != nil {
return "", err
}
return fmt.Sprintf("%d plugins", len(plugins)), nil
},
"kustomize": func(manager *settings.SettingsManager) (string, error) {
opts, err := manager.GetKustomizeSettings()
if err != nil {
return "", err
}
summary := "default options"
if opts.BuildOptions != "" {
summary = opts.BuildOptions
}
if len(opts.Versions) > 0 {
summary = fmt.Sprintf("%s (%d versions)", summary, len(opts.Versions))
}
return summary, err
},
"repositories": joinValidators(func(manager *settings.SettingsManager) (string, error) {
repos, err := manager.GetRepositories()
if err != nil {
return "", err
}
return fmt.Sprintf("%d repositories", len(repos)), nil
}, func(manager *settings.SettingsManager) (string, error) {
creds, err := manager.GetRepositoryCredentials()
if err != nil {
return "", err
}
return fmt.Sprintf("%d repository credentials", len(creds)), nil
}),
"accounts": func(manager *settings.SettingsManager) (string, error) {
accounts, err := manager.GetAccounts()
if err != nil {
return "", err
}
return fmt.Sprintf("%d accounts", len(accounts)), nil
},
"resource-overrides": func(manager *settings.SettingsManager) (string, error) {
overrides, err := manager.GetResourceOverrides()
if err != nil {
return "", err
}
return fmt.Sprintf("%d resource overrides", len(overrides)), nil
},
}
func NewValidateSettingsCommand(cmdCtx commandContext) *cobra.Command {
var (
groups []string
)
var allGroups []string
for k := range validatorsByGroup {
allGroups = append(allGroups, k)
}
sort.Slice(allGroups, func(i, j int) bool {
return allGroups[i] < allGroups[j]
})
var command = &cobra.Command{
Use: "validate",
Short: "Validate settings",
Long: "Validates settings specified in 'argocd-cm' ConfigMap and 'argocd-secret' Secret",
Example: `
# Validates all settings in the specified YAML file
argocd admin settings validate --argocd-cm-path ./argocd-cm.yaml
# Validates accounts and plugins settings in the Kubernetes cluster of the current kubeconfig context
argocd admin settings validate --group accounts --group plugins --load-cluster-settings`,
Run: func(c *cobra.Command, args []string) {
settingsManager, err := cmdCtx.createSettingsManager()
errors.CheckError(err)
if len(groups) == 0 {
groups = allGroups
}
for i, group := range groups {
validator := validatorsByGroup[group]
logs := collectLogs(func() {
summary, err := validator(settingsManager)
if err != nil {
_, _ = fmt.Fprintf(os.Stdout, "❌ %s\n", group)
_, _ = fmt.Fprintf(os.Stdout, "%s\n", err.Error())
} else {
_, _ = fmt.Fprintf(os.Stdout, "✅ %s\n", group)
if summary != "" {
_, _ = fmt.Fprintf(os.Stdout, "%s\n", summary)
}
}
})
if logs != "" {
_, _ = fmt.Fprintf(os.Stdout, "%s\n", logs)
}
if i != len(groups)-1 {
_, _ = fmt.Fprintf(os.Stdout, "\n")
}
}
},
}
command.Flags().StringArrayVar(&groups, "group", nil, fmt.Sprintf(
"Optional list of setting groups that have to be validated ( one of: %s)", strings.Join(allGroups, ", ")))
return command
}
func NewResourceOverridesCommand(cmdCtx commandContext) *cobra.Command {
var command = &cobra.Command{
Use: "resource-overrides",
Short: "Troubleshoot resource overrides",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewResourceIgnoreDifferencesCommand(cmdCtx))
command.AddCommand(NewResourceActionListCommand(cmdCtx))
command.AddCommand(NewResourceActionRunCommand(cmdCtx))
command.AddCommand(NewResourceHealthCommand(cmdCtx))
return command
}
func executeResourceOverrideCommand(cmdCtx commandContext, args []string, callback func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride)) {
data, err := ioutil.ReadFile(args[0])
errors.CheckError(err)
res := unstructured.Unstructured{}
errors.CheckError(yaml.Unmarshal(data, &res))
settingsManager, err := cmdCtx.createSettingsManager()
errors.CheckError(err)
overrides, err := settingsManager.GetResourceOverrides()
errors.CheckError(err)
gvk := res.GroupVersionKind()
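// Resource overrides are keyed by 'Group/Kind', or just 'Kind' for resources
// in the core API group.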
key := gvk.Kind
if gvk.Group != "" {
key = fmt.Sprintf("%s/%s", gvk.Group, gvk.Kind)
}
override, hasOverride := overrides[key]
if !hasOverride {
_, _ = fmt.Printf("No overrides configured for '%s/%s'\n", gvk.Group, gvk.Kind)
return
}
callback(res, override, overrides)
}
func NewResourceIgnoreDifferencesCommand(cmdCtx commandContext) *cobra.Command {
var command = &cobra.Command{
Use: "ignore-differences RESOURCE_YAML_PATH",
Short: "Renders fields excluded from diffing",
Long: "Renders ignored fields using the 'ignoreDifferences' setting specified in the 'resource.customizations' field of 'argocd-cm' ConfigMap",
Example: `
argocd admin settings resource-overrides ignore-differences ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`,
Run: func(c *cobra.Command, args []string) {
if len(args) < 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) {
gvk := res.GroupVersionKind()
if len(override.IgnoreDifferences.JSONPointers) == 0 && len(override.IgnoreDifferences.JQPathExpressions) == 0 {
_, _ = fmt.Printf("Ignore differences are not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
return
}
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides)
errors.CheckError(err)
normalizedRes := res.DeepCopy()
logs := collectLogs(func() {
errors.CheckError(normalizer.Normalize(normalizedRes))
})
if logs != "" {
_, _ = fmt.Println(logs)
}
if reflect.DeepEqual(&res, normalizedRes) {
_, _ = fmt.Printf("No fields are ignored by ignoreDifferences settings: \n%s\n", override.IgnoreDifferences)
return
}
_, _ = fmt.Printf("Following fields are ignored:\n\n")
_ = cli.PrintDiff(res.GetName(), &res, normalizedRes)
})
},
}
return command
}
func NewResourceHealthCommand(cmdCtx commandContext) *cobra.Command {
var command = &cobra.Command{
Use: "health RESOURCE_YAML_PATH",
Short: "Assess resource health",
Long: "Assess resource health using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap",
Example: `
argocd admin settings resource-overrides health ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`,
Run: func(c *cobra.Command, args []string) {
if len(args) < 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) {
gvk := res.GroupVersionKind()
if override.HealthLua == "" {
_, _ = fmt.Printf("Health script is not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
return
}
resHealth, err := healthutil.GetResourceHealth(&res, lua.ResourceHealthOverrides(overrides))
errors.CheckError(err)
_, _ = fmt.Printf("STATUS: %s\n", resHealth.Status)
_, _ = fmt.Printf("MESSAGE: %s\n", resHealth.Message)
})
},
}
return command
}
func NewResourceActionListCommand(cmdCtx commandContext) *cobra.Command {
var command = &cobra.Command{
Use: "list-actions RESOURCE_YAML_PATH",
Short: "List available resource actions",
Long: "List actions available for given resource action using the lua scripts configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields",
Example: `
argocd admin settings resource-overrides list-actions /tmp/deploy.yaml --argocd-cm-path ./argocd-cm.yaml`,
Run: func(c *cobra.Command, args []string) {
if len(args) < 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) {
gvk := res.GroupVersionKind()
if override.Actions == "" {
_, _ = fmt.Printf("Actions are not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
return
}
luaVM := lua.VM{ResourceOverrides: overrides}
discoveryScript, err := luaVM.GetResourceActionDiscovery(&res)
errors.CheckError(err)
availableActions, err := luaVM.ExecuteResourceActionDiscovery(&res, discoveryScript)
errors.CheckError(err)
sort.Slice(availableActions, func(i, j int) bool {
return availableActions[i].Name < availableActions[j].Name
})
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "NAME\tENABLED\n")
for _, action := range availableActions {
_, _ = fmt.Fprintf(w, "%s\t%s\n", action.Name, strconv.FormatBool(action.Disabled))
}
_ = w.Flush()
})
},
}
return command
}
func NewResourceActionRunCommand(cmdCtx commandContext) *cobra.Command {
var command = &cobra.Command{
Use: "run-action RESOURCE_YAML_PATH ACTION",
Aliases: []string{"action"},
Short: "Executes resource action",
Long: "Executes resource action using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields",
Example: `
argocd admin settings resource-overrides run-action /tmp/deploy.yaml restart --argocd-cm-path ./argocd-cm.yaml`,
Run: func(c *cobra.Command, args []string) {
if len(args) < 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
action := args[1]
executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) {
gvk := res.GroupVersionKind()
if override.Actions == "" {
_, _ = fmt.Printf("Actions are not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
return
}
luaVM := lua.VM{ResourceOverrides: overrides}
action, err := luaVM.GetResourceAction(&res, action)
errors.CheckError(err)
modifiedRes, err := luaVM.ExecuteResourceAction(&res, action.ActionLua)
errors.CheckError(err)
if reflect.DeepEqual(&res, modifiedRes) {
_, _ = fmt.Printf("No fields had been changed by action: \n%s\n", action.Name)
return
}
_, _ = fmt.Printf("Following fields have been changed:\n\n")
_ = cli.PrintDiff(res.GetName(), &res, modifiedRes)
})
},
}
return command
}
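
As a sketch of how the validator plumbing above can be extended, an additional group could be composed with joinValidators. The variable name and checks below are hypothetical and not part of the deleted file.

// exampleURLValidator is an illustrative sketch (not part of the original file)
// following the same pattern as the entries in validatorsByGroup.
var exampleURLValidator = joinValidators(
	func(manager *settings.SettingsManager) (string, error) {
		general, err := manager.GetSettings()
		if err != nil {
			return "", err
		}
		if general.URL == "" {
			return "", fmt.Errorf("'url' field is missing")
		}
		return "url is configured", nil
	},
	func(manager *settings.SettingsManager) (string, error) {
		// Summaries from all joined validators are concatenated; errors are
		// collected and reported together.
		return "", nil
	},
)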


@@ -1,374 +0,0 @@
package admin
import (
"context"
"fmt"
"io/ioutil"
"os"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/server/rbacpolicy"
"github.com/argoproj/argo-cd/v2/util/assets"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/rbac"
)
// Provide a mapping of short-hand resource names to their RBAC counterparts
var resourceMap map[string]string = map[string]string{
"account": rbacpolicy.ResourceAccounts,
"app": rbacpolicy.ResourceApplications,
"apps": rbacpolicy.ResourceApplications,
"application": rbacpolicy.ResourceApplications,
"cert": rbacpolicy.ResourceCertificates,
"certs": rbacpolicy.ResourceCertificates,
"certificate": rbacpolicy.ResourceCertificates,
"cluster": rbacpolicy.ResourceClusters,
"gpgkey": rbacpolicy.ResourceGPGKeys,
"key": rbacpolicy.ResourceGPGKeys,
"proj": rbacpolicy.ResourceProjects,
"projs": rbacpolicy.ResourceProjects,
"project": rbacpolicy.ResourceProjects,
"repo": rbacpolicy.ResourceRepositories,
"repos": rbacpolicy.ResourceRepositories,
"repository": rbacpolicy.ResourceRepositories,
}
// List of allowed RBAC resources
var validRBACResources map[string]bool = map[string]bool{
rbacpolicy.ResourceAccounts: true,
rbacpolicy.ResourceApplications: true,
rbacpolicy.ResourceCertificates: true,
rbacpolicy.ResourceClusters: true,
rbacpolicy.ResourceGPGKeys: true,
rbacpolicy.ResourceProjects: true,
rbacpolicy.ResourceRepositories: true,
}
// List of allowed RBAC actions
var validRBACActions map[string]bool = map[string]bool{
rbacpolicy.ActionAction: true,
rbacpolicy.ActionCreate: true,
rbacpolicy.ActionDelete: true,
rbacpolicy.ActionGet: true,
rbacpolicy.ActionOverride: true,
rbacpolicy.ActionSync: true,
rbacpolicy.ActionUpdate: true,
}
// NewRBACCommand is the command for 'rbac'
func NewRBACCommand() *cobra.Command {
var command = &cobra.Command{
Use: "rbac",
Short: "Validate and test RBAC configuration",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewRBACCanCommand())
command.AddCommand(NewRBACValidateCommand())
return command
}
// NewRBACCanCommand is the command for 'rbac can'
func NewRBACCanCommand() *cobra.Command {
var (
policyFile string
defaultRole string
useBuiltin bool
strict bool
quiet bool
subject string
action string
resource string
subResource string
clientConfig clientcmd.ClientConfig
)
var command = &cobra.Command{
Use: "can ROLE/SUBJECT ACTION RESOURCE [SUB-RESOURCE]",
Short: "Check RBAC permissions for a role or subject",
Long: `
Check whether a given role or subject has the RBAC permission to perform the
specified action on the specified resource.
`,
Example: `
# Check whether role some:role has permissions to create an application in the
# 'default' project, using a local policy.csv file
argocd admin settings rbac can some:role create application 'default/app' --policy-file policy.csv
# Policy file can also be K8s config map with data keys like argocd-rbac-cm,
# i.e. 'policy.csv' and (optionally) 'policy.default'
argocd admin settings rbac can some:role create application 'default/app' --policy-file argocd-rbac-cm.yaml
# If --policy-file is not given, the ConfigMap 'argocd-rbac-cm' from K8s is
# used. You need to specify the argocd namespace, and make sure that your
# current Kubernetes context is pointing to the cluster Argo CD is running in
argocd admin settings rbac can some:role create application 'default/app' --namespace argocd
# You can override a possibly configured default role
argocd admin settings rbac can someuser create application 'default/app' --default-role role:readonly
`,
Run: func(c *cobra.Command, args []string) {
if len(args) < 3 || len(args) > 4 {
c.HelpFunc()(c, args)
os.Exit(1)
}
subject = args[0]
action = args[1]
resource = args[2]
if len(args) > 3 {
subResource = args[3]
}
userPolicy := ""
builtinPolicy := ""
var newDefaultRole string
namespace, nsOverride, err := clientConfig.Namespace()
if err != nil {
log.Fatalf("could not create k8s client: %v", err)
}
// Exactly one of --namespace or --policy-file must be given.
if (!nsOverride && policyFile == "") || (nsOverride && policyFile != "") {
c.HelpFunc()(c, args)
log.Fatalf("please provide exactly one of --policy-file or --namespace")
}
restConfig, err := clientConfig.ClientConfig()
if err != nil {
log.Fatalf("could not create k8s client: %v", err)
}
realClientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
log.Fatalf("could not create k8s client: %v", err)
}
userPolicy, newDefaultRole = getPolicy(policyFile, realClientset, namespace)
// Use built-in policy as augmentation if requested
if useBuiltin {
builtinPolicy = assets.BuiltinPolicyCSV
}
// If no explicit default role was given, but the policy defines one,
// use that default role when enforcing.
if newDefaultRole != "" && defaultRole == "" {
defaultRole = newDefaultRole
}
res := checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole, strict)
if res {
if !quiet {
fmt.Println("Yes")
}
os.Exit(0)
} else {
if !quiet {
fmt.Println("No")
}
os.Exit(1)
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
command.Flags().StringVar(&defaultRole, "default-role", "", "name of the default role to use")
command.Flags().BoolVar(&useBuiltin, "use-builtin-policy", true, "whether to also use builtin-policy")
command.Flags().BoolVar(&strict, "strict", true, "whether to perform strict check on action and resource names")
command.Flags().BoolVarP(&quiet, "quiet", "q", false, "quiet mode - do not print results to stdout")
return command
}
// NewRBACValidateCommand returns a new rbac validate command
func NewRBACValidateCommand() *cobra.Command {
var (
policyFile string
)
var command = &cobra.Command{
Use: "validate --policy-file=POLICYFILE",
Short: "Validate RBAC policy",
Long: `
Validates that an RBAC policy is syntactically correct. The policy must be
a local file, in either CSV or K8s ConfigMap format.
`,
Run: func(c *cobra.Command, args []string) {
if policyFile == "" {
c.HelpFunc()(c, args)
log.Fatalf("Please specify policy to validate using --policy-file")
}
userPolicy, _ := getPolicy(policyFile, nil, "")
if userPolicy != "" {
if err := rbac.ValidatePolicy(userPolicy); err == nil {
fmt.Printf("Policy is valid.\n")
os.Exit(0)
} else {
fmt.Printf("Policy is invalid: %v\n", err)
os.Exit(1)
}
}
},
}
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
return command
}
// Load user policy file if requested or use Kubernetes client to get the
// appropriate ConfigMap from the current context
func getPolicy(policyFile string, kubeClient kubernetes.Interface, namespace string) (userPolicy string, defaultRole string) {
var err error
if policyFile != "" {
// load from file
userPolicy, defaultRole, err = getPolicyFromFile(policyFile)
if err != nil {
log.Fatalf("could not read policy file: %v", err)
}
} else {
cm, err := getPolicyConfigMap(kubeClient, namespace)
if err != nil {
log.Fatalf("could not get configmap: %v", err)
}
userPolicy, defaultRole = getPolicyFromConfigMap(cm)
}
return userPolicy, defaultRole
}
// getPolicyFromFile loads a RBAC policy from given path
func getPolicyFromFile(policyFile string) (string, string, error) {
var (
userPolicy string
defaultRole string
)
upol, err := ioutil.ReadFile(policyFile)
if err != nil {
log.Fatalf("error opening policy file: %v", err)
return "", "", err
}
// Try to unmarshal the input file as ConfigMap first. If it succeeds, we
// assume ConfigMap input. Otherwise, we treat the file contents as raw policy CSV.
var upolCM *corev1.ConfigMap
err = yaml.Unmarshal(upol, &upolCM)
if err != nil {
userPolicy = string(upol)
} else {
userPolicy, defaultRole = getPolicyFromConfigMap(upolCM)
}
return userPolicy, defaultRole, nil
}
// Retrieve policy information from a ConfigMap
func getPolicyFromConfigMap(cm *corev1.ConfigMap) (string, string) {
var (
userPolicy string
defaultRole string
ok bool
)
userPolicy, ok = cm.Data[rbac.ConfigMapPolicyCSVKey]
if !ok {
userPolicy = ""
}
if defaultRole == "" {
defaultRole, ok = cm.Data[rbac.ConfigMapPolicyDefaultKey]
if !ok {
defaultRole = ""
}
}
return userPolicy, defaultRole
}
// getPolicyConfigMap fetches the RBAC config map from K8s cluster
func getPolicyConfigMap(client kubernetes.Interface, namespace string) (*corev1.ConfigMap, error) {
cm, err := client.CoreV1().ConfigMaps(namespace).Get(context.Background(), common.ArgoCDRBACConfigMapName, v1.GetOptions{})
if err != nil {
return nil, err
}
return cm, nil
}
// checkPolicy checks whether given subject is allowed to execute specified
// action against specified resource
func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole string, strict bool) bool {
enf := rbac.NewEnforcer(nil, "argocd", "argocd-rbac-cm", nil)
enf.SetDefaultRole(defaultRole)
if builtinPolicy != "" {
if err := enf.SetBuiltinPolicy(builtinPolicy); err != nil {
log.Fatalf("could not set built-in policy: %v", err)
return false
}
}
if userPolicy != "" {
if err := rbac.ValidatePolicy(userPolicy); err != nil {
log.Fatalf("invalid user policy: %v", err)
return false
}
if err := enf.SetUserPolicy(userPolicy); err != nil {
log.Fatalf("could not set user policy: %v", err)
return false
}
}
// User could have used a mutation of the resource name (i.e. 'cert' for
// 'certificate') - let's resolve it to the valid resource.
realResource := resolveRBACResourceName(resource)
// If in strict mode, validate that given RBAC resource and action are
// actually valid tokens.
if strict {
if !isValidRBACResource(realResource) {
log.Fatalf("error in RBAC request: '%s' is not a valid resource name", realResource)
}
if !isValidRBACAction(action) {
log.Fatalf("error in RBAC request: '%s' is not a valid action name", action)
}
}
// Application resources have a special notation - for simplicity's sake,
// if user gives no sub-resource (or specifies simple '*'), we construct
// the required notation by setting subresource to '*/*'.
if realResource == rbacpolicy.ResourceApplications {
if subResource == "*" || subResource == "" {
subResource = "*/*"
}
}
return enf.Enforce(subject, realResource, action, subResource)
}
// resolveRBACResourceName resolves a user supplied value to a valid RBAC
// resource name. If no mapping is found, returns the value verbatim.
func resolveRBACResourceName(name string) string {
if res, ok := resourceMap[name]; ok {
return res
} else {
return name
}
}
// isValidRBACAction checks whether a given action is a valid RBAC action
func isValidRBACAction(action string) bool {
_, ok := validRBACActions[action]
return ok
}
// isValidRBACResource checks whether a given resource is a valid RBAC resource
func isValidRBACResource(resource string) bool {
_, ok := validRBACResources[resource]
return ok
}
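
For illustration, checkPolicy can also be exercised with an inline CSV policy rather than a policy file or ConfigMap. The function and policy below are hypothetical and not part of the deleted file.

// exampleCheckPolicy is an illustrative sketch (not part of the original file).
func exampleCheckPolicy() bool {
	userPolicy := `p, role:deployer, applications, sync, */*, allow
g, alice, role:deployer`
	// alice inherits role:deployer, which may sync any application; strict mode
	// validates the resource and action tokens before enforcing.
	return checkPolicy("alice", "sync", "applications", "*/*", "", userPolicy, "", true)
}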


@@ -1,91 +0,0 @@
package admin
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"github.com/argoproj/argo-cd/v2/util/assets"
)
func Test_isValidRBACAction(t *testing.T) {
for k := range validRBACActions {
t.Run(k, func(t *testing.T) {
ok := isValidRBACAction(k)
assert.True(t, ok)
})
}
t.Run("invalid", func(t *testing.T) {
ok := isValidRBACAction("invalid")
assert.False(t, ok)
})
}
func Test_isValidRBACResource(t *testing.T) {
for k := range validRBACResources {
t.Run(k, func(t *testing.T) {
ok := isValidRBACResource(k)
assert.True(t, ok)
})
}
t.Run("invalid", func(t *testing.T) {
ok := isValidRBACResource("invalid")
assert.False(t, ok)
})
}
func Test_PolicyFromCSV(t *testing.T) {
uPol, dRole := getPolicy("testdata/rbac/policy.csv", nil, "")
require.NotEmpty(t, uPol)
require.Empty(t, dRole)
}
func Test_PolicyFromYAML(t *testing.T) {
uPol, dRole := getPolicy("testdata/rbac/argocd-rbac-cm.yaml", nil, "")
require.NotEmpty(t, uPol)
require.Equal(t, "role:unknown", dRole)
}
func Test_PolicyFromK8s(t *testing.T) {
data, err := ioutil.ReadFile("testdata/rbac/policy.csv")
require.NoError(t, err)
kubeclientset := fake.NewSimpleClientset(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-rbac-cm",
Namespace: "argocd",
},
Data: map[string]string{
"policy.csv": string(data),
"policy.default": "role:unknown",
},
})
uPol, dRole := getPolicy("", kubeclientset, "argocd")
require.NotEmpty(t, uPol)
require.Equal(t, "role:unknown", dRole)
t.Run("get applications", func(t *testing.T) {
ok := checkPolicy("role:user", "get", "applications", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, true)
require.True(t, ok)
})
t.Run("get clusters", func(t *testing.T) {
ok := checkPolicy("role:user", "get", "clusters", "*", assets.BuiltinPolicyCSV, uPol, dRole, true)
require.True(t, ok)
})
t.Run("get certificates", func(t *testing.T) {
ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, dRole, true)
require.False(t, ok)
})
t.Run("get certificates by default role", func(t *testing.T) {
ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, "role:readonly", true)
require.True(t, ok)
})
t.Run("get certificates by default role without builtin policy", func(t *testing.T) {
ok := checkPolicy("role:user", "get", "certificates", "*", "", uPol, "role:readonly", true)
require.False(t, ok)
})
}


@@ -1,384 +0,0 @@
package admin
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"testing"
"github.com/argoproj/argo-cd/v2/common"
utils "github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/settings"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
func captureStdout(callback func()) (string, error) {
oldStdout := os.Stdout
oldStderr := os.Stderr
r, w, err := os.Pipe()
if err != nil {
return "", err
}
os.Stdout = w
defer func() {
os.Stdout = oldStdout
os.Stderr = oldStderr
}()
callback()
utils.Close(w)
data, err := ioutil.ReadAll(r)
if err != nil {
return "", err
}
return string(data), err
}
func newSettingsManager(data map[string]string) *settings.SettingsManager {
clientset := fake.NewSimpleClientset(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: common.ArgoCDConfigMapName,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: data,
}, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: common.ArgoCDSecretName,
},
Data: map[string][]byte{
"admin.password": []byte("test"),
"server.secretkey": []byte("test"),
},
})
return settings.NewSettingsManager(context.Background(), clientset, "default")
}
type fakeCmdContext struct {
mgr *settings.SettingsManager
// nolint:unused,structcheck
out bytes.Buffer
}
func newCmdContext(data map[string]string) *fakeCmdContext {
return &fakeCmdContext{mgr: newSettingsManager(data)}
}
func (ctx *fakeCmdContext) createSettingsManager() (*settings.SettingsManager, error) {
return ctx.mgr, nil
}
type validatorTestCase struct {
validator string
data map[string]string
containsSummary string
containsError string
}
func TestCreateSettingsManager(t *testing.T) {
f, closer, err := tempFile(`apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
data:
  url: https://myargocd.com`)
if !assert.NoError(t, err) {
return
}
defer utils.Close(closer)
opts := settingsOpts{argocdCMPath: f}
settingsManager, err := opts.createSettingsManager()
if !assert.NoError(t, err) {
return
}
argoCDSettings, err := settingsManager.GetSettings()
if !assert.NoError(t, err) {
return
}
assert.Equal(t, "https://myargocd.com", argoCDSettings.URL)
}
func TestValidator(t *testing.T) {
testCases := map[string]validatorTestCase{
"General_SSOIsNotConfigured": {
validator: "general", containsSummary: "SSO is not configured",
},
"General_DexInvalidConfig": {
validator: "general",
data: map[string]string{"dex.config": "abcdefg"},
containsError: "invalid dex.config",
},
"General_OIDCConfigured": {
validator: "general",
data: map[string]string{
"url": "https://myargocd.com",
"oidc.config": `
name: Okta
issuer: https://dev-123456.oktapreview.com
clientID: aaaabbbbccccddddeee
clientSecret: aaaabbbbccccddddeee`,
},
containsSummary: "OIDC is configured",
},
"General_DexConfiguredMissingURL": {
validator: "general",
data: map[string]string{
"dex.config": `connectors:
- type: github
name: GitHub
config:
clientID: aabbccddeeff00112233
clientSecret: aabbccddeeff00112233`,
},
containsSummary: "Dex is configured ('url' field is missing)",
},
"Plugins_ValidConfig": {
validator: "plugins",
data: map[string]string{
"configManagementPlugins": `[{"name": "test1"}, {"name": "test2"}]`,
},
containsSummary: "2 plugins",
},
"Kustomize_ModifiedOptions": {
validator: "kustomize",
containsSummary: "default options",
},
"Kustomize_DefaultOptions": {
validator: "kustomize",
data: map[string]string{
"kustomize.buildOptions": "updated-options (2 versions)",
"kustomize.versions.v123": "binary-123",
"kustomize.versions.v321": "binary-321",
},
containsSummary: "updated-options",
},
"Repositories": {
validator: "repositories",
data: map[string]string{
"repositories": `
- url: https://github.com/argoproj/my-private-repository1
- url: https://github.com/argoproj/my-private-repository2`,
},
containsSummary: "2 repositories",
},
"Accounts": {
validator: "accounts",
data: map[string]string{
"accounts.user1": "apiKey, login",
"accounts.user2": "login",
"accounts.user3": "apiKey",
},
containsSummary: "4 accounts",
},
"ResourceOverrides": {
validator: "resource-overrides",
data: map[string]string{
"resource.customizations": `
admissionregistration.k8s.io/MutatingWebhookConfiguration:
  ignoreDifferences: |
    jsonPointers:
    - /webhooks/0/clientConfig/caBundle`,
},
containsSummary: "2 resource overrides",
},
}
for name := range testCases {
tc := testCases[name]
t.Run(name, func(t *testing.T) {
validator, ok := validatorsByGroup[tc.validator]
if !assert.True(t, ok) {
return
}
summary, err := validator(newSettingsManager(tc.data))
if tc.containsSummary != "" {
assert.NoError(t, err)
assert.Contains(t, summary, tc.containsSummary)
} else if tc.containsError != "" {
if assert.Error(t, err) {
assert.Contains(t, err.Error(), tc.containsError)
}
}
})
}
}
const (
testDeploymentYAML = `apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 0`
)
func tempFile(content string) (string, io.Closer, error) {
f, err := ioutil.TempFile("", "*.yaml")
if err != nil {
return "", nil, err
}
_, err = f.Write([]byte(content))
if err != nil {
_ = os.Remove(f.Name())
return "", nil, err
}
defer f.Close()
return f.Name(), utils.NewCloser(func() error {
return os.Remove(f.Name())
}), nil
}
func TestValidateSettingsCommand_NoErrors(t *testing.T) {
cmd := NewValidateSettingsCommand(newCmdContext(map[string]string{}))
out, err := captureStdout(func() {
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
for k := range validatorsByGroup {
assert.Contains(t, out, fmt.Sprintf("✅ %s", k))
}
}
func TestResourceOverrideIgnoreDifferences(t *testing.T) {
f, closer, err := tempFile(testDeploymentYAML)
if !assert.NoError(t, err) {
return
}
defer utils.Close(closer)
t.Run("NoOverridesConfigured", func(t *testing.T) {
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{}))
out, err := captureStdout(func() {
cmd.SetArgs([]string{"ignore-differences", f})
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
assert.Contains(t, out, "No overrides configured")
})
t.Run("DataIgnored", func(t *testing.T) {
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
"resource.customizations": `apps/Deployment:
  ignoreDifferences: |
    jsonPointers:
    - /spec`}))
out, err := captureStdout(func() {
cmd.SetArgs([]string{"ignore-differences", f})
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
assert.Contains(t, out, "< spec:")
})
}
func TestResourceOverrideHealth(t *testing.T) {
f, closer, err := tempFile(testDeploymentYAML)
if !assert.NoError(t, err) {
return
}
defer utils.Close(closer)
t.Run("NoHealthAssessment", func(t *testing.T) {
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
"resource.customizations": `apps/Deployment: {}`}))
out, err := captureStdout(func() {
cmd.SetArgs([]string{"health", f})
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
assert.Contains(t, out, "Health script is not configured")
})
t.Run("HealthAssessmentConfigured", func(t *testing.T) {
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
"resource.customizations": `apps/Deployment:
  health.lua: |
    return { status = "Progressing" }
`}))
out, err := captureStdout(func() {
cmd.SetArgs([]string{"health", f})
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
assert.Contains(t, out, "Progressing")
})
}
func TestResourceOverrideAction(t *testing.T) {
f, closer, err := tempFile(testDeploymentYAML)
if !assert.NoError(t, err) {
return
}
defer utils.Close(closer)
t.Run("NoActions", func(t *testing.T) {
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
"resource.customizations": `apps/Deployment: {}`}))
out, err := captureStdout(func() {
cmd.SetArgs([]string{"run-action", f, "test"})
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
assert.Contains(t, out, "Actions are not configured")
})
t.Run("ActionConfigured", func(t *testing.T) {
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
"resource.customizations": `apps/Deployment:
actions: |
discovery.lua: |
actions = {}
actions["resume"] = {["disabled"] = false}
actions["restart"] = {["disabled"] = false}
return actions
definitions:
- name: test
action.lua: |
obj.metadata.labels["test"] = 'updated'
return obj
`}))
out, err := captureStdout(func() {
cmd.SetArgs([]string{"run-action", f, "test"})
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
assert.Contains(t, out, "test: updated")
out, err = captureStdout(func() {
cmd.SetArgs([]string{"list-actions", f})
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
assert.Contains(t, out, `NAME ENABLED
restart false
resume false
`)
})
}

View File

@@ -1,19 +0,0 @@
apiVersion: v1
data:
policy.csv: |
p, role:user, clusters, get, *, allow
p, role:user, clusters, get, https://kubernetes*, deny
p, role:user, projects, get, *, allow
p, role:user, applications, get, *, allow
p, role:user, applications, create, */*, allow
p, role:user, applications, delete, *, allow
p, role:user, applications, delete, */guestbook, deny
g, test, role:user
policy.default: role:unknown
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/name: argocd-rbac-cm
app.kubernetes.io/part-of: argocd
name: argocd-rbac-cm
namespace: argocd

View File

@@ -1,9 +0,0 @@
p, role:user, clusters, get, *, allow
p, role:user, clusters, get, https://kubernetes*, deny
p, role:user, projects, get, *, allow
p, role:user, applications, get, *, allow
p, role:user, applications, create, */*, allow
p, role:user, applications, delete, *, allow
p, role:user, applications, delete, */guestbook, deny
p, role:test, certificates, get, *, allow
g, test, role:user

View File

@@ -1,787 +0,0 @@
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: admin
rules:
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- argoproj.io
resources:
- gateways
- gateways/finalizers
- sensors
- sensors/finalizers
- eventsources
- eventsources/finalizers
- eventbuses
- eventbuses/finalizers
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- argoproj.io
resources:
- rollouts
- rollouts/scale
- experiments
- analysistemplates
- clusteranalysistemplates
- analysisruns
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- metrics.k8s.io
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- iammanager.keikoproj.io
resources:
- iamroles
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/attach
- pods/exec
- pods/portforward
- pods/proxy
- secrets
- services/proxy
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- impersonate
- apiGroups:
- ""
resources:
- pods
- pods/attach
- pods/exec
- pods/portforward
- pods/proxy
verbs:
- create
- delete
- deletecollection
- patch
- update
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- persistentvolumeclaims
- replicationcontrollers
- replicationcontrollers/scale
- secrets
- serviceaccounts
- services
- services/proxy
verbs:
- create
- delete
- deletecollection
- patch
- update
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- deployments/rollback
- deployments/scale
- replicasets
- replicasets/scale
- statefulsets
- statefulsets/scale
verbs:
- create
- delete
- deletecollection
- patch
- update
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- delete
- deletecollection
- patch
- update
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- create
- delete
- deletecollection
- patch
- update
- apiGroups:
- extensions
resources:
- daemonsets
- deployments
- deployments/rollback
- deployments/scale
- ingresses
- networkpolicies
- replicasets
- replicasets/scale
- replicationcontrollers/scale
verbs:
- create
- delete
- deletecollection
- patch
- update
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- deletecollection
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- create
- delete
- deletecollection
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- delete
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- deletecollection
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- update
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- argoproj.io
resources:
- gateways
- gateways/finalizers
- sensors
- sensors/finalizers
- eventsources
- eventsources/finalizers
- eventbuses
- eventbuses/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- argoproj.io
resources:
- rollouts
- rollouts/scale
- experiments
- analysistemplates
- clusteranalysistemplates
- analysisruns
verbs:
- get
- list
- watch
- apiGroups:
- ""
resourceNames:
- prometheus-k8s-prometheus-1
- prometheus-k8s-prometheus-0
resources:
- pods/portforward
verbs:
- create
- apiGroups:
- networking.istio.io
resources:
- virtualservices
- destinationrules
- serviceentries
- envoyfilters
- gateways
- sidecars
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- persistentvolumeclaims
- pods
- replicationcontrollers
- replicationcontrollers/scale
- serviceaccounts
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- bindings
- events
- limitranges
- namespaces/status
- pods/log
- pods/status
- replicationcontrollers/status
- resourcequotas
- resourcequotas/status
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- deployments/scale
- replicasets
- replicasets/scale
- statefulsets
- statefulsets/scale
verbs:
- get
- list
- watch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- daemonsets
- deployments
- deployments/scale
- ingresses
- networkpolicies
- replicasets
- replicasets/scale
- replicationcontrollers/scale
verbs:
- get
- list
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- controllerrevisions
verbs:
- get
- apiGroups:
- apps
resources:
- controllerrevisions
verbs:
- list
- apiGroups:
- apps
resources:
- controllerrevisions
verbs:
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- list
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- get
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- list
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- get
- apiGroups:
- ""
resources:
- services/status
verbs:
- list
- apiGroups:
- ""
resources:
- services/status
verbs:
- watch
- apiGroups:
- apps
resources:
- daemonsets/status
verbs:
- get
- apiGroups:
- apps
resources:
- daemonsets/status
verbs:
- list
- apiGroups:
- apps
resources:
- daemonsets/status
verbs:
- watch
- apiGroups:
- apps
resources:
- deployments/status
verbs:
- get
- apiGroups:
- apps
resources:
- deployments/status
verbs:
- list
- apiGroups:
- apps
resources:
- deployments/status
verbs:
- watch
- apiGroups:
- apps
resources:
- replicasets/status
verbs:
- get
- apiGroups:
- apps
resources:
- replicasets/status
verbs:
- list
- apiGroups:
- apps
resources:
- replicasets/status
verbs:
- watch
- apiGroups:
- apps
resources:
- statefulsets/status
verbs:
- get
- apiGroups:
- apps
resources:
- statefulsets/status
verbs:
- list
- apiGroups:
- apps
resources:
- statefulsets/status
verbs:
- watch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers/status
verbs:
- get
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers/status
verbs:
- list
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers/status
verbs:
- watch
- apiGroups:
- batch
resources:
- cronjobs/status
verbs:
- get
- apiGroups:
- batch
resources:
- cronjobs/status
verbs:
- list
- apiGroups:
- batch
resources:
- cronjobs/status
verbs:
- watch
- apiGroups:
- batch
resources:
- jobs/status
verbs:
- get
- apiGroups:
- batch
resources:
- jobs/status
verbs:
- list
- apiGroups:
- batch
resources:
- jobs/status
verbs:
- watch
- apiGroups:
- extensions
resources:
- daemonsets/status
verbs:
- get
- apiGroups:
- extensions
resources:
- daemonsets/status
verbs:
- list
- apiGroups:
- extensions
resources:
- daemonsets/status
verbs:
- watch
- apiGroups:
- extensions
resources:
- deployments/status
verbs:
- get
- apiGroups:
- extensions
resources:
- deployments/status
verbs:
- list
- apiGroups:
- extensions
resources:
- deployments/status
verbs:
- watch
- apiGroups:
- extensions
resources:
- ingresses/status
verbs:
- get
- apiGroups:
- extensions
resources:
- ingresses/status
verbs:
- list
- apiGroups:
- extensions
resources:
- ingresses/status
verbs:
- watch
- apiGroups:
- extensions
resources:
- replicasets/status
verbs:
- get
- apiGroups:
- extensions
resources:
- replicasets/status
verbs:
- list
- apiGroups:
- extensions
resources:
- replicasets/status
verbs:
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets/status
verbs:
- get
- apiGroups:
- policy
resources:
- poddisruptionbudgets/status
verbs:
- list
- apiGroups:
- policy
resources:
- poddisruptionbudgets/status
verbs:
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- get
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- list
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- monitoring.coreos.com
resources:
- prometheusrules
verbs:
- get
- watch
- list
- update
- delete
- create
- apiGroups:
- hpa.orkaproj.io
resources:
- hpaalgoes
verbs:
- get
- watch
- list
- update
- delete
- create
- apiGroups:
- networking.istio.io
resources:
- virtualservices
- destinationrules
- serviceentries
- envoyfilters
- gateways
- sidecars
verbs:
- get
- list
- create
- update
- delete
- patch
- watch
- apiGroups:
- authorization.k8s.io
resources:
- localsubjectaccessreviews
verbs:
- create
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch

File diff suppressed because it is too large

View File

@@ -1,174 +0,0 @@
package commands
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"text/tabwriter"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
applicationpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
)
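// DisplayedAction is a flattened representation of a resource action used for
// table, JSON and YAML output.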
type DisplayedAction struct {
Group string
Kind string
Name string
Action string
Disabled bool
}
// NewApplicationResourceActionsCommand returns a new instance of an `argocd app actions` command
func NewApplicationResourceActionsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "actions",
Short: "Manage Resource actions",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
command.AddCommand(NewApplicationResourceActionsListCommand(clientOpts))
command.AddCommand(NewApplicationResourceActionsRunCommand(clientOpts))
return command
}
// NewApplicationResourceActionsListCommand returns a new instance of an `argocd app actions list` command
func NewApplicationResourceActionsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var namespace string
var kind string
var group string
var resourceName string
var output string
var command = &cobra.Command{
Use: "list APPNAME",
Short: "Lists available actions on a resource",
}
command.Run = func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
appName := args[0]
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer io.Close(conn)
ctx := context.Background()
resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName})
errors.CheckError(err)
filteredObjects := filterResources(command, resources.Items, group, kind, namespace, resourceName, true)
var availableActions []DisplayedAction
for i := range filteredObjects {
obj := filteredObjects[i]
gvk := obj.GroupVersionKind()
availActionsForResource, err := appIf.ListResourceActions(ctx, &applicationpkg.ApplicationResourceRequest{
Name: &appName,
Namespace: obj.GetNamespace(),
ResourceName: obj.GetName(),
Group: gvk.Group,
Kind: gvk.Kind,
})
errors.CheckError(err)
for _, action := range availActionsForResource.Actions {
displayAction := DisplayedAction{
Group: gvk.Group,
Kind: gvk.Kind,
Name: obj.GetName(),
Action: action.Name,
Disabled: action.Disabled,
}
availableActions = append(availableActions, displayAction)
}
}
switch output {
case "yaml":
yamlBytes, err := yaml.Marshal(availableActions)
errors.CheckError(err)
fmt.Println(string(yamlBytes))
case "json":
jsonBytes, err := json.MarshalIndent(availableActions, "", " ")
errors.CheckError(err)
fmt.Println(string(jsonBytes))
case "":
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "GROUP\tKIND\tNAME\tACTION\tDISABLED\n")
for _, action := range availableActions {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", action.Group, action.Kind, action.Name, action.Action, strconv.FormatBool(action.Disabled))
}
_ = w.Flush()
}
}
command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource")
command.Flags().StringVar(&kind, "kind", "", "Kind")
command.Flags().StringVar(&group, "group", "", "Group")
command.Flags().StringVar(&namespace, "namespace", "", "Namespace")
command.Flags().StringVarP(&output, "out", "o", "", "Output format. One of: yaml, json")
return command
}
// NewApplicationResourceActionsRunCommand returns a new instance of an `argocd app actions run` command
func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var namespace string
var resourceName string
var kind string
var group string
var all bool
var command = &cobra.Command{
Use: "run APPNAME ACTION",
Short: "Runs an available action on resource(s)",
}
command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource")
command.Flags().StringVar(&namespace, "namespace", "", "Namespace")
command.Flags().StringVar(&kind, "kind", "", "Kind")
command.Flags().StringVar(&group, "group", "", "Group")
errors.CheckError(command.MarkFlagRequired("kind"))
command.Flags().BoolVar(&all, "all", false, "Indicates whether to run the action on multiple matching resources")
command.Run = func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
appName := args[0]
actionName := args[1]
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer io.Close(conn)
ctx := context.Background()
resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName})
errors.CheckError(err)
filteredObjects := filterResources(command, resources.Items, group, kind, namespace, resourceName, all)
var resGroup = filteredObjects[0].GroupVersionKind().Group
for i := range filteredObjects[1:] {
if filteredObjects[i].GroupVersionKind().Group != resGroup {
log.Fatal("Ambiguous resource group. Use flag --group to specify resource group explicitly.")
}
}
for i := range filteredObjects {
obj := filteredObjects[i]
gvk := obj.GroupVersionKind()
objResourceName := obj.GetName()
_, err := appIf.RunResourceAction(context.Background(), &applicationpkg.ResourceActionRunRequest{
Name: &appName,
Namespace: obj.GetNamespace(),
ResourceName: objResourceName,
Group: gvk.Group,
Kind: gvk.Kind,
Action: actionName,
})
errors.CheckError(err)
}
}
return command
}

View File

@@ -1,163 +0,0 @@
package commands
import (
"testing"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
func TestFindRevisionHistoryWithoutPassedId(t *testing.T) {
histories := v1alpha1.RevisionHistories{}
histories = append(histories, v1alpha1.RevisionHistory{ID: 1})
histories = append(histories, v1alpha1.RevisionHistory{ID: 2})
histories = append(histories, v1alpha1.RevisionHistory{ID: 3})
status := v1alpha1.ApplicationStatus{
Resources: nil,
Sync: v1alpha1.SyncStatus{},
Health: v1alpha1.HealthStatus{},
History: histories,
Conditions: nil,
ReconciledAt: nil,
OperationState: nil,
ObservedAt: nil,
SourceType: "",
Summary: v1alpha1.ApplicationSummary{},
}
application := v1alpha1.Application{
Status: status,
}
history, err := findRevisionHistory(&application, -1)
if err != nil {
t.Fatal("Find revision history should fail without errors")
}
if history == nil {
t.Fatal("History should be found")
}
}
func TestFindRevisionHistoryWithoutPassedIdAndEmptyHistoryList(t *testing.T) {
histories := v1alpha1.RevisionHistories{}
status := v1alpha1.ApplicationStatus{
Resources: nil,
Sync: v1alpha1.SyncStatus{},
Health: v1alpha1.HealthStatus{},
History: histories,
Conditions: nil,
ReconciledAt: nil,
OperationState: nil,
ObservedAt: nil,
SourceType: "",
Summary: v1alpha1.ApplicationSummary{},
}
application := v1alpha1.Application{
Status: status,
}
history, err := findRevisionHistory(&application, -1)
if err == nil {
t.Fatal("Find revision history should fail with errors")
}
if history != nil {
t.Fatal("History should be empty")
}
if err.Error() != "Application '' should have at least two successful deployments" {
t.Fatal("Find revision history should fail with correct error message")
}
}
func TestFindRevisionHistoryWithPassedId(t *testing.T) {
histories := v1alpha1.RevisionHistories{}
histories = append(histories, v1alpha1.RevisionHistory{ID: 1})
histories = append(histories, v1alpha1.RevisionHistory{ID: 2})
histories = append(histories, v1alpha1.RevisionHistory{ID: 3, Revision: "123"})
status := v1alpha1.ApplicationStatus{
Resources: nil,
Sync: v1alpha1.SyncStatus{},
Health: v1alpha1.HealthStatus{},
History: histories,
Conditions: nil,
ReconciledAt: nil,
OperationState: nil,
ObservedAt: nil,
SourceType: "",
Summary: v1alpha1.ApplicationSummary{},
}
application := v1alpha1.Application{
Status: status,
}
history, err := findRevisionHistory(&application, 3)
if err != nil {
t.Fatal("Find revision history should fail without errors")
}
if history == nil {
t.Fatal("History should be found")
}
if history.Revision != "123" {
t.Fatal("Failed to find correct history with correct revision")
}
}
func TestFindRevisionHistoryWithPassedIdThatNotExist(t *testing.T) {
histories := v1alpha1.RevisionHistories{}
histories = append(histories, v1alpha1.RevisionHistory{ID: 1})
histories = append(histories, v1alpha1.RevisionHistory{ID: 2})
histories = append(histories, v1alpha1.RevisionHistory{ID: 3, Revision: "123"})
status := v1alpha1.ApplicationStatus{
Resources: nil,
Sync: v1alpha1.SyncStatus{},
Health: v1alpha1.HealthStatus{},
History: histories,
Conditions: nil,
ReconciledAt: nil,
OperationState: nil,
ObservedAt: nil,
SourceType: "",
Summary: v1alpha1.ApplicationSummary{},
}
application := v1alpha1.Application{
Status: status,
}
history, err := findRevisionHistory(&application, 4)
if err == nil {
t.Fatal("Find revision history should fail with errors")
}
if history != nil {
t.Fatal("History should be not found")
}
if err.Error() != "Application '' does not have deployment id '4' in history\n" {
t.Fatal("Find revision history should fail with correct error message")
}
}

View File

@@ -1,321 +0,0 @@
package commands
import (
"context"
"crypto/x509"
"fmt"
"os"
"sort"
"strings"
"text/tabwriter"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
certificatepkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/certificate"
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
certutil "github.com/argoproj/argo-cd/v2/util/cert"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
)
// NewCertCommand returns a new instance of an `argocd cert` command
func NewCertCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "cert",
Short: "Manage repository certificates and SSH known hosts entries",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
Example: ` # Add a TLS certificate for cd.example.com to ArgoCD cert store from a file
argocd cert add-tls --from ~/mycert.pem cd.example.com
# Add a TLS certificate for cd.example.com to ArgoCD via stdin
cat ~/mycert.pem | argocd cert add-tls cd.example.com
# Add SSH known host entries for cd.example.com to ArgoCD by scanning host
ssh-keyscan cd.example.com | argocd cert add-ssh --batch
# List all known TLS certificates
argocd cert list --cert-type https
# Remove all TLS certificates for cd.example.com
argocd cert rm --cert-type https cd.example.com
# Remove all certificates and SSH known host entries for cd.example.com
argocd cert rm cd.example.com
`,
}
command.AddCommand(NewCertAddSSHCommand(clientOpts))
command.AddCommand(NewCertAddTLSCommand(clientOpts))
command.AddCommand(NewCertListCommand(clientOpts))
command.AddCommand(NewCertRemoveCommand(clientOpts))
return command
}
func NewCertAddTLSCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
fromFile string
upsert bool
)
var command = &cobra.Command{
Use: "add-tls SERVERNAME",
Short: "Add TLS certificate data for connecting to repository server SERVERNAME",
Run: func(c *cobra.Command, args []string) {
conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie()
defer io.Close(conn)
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
var certificateArray []string
var err error
if fromFile != "" {
fmt.Printf("Reading TLS certificate data in PEM format from '%s'\n", fromFile)
certificateArray, err = certutil.ParseTLSCertificatesFromPath(fromFile)
} else {
fmt.Println("Enter TLS certificate data in PEM format. Press CTRL-D when finished.")
certificateArray, err = certutil.ParseTLSCertificatesFromStream(os.Stdin)
}
errors.CheckError(err)
certificateList := make([]appsv1.RepositoryCertificate, 0)
subjectMap := make(map[string]*x509.Certificate)
for _, entry := range certificateArray {
// We want to make sure to send only valid certificate data to the
// server, so we decode the certificate into an X509 structure before
// processing it further.
x509cert, err := certutil.DecodePEMCertificateToX509(entry)
errors.CheckError(err)
// TODO: We need a better way to detect duplicates sent in the stream,
// maybe by using fingerprints? For now, no two certs with the same
// subject may be sent.
if subjectMap[x509cert.Subject.String()] != nil {
fmt.Printf("ERROR: Cert with subject '%s' already seen in the input stream.\n", x509cert.Subject.String())
continue
} else {
subjectMap[x509cert.Subject.String()] = x509cert
}
}
serverName := args[0]
if len(certificateArray) > 0 {
certificateList = append(certificateList, appsv1.RepositoryCertificate{
ServerName: serverName,
CertType: "https",
CertData: []byte(strings.Join(certificateArray, "\n")),
})
certificates, err := certIf.CreateCertificate(context.Background(), &certificatepkg.RepositoryCertificateCreateRequest{
Certificates: &appsv1.RepositoryCertificateList{
Items: certificateList,
},
Upsert: upsert,
})
errors.CheckError(err)
fmt.Printf("Created entry with %d PEM certificates for repository server %s\n", len(certificates.Items), serverName)
} else {
fmt.Printf("No valid certificates have been detected in the stream.\n")
}
},
}
command.Flags().StringVar(&fromFile, "from", "", "read TLS certificate data from file (default is to read from stdin)")
command.Flags().BoolVar(&upsert, "upsert", false, "Replace existing TLS certificate if certificate is different in input")
return command
}
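The TODO inside NewCertAddTLSCommand above suggests fingerprint-based duplicate detection instead of keying on the certificate subject. A minimal sketch of that idea (a hypothetical helper, not part of this diff) could key the seen-map on a SHA-256 fingerprint of each certificate's raw DER bytes:

package commands

import (
	"crypto/sha256"
	"crypto/x509"
)

// dedupeByFingerprint drops certificates whose SHA-256 fingerprint (computed
// over the raw DER bytes) has already been seen. Hypothetical helper only;
// it is not part of this change.
func dedupeByFingerprint(certs []*x509.Certificate) []*x509.Certificate {
	seen := make(map[[sha256.Size]byte]bool)
	deduped := make([]*x509.Certificate, 0, len(certs))
	for _, cert := range certs {
		fp := sha256.Sum256(cert.Raw)
		if seen[fp] {
			continue
		}
		seen[fp] = true
		deduped = append(deduped, cert)
	}
	return deduped
}

Keying on the DER fingerprint would also treat re-issued certificates that share a subject as distinct entries, which the current subject-based check cannot do.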
// NewCertAddSSHCommand returns a new instance of an `argocd cert add-ssh` command
func NewCertAddSSHCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
fromFile string
batchProcess bool
upsert bool
certificates []appsv1.RepositoryCertificate
)
var command = &cobra.Command{
Use: "add-ssh --batch",
Short: "Add SSH known host entries for repository servers",
Run: func(c *cobra.Command, args []string) {
conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie()
defer io.Close(conn)
var sshKnownHostsLists []string
var err error
// --batch is a flag, but it is mandatory for now.
if batchProcess {
if fromFile != "" {
fmt.Printf("Reading SSH known hosts entries from file '%s'\n", fromFile)
sshKnownHostsLists, err = certutil.ParseSSHKnownHostsFromPath(fromFile)
} else {
fmt.Println("Enter SSH known hosts entries, one per line. Press CTRL-D when finished.")
sshKnownHostsLists, err = certutil.ParseSSHKnownHostsFromStream(os.Stdin)
}
} else {
err = fmt.Errorf("You need to specify --batch or specify --help for usage instructions")
}
errors.CheckError(err)
if len(sshKnownHostsLists) == 0 {
errors.CheckError(fmt.Errorf("No valid SSH known hosts data found."))
}
for _, knownHostsEntry := range sshKnownHostsLists {
_, certSubType, certData, err := certutil.TokenizeSSHKnownHostsEntry(knownHostsEntry)
errors.CheckError(err)
hostnameList, _, err := certutil.KnownHostsLineToPublicKey(knownHostsEntry)
errors.CheckError(err)
// Each key could be valid for multiple hostnames
for _, hostname := range hostnameList {
certificate := appsv1.RepositoryCertificate{
ServerName: hostname,
CertType: "ssh",
CertSubType: certSubType,
CertData: certData,
}
certificates = append(certificates, certificate)
}
}
certList := &appsv1.RepositoryCertificateList{Items: certificates}
response, err := certIf.CreateCertificate(context.Background(), &certificatepkg.RepositoryCertificateCreateRequest{
Certificates: certList,
Upsert: upsert,
})
errors.CheckError(err)
fmt.Printf("Successfully created %d SSH known host entries\n", len(response.Items))
},
}
command.Flags().StringVar(&fromFile, "from", "", "Read SSH known hosts data from file (default is to read from stdin)")
command.Flags().BoolVar(&batchProcess, "batch", false, "Perform batch processing by reading in SSH known hosts data (mandatory flag)")
command.Flags().BoolVar(&upsert, "upsert", false, "Replace existing SSH server public host keys if key is different in input")
return command
}
// NewCertRemoveCommand returns a new instance of an `argocd cert rm` command
func NewCertRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
certType string
certSubType string
certQuery certificatepkg.RepositoryCertificateQuery
)
var command = &cobra.Command{
Use: "rm REPOSERVER",
Short: "Remove certificate of TYPE for REPOSERVER",
Run: func(c *cobra.Command, args []string) {
if len(args) < 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie()
defer io.Close(conn)
hostNamePattern := args[0]
// As a precaution, prevent the user from specifying a bare wildcard as the
// hostname -- the user could still use "?*" or any other pattern to remove
// all certificates, but it is less likely to happen by accident.
if hostNamePattern == "*" {
err := fmt.Errorf("A single wildcard is not allowed as REPOSERVER name.")
errors.CheckError(err)
}
certQuery = certificatepkg.RepositoryCertificateQuery{
HostNamePattern: hostNamePattern,
CertType: certType,
CertSubType: certSubType,
}
removed, err := certIf.DeleteCertificate(context.Background(), &certQuery)
errors.CheckError(err)
if len(removed.Items) > 0 {
for _, cert := range removed.Items {
fmt.Printf("Removed cert for '%s' of type '%s' (subtype '%s')\n", cert.ServerName, cert.CertType, cert.CertSubType)
}
} else {
fmt.Println("No certificates were removed (none matched the given patterns)")
}
},
}
command.Flags().StringVar(&certType, "cert-type", "", "Only remove certs of given type (ssh, https)")
command.Flags().StringVar(&certSubType, "cert-sub-type", "", "Only remove certs of given sub-type (only for ssh)")
return command
}
// NewCertListCommand returns a new instance of an `argocd cert list` command
func NewCertListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
certType string
hostNamePattern string
sortOrder string
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List configured certificates",
Run: func(c *cobra.Command, args []string) {
if certType != "" {
switch certType {
case "ssh":
case "https":
default:
fmt.Println("cert-type must be either ssh or https")
os.Exit(1)
}
}
conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie()
defer io.Close(conn)
certificates, err := certIf.ListCertificates(context.Background(), &certificatepkg.RepositoryCertificateQuery{HostNamePattern: hostNamePattern, CertType: certType})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(certificates.Items, output, false)
errors.CheckError(err)
case "wide", "":
printCertTable(certificates.Items, sortOrder)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
command.Flags().StringVar(&sortOrder, "sort", "", "set display sort order for output format wide. One of: hostname|type")
command.Flags().StringVar(&certType, "cert-type", "", "only list certificates of given type, valid: 'ssh','https'")
command.Flags().StringVar(&hostNamePattern, "hostname-pattern", "", "only list certificates for hosts matching given glob-pattern")
return command
}
// Print table of certificate info
func printCertTable(certs []appsv1.RepositoryCertificate, sortOrder string) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "HOSTNAME\tTYPE\tSUBTYPE\tINFO\n")
if sortOrder == "hostname" || sortOrder == "" {
sort.Slice(certs, func(i, j int) bool {
return certs[i].ServerName < certs[j].ServerName
})
} else if sortOrder == "type" {
sort.Slice(certs, func(i, j int) bool {
return certs[i].CertType < certs[j].CertType
})
}
for _, c := range certs {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", c.ServerName, c.CertType, c.CertSubType, c.CertInfo)
}
_ = w.Flush()
}

View File

@@ -3,29 +3,23 @@ package commands
import (
"context"
"fmt"
"io/ioutil"
"os"
"regexp"
"sort"
"strings"
"text/tabwriter"
"github.com/mattn/go-isatty"
"k8s.io/client-go/kubernetes"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/text/label"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/util"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/common"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
clusterpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster"
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/clusterauth"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
)
// NewClusterCommand returns a new instance of an `argocd cluster` command
@@ -37,63 +31,37 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
c.HelpFunc()(c, args)
os.Exit(1)
},
Example: ` # List all known clusters in JSON format:
argocd cluster list -o json
# Add a target cluster configuration to ArgoCD. The context must exist in your kubectl config:
argocd cluster add example-cluster
# Get specific details about a cluster in plain text (wide) format:
argocd cluster get example-cluster -o wide
# Remove a target cluster context from ArgoCD
argocd cluster rm example-cluster
`,
}
command.AddCommand(NewClusterAddCommand(clientOpts, pathOpts))
command.AddCommand(NewClusterGetCommand(clientOpts))
command.AddCommand(NewClusterListCommand(clientOpts))
command.AddCommand(NewClusterRemoveCommand(clientOpts))
command.AddCommand(NewClusterRotateAuthCommand(clientOpts))
return command
}
// NewClusterAddCommand returns a new instance of an `argocd cluster add` command
func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
var (
clusterOpts cmdutil.ClusterOptions
skipConfirmation bool
labels []string
annotations []string
inCluster bool
upsert bool
)
var command = &cobra.Command{
Use: "add CONTEXT",
Use: "add",
Short: fmt.Sprintf("%s cluster add CONTEXT", cliName),
Run: func(c *cobra.Command, args []string) {
var configAccess clientcmd.ConfigAccess = pathOpts
if len(args) == 0 {
log.Error("Choose a context name from:")
cmdutil.PrintKubeContexts(configAccess)
printKubeContexts(configAccess)
os.Exit(1)
}
config, err := configAccess.GetStartingConfig()
errors.CheckError(err)
contextName := args[0]
clstContext := config.Contexts[contextName]
clstContext := config.Contexts[args[0]]
if clstContext == nil {
log.Fatalf("Context %s does not exist in kubeconfig", contextName)
log.Fatalf("Context %s does not exist in kubeconfig", args[0])
}
isTerminal := isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
if isTerminal && !skipConfirmation {
message := fmt.Sprintf("WARNING: This will create a service account `argocd-manager` on the cluster referenced by context `%s` with full cluster level admin privileges. Do you want to continue [y/N]? ", contextName)
if !cli.AskToProceed(message) {
os.Exit(1)
}
}
overrides := clientcmd.ConfigOverrides{
Context: *clstContext,
}
@@ -101,267 +69,166 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
managerBearerToken := ""
var awsAuthConf *argoappv1.AWSAuthConfig
var execProviderConf *argoappv1.ExecProviderConfig
if clusterOpts.AwsClusterName != "" {
awsAuthConf = &argoappv1.AWSAuthConfig{
ClusterName: clusterOpts.AwsClusterName,
RoleARN: clusterOpts.AwsRoleArn,
}
} else if clusterOpts.ExecProviderCommand != "" {
execProviderConf = &argoappv1.ExecProviderConfig{
Command: clusterOpts.ExecProviderCommand,
Args: clusterOpts.ExecProviderArgs,
Env: clusterOpts.ExecProviderEnv,
APIVersion: clusterOpts.ExecProviderAPIVersion,
InstallHint: clusterOpts.ExecProviderInstallHint,
}
} else {
// Install RBAC resources for managing the cluster
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
if clusterOpts.ServiceAccount != "" {
managerBearerToken, err = clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount, common.BearerTokenTimeout)
} else {
isTerminal := isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
if isTerminal && !skipConfirmation {
message := fmt.Sprintf("WARNING: This will create a service account `argocd-manager` on the cluster referenced by context `%s` with full cluster level admin privileges. Do you want to continue [y/N]? ", contextName)
if !cli.AskToProceed(message) {
os.Exit(1)
}
}
managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, clusterOpts.SystemNamespace, clusterOpts.Namespaces, common.BearerTokenTimeout)
}
errors.CheckError(err)
}
labelsMap, err := label.Parse(labels)
errors.CheckError(err)
annotationsMap, err := label.Parse(annotations)
errors.CheckError(err)
// Install RBAC resources for managing the cluster
managerBearerToken := common.InstallClusterManagerRBAC(conf)
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer io.Close(conn)
if clusterOpts.Name != "" {
contextName = clusterOpts.Name
defer util.Close(conn)
clst := NewCluster(args[0], conf, managerBearerToken)
if inCluster {
clst.Server = common.KubernetesInternalAPIServerAddr
}
clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, clusterOpts.ClusterResources, conf, managerBearerToken, awsAuthConf, execProviderConf, labelsMap, annotationsMap)
if clusterOpts.InCluster {
clst.Server = argoappv1.KubernetesInternalAPIServerAddr
}
if clusterOpts.Shard >= 0 {
clst.Shard = &clusterOpts.Shard
}
if clusterOpts.Project != "" {
clst.Project = clusterOpts.Project
}
clstCreateReq := clusterpkg.ClusterCreateRequest{
clstCreateReq := cluster.ClusterCreateRequest{
Cluster: clst,
Upsert: clusterOpts.Upsert,
Upsert: upsert,
}
_, err = clusterIf.Create(context.Background(), &clstCreateReq)
clst, err = clusterIf.Create(context.Background(), &clstCreateReq)
errors.CheckError(err)
fmt.Printf("Cluster '%s' added\n", clst.Server)
fmt.Printf("Cluster '%s' added\n", clst.Name)
},
}
command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file")
command.Flags().BoolVar(&clusterOpts.Upsert, "upsert", false, "Override an existing cluster with the same name even if the spec differs")
command.Flags().StringVar(&clusterOpts.ServiceAccount, "service-account", "", fmt.Sprintf("System namespace service account to use for kubernetes resource management. If not set then default \"%s\" SA will be created", clusterauth.ArgoCDManagerServiceAccount))
command.Flags().StringVar(&clusterOpts.SystemNamespace, "system-namespace", common.DefaultSystemNamespace, "Use different system namespace")
command.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip explicit confirmation")
command.Flags().StringArrayVar(&labels, "label", nil, "Set metadata labels (e.g. --label key=value)")
command.Flags().StringArrayVar(&annotations, "annotation", nil, "Set metadata annotations (e.g. --annotation key=value)")
cmdutil.AddClusterFlags(command, &clusterOpts)
command.Flags().BoolVar(&inCluster, "in-cluster", false, "Indicates ArgoCD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing cluster with the same name even if the spec differs")
return command
}
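// printKubeContexts prints the contexts of the given kubeconfig in a table,
// marking the current context with an asterisk.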
func printKubeContexts(ca clientcmd.ConfigAccess) {
config, err := ca.GetStartingConfig()
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
defer func() { _ = w.Flush() }()
columnNames := []string{"CURRENT", "NAME", "CLUSTER", "SERVER"}
_, err = fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t"))
errors.CheckError(err)
// sort names so output is deterministic
contextNames := make([]string, 0)
for name := range config.Contexts {
contextNames = append(contextNames, name)
}
sort.Strings(contextNames)
if config.Clusters == nil {
return
}
for _, name := range contextNames {
// ignore malformed kube config entries
context := config.Contexts[name]
if context == nil {
continue
}
cluster := config.Clusters[context.Cluster]
if cluster == nil {
continue
}
prefix := " "
if config.CurrentContext == name {
prefix = "*"
}
_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", prefix, name, context.Cluster, cluster.Server)
errors.CheckError(err)
}
}
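// NewCluster builds an argoappv1.Cluster from the given rest.Config and bearer
// token, reading any client certificate, key or CA data that is only referenced
// by file path.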
func NewCluster(name string, conf *rest.Config, managerBearerToken string) *argoappv1.Cluster {
tlsClientConfig := argoappv1.TLSClientConfig{
Insecure: conf.TLSClientConfig.Insecure,
ServerName: conf.TLSClientConfig.ServerName,
CertData: conf.TLSClientConfig.CertData,
KeyData: conf.TLSClientConfig.KeyData,
CAData: conf.TLSClientConfig.CAData,
}
if len(conf.TLSClientConfig.CertData) == 0 && conf.TLSClientConfig.CertFile != "" {
data, err := ioutil.ReadFile(conf.TLSClientConfig.CertFile)
errors.CheckError(err)
tlsClientConfig.CertData = data
}
if len(conf.TLSClientConfig.KeyData) == 0 && conf.TLSClientConfig.KeyFile != "" {
data, err := ioutil.ReadFile(conf.TLSClientConfig.KeyFile)
errors.CheckError(err)
tlsClientConfig.KeyData = data
}
if len(conf.TLSClientConfig.CAData) == 0 && conf.TLSClientConfig.CAFile != "" {
data, err := ioutil.ReadFile(conf.TLSClientConfig.CAFile)
errors.CheckError(err)
tlsClientConfig.CAData = data
}
clst := argoappv1.Cluster{
Server: conf.Host,
Name: name,
Config: argoappv1.ClusterConfig{
BearerToken: managerBearerToken,
TLSClientConfig: tlsClientConfig,
},
}
return &clst
}
// NewClusterGetCommand returns a new instance of an `argocd cluster get` command
func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "get SERVER/NAME",
Use: "get",
Short: "Get cluster information",
Example: `argocd cluster get https://12.34.567.89
argocd cluster get in-cluster`,
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer io.Close(conn)
clusters := make([]argoappv1.Cluster, 0)
for _, clusterSelector := range args {
clst, err := clusterIf.Get(context.Background(), getQueryBySelector(clusterSelector))
defer util.Close(conn)
for _, clusterName := range args {
clst, err := clusterIf.Get(context.Background(), &cluster.ClusterQuery{Server: clusterName})
errors.CheckError(err)
clusters = append(clusters, *clst)
}
switch output {
case "yaml", "json":
err := PrintResourceList(clusters, output, true)
yamlBytes, err := yaml.Marshal(clst)
errors.CheckError(err)
case "wide", "":
printClusterDetails(clusters)
case "server":
printClusterServers(clusters)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
fmt.Printf("%v", string(yamlBytes))
}
},
}
// we have yaml as default to not break backwards-compatibility
command.Flags().StringVarP(&output, "output", "o", "yaml", "Output format. One of: json|yaml|wide|server")
return command
}
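// strWithDefault returns def when value is empty, and value otherwise.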
func strWithDefault(value string, def string) string {
if value == "" {
return def
}
return value
}
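// formatNamespaces renders the cluster's configured namespaces as a
// comma-separated list, or "all namespaces" when none are restricted.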
func formatNamespaces(cluster argoappv1.Cluster) string {
if len(cluster.Namespaces) == 0 {
return "all namespaces"
}
return strings.Join(cluster.Namespaces, ", ")
}
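// printClusterDetails prints a human-readable summary of each cluster's
// connection, TLS and authentication settings.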
func printClusterDetails(clusters []argoappv1.Cluster) {
for _, cluster := range clusters {
fmt.Printf("Cluster information\n\n")
fmt.Printf(" Server URL: %s\n", cluster.Server)
fmt.Printf(" Server Name: %s\n", strWithDefault(cluster.Name, "-"))
fmt.Printf(" Server Version: %s\n", cluster.ServerVersion)
fmt.Printf(" Namespaces: %s\n", formatNamespaces(cluster))
fmt.Printf("\nTLS configuration\n\n")
fmt.Printf(" Client cert: %v\n", string(cluster.Config.TLSClientConfig.CertData) != "")
fmt.Printf(" Cert validation: %v\n", !cluster.Config.TLSClientConfig.Insecure)
fmt.Printf("\nAuthentication\n\n")
fmt.Printf(" Basic authentication: %v\n", cluster.Config.Username != "")
fmt.Printf(" oAuth authentication: %v\n", cluster.Config.BearerToken != "")
fmt.Printf(" AWS authentication: %v\n", cluster.Config.AWSAuthConfig != nil)
fmt.Println()
}
}
// NewClusterRemoveCommand returns a new instance of an `argocd cluster rm` command
func NewClusterRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rm SERVER",
Short: "Remove cluster credentials",
Example: `argocd cluster rm https://12.34.567.89`,
Use: "rm",
Short: "Remove cluster credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer io.Close(conn)
// clientset, err := kubernetes.NewForConfig(conf)
// errors.CheckError(err)
defer util.Close(conn)
for _, clusterName := range args {
// TODO(jessesuen): find the right context and remove manager RBAC artifacts
// err := clusterauth.UninstallClusterManagerRBAC(clientset)
// errors.CheckError(err)
_, err := clusterIf.Delete(context.Background(), &clusterpkg.ClusterQuery{Server: clusterName})
// common.UninstallClusterManagerRBAC(conf)
_, err := clusterIf.Delete(context.Background(), &cluster.ClusterQuery{Server: clusterName})
errors.CheckError(err)
fmt.Printf("Cluster '%s' removed\n", clusterName)
}
},
}
return command
}
// Print table of cluster information
func printClusterTable(clusters []argoappv1.Cluster) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "SERVER\tNAME\tVERSION\tSTATUS\tMESSAGE\tPROJECT\n")
for _, c := range clusters {
server := c.Server
if len(c.Namespaces) > 0 {
server = fmt.Sprintf("%s (%d namespaces)", c.Server, len(c.Namespaces))
}
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", server, c.Name, c.ServerVersion, c.ConnectionState.Status, c.ConnectionState.Message, c.Project)
}
_ = w.Flush()
}
// Returns a cluster query for the given cluster selector (matching by server URL or by name)
func getQueryBySelector(clusterSelector string) *clusterpkg.ClusterQuery {
var query clusterpkg.ClusterQuery
isServer, err := regexp.MatchString(`^https?://`, clusterSelector)
if isServer || err != nil {
query.Server = clusterSelector
} else {
query.Name = clusterSelector
}
return &query
}
// Print list of cluster servers
func printClusterServers(clusters []argoappv1.Cluster) {
for _, c := range clusters {
fmt.Println(c.Server)
}
}
// NewClusterListCommand returns a new instance of an `argocd cluster list` command
func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List configured clusters",
Run: func(c *cobra.Command, args []string) {
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer io.Close(conn)
clusters, err := clusterIf.List(context.Background(), &clusterpkg.ClusterQuery{})
defer util.Close(conn)
clusters, err := clusterIf.List(context.Background(), &cluster.ClusterQuery{})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(clusters.Items, output, false)
errors.CheckError(err)
case "server":
printClusterServers(clusters.Items)
case "wide", "":
printClusterTable(clusters.Items)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "SERVER\tNAME\tSTATUS\tMESSAGE\n")
for _, c := range clusters.Items {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", c.Server, c.Name, c.ConnectionState.Status, c.ConnectionState.Message)
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|server")
return command
}
// NewClusterRotateAuthCommand returns a new instance of an `argocd cluster rotate-auth` command
func NewClusterRotateAuthCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rotate-auth SERVER",
Short: fmt.Sprintf("%s cluster rotate-auth SERVER", cliName),
Example: fmt.Sprintf("%s cluster rotate-auth https://12.34.567.89", cliName),
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer io.Close(conn)
clusterQuery := clusterpkg.ClusterQuery{
Server: args[0],
}
_, err := clusterIf.RotateAuth(context.Background(), &clusterQuery)
errors.CheckError(err)
fmt.Printf("Cluster '%s' rotated auth\n", clusterQuery.Server)
_ = w.Flush()
},
}
return command

View File

@@ -1,47 +0,0 @@
package commands
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/stretchr/testify/assert"
)
func Test_getQueryBySelector(t *testing.T) {
query := getQueryBySelector("my-cluster")
assert.Equal(t, query.Name, "my-cluster")
assert.Equal(t, query.Server, "")
query = getQueryBySelector("http://my-server")
assert.Equal(t, query.Name, "")
assert.Equal(t, query.Server, "http://my-server")
query = getQueryBySelector("https://my-server")
assert.Equal(t, query.Name, "")
assert.Equal(t, query.Server, "https://my-server")
}
func Test_printClusterTable(t *testing.T) {
printClusterTable([]v1alpha1.Cluster{
{
Server: "my-server",
Name: "my-name",
Config: v1alpha1.ClusterConfig{
Username: "my-username",
Password: "my-password",
BearerToken: "my-bearer-token",
TLSClientConfig: v1alpha1.TLSClientConfig{},
AWSAuthConfig: nil,
},
ConnectionState: v1alpha1.ConnectionState{
Status: "my-status",
Message: "my-message",
ModifiedAt: &metav1.Time{},
},
ServerVersion: "my-version",
},
})
}

View File

@@ -1,72 +1,5 @@
package commands
import (
"encoding/json"
"fmt"
"reflect"
"github.com/ghodss/yaml"
)
const (
cliName = "argocd"
// DefaultSSOLocalPort is the localhost port to listen on for the temporary web server performing
// the OAuth2 login flow.
DefaultSSOLocalPort = 8085
)
// PrintResource prints a single resource in YAML or JSON format to stdout according to the output format
func PrintResource(resource interface{}, output string) error {
switch output {
case "json":
jsonBytes, err := json.MarshalIndent(resource, "", " ")
if err != nil {
return err
}
fmt.Println(string(jsonBytes))
case "yaml":
yamlBytes, err := yaml.Marshal(resource)
if err != nil {
return err
}
fmt.Print(string(yamlBytes))
default:
return fmt.Errorf("unknown output format: %s", output)
}
return nil
}
// PrintResourceList marshals & prints a list of resources to stdout according to the output format
func PrintResourceList(resources interface{}, output string, single bool) error {
kt := reflect.ValueOf(resources)
// Sometimes, we want to marshal the first resource of a slice or array as a single item
if kt.Kind() == reflect.Slice || kt.Kind() == reflect.Array {
if single && kt.Len() == 1 {
return PrintResource(kt.Index(0).Interface(), output)
}
// If we have a zero len list, prevent printing "null"
if kt.Len() == 0 {
return PrintResource([]string{}, output)
}
}
switch output {
case "json":
jsonBytes, err := json.MarshalIndent(resources, "", " ")
if err != nil {
return err
}
fmt.Println(string(jsonBytes))
case "yaml":
yamlBytes, err := yaml.Marshal(resources)
if err != nil {
return err
}
fmt.Print(string(yamlBytes))
default:
return fmt.Errorf("unknown output format: %s", output)
}
return nil
}

View File

@@ -1,142 +0,0 @@
package commands
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// Be careful with tabs vs. spaces in the following expected formats. Indents
// should all be spaces, no tabs.
const expectYamlSingle = `bar: ""
baz: foo
foo: bar
`
const expectJsonSingle = `{
"bar": "",
"baz": "foo",
"foo": "bar"
}
`
const expectYamlList = `one:
bar: ""
baz: foo
foo: bar
two:
bar: ""
baz: foo
foo: bar
`
const expectJsonList = `{
"one": {
"bar": "",
"baz": "foo",
"foo": "bar"
},
"two": {
"bar": "",
"baz": "foo",
"foo": "bar"
}
}
`
// Rather dirty hack to capture stdout from PrintResource() and PrintResourceList()
func captureOutput(f func() error) (string, error) {
stdout := os.Stdout
r, w, err := os.Pipe()
if err != nil {
return "", err
}
os.Stdout = w
err = f()
w.Close()
if err != nil {
os.Stdout = stdout
return "", err
}
str, err := ioutil.ReadAll(r)
os.Stdout = stdout
if err != nil {
return "", err
}
return string(str), err
}
func Test_PrintResource(t *testing.T) {
testResource := map[string]string{
"foo": "bar",
"bar": "",
"baz": "foo",
}
str, err := captureOutput(func() error {
err := PrintResource(testResource, "yaml")
return err
})
assert.NoError(t, err)
assert.Equal(t, str, expectYamlSingle)
str, err = captureOutput(func() error {
err := PrintResource(testResource, "json")
return err
})
assert.NoError(t, err)
assert.Equal(t, str, expectJsonSingle)
err = PrintResource(testResource, "unknown")
assert.Error(t, err)
}
func Test_PrintResourceList(t *testing.T) {
testResource := map[string]map[string]string{
"one": {
"foo": "bar",
"bar": "",
"baz": "foo",
},
"two": {
"foo": "bar",
"bar": "",
"baz": "foo",
},
}
testResource2 := make([]map[string]string, 0)
testResource2 = append(testResource2, testResource["one"])
str, err := captureOutput(func() error {
err := PrintResourceList(testResource, "yaml", false)
return err
})
assert.NoError(t, err)
assert.Equal(t, str, expectYamlList)
str, err = captureOutput(func() error {
err := PrintResourceList(testResource, "json", false)
return err
})
assert.NoError(t, err)
assert.Equal(t, str, expectJsonList)
str, err = captureOutput(func() error {
err := PrintResourceList(testResource2, "yaml", true)
return err
})
assert.NoError(t, err)
assert.Equal(t, str, expectYamlSingle)
str, err = captureOutput(func() error {
err := PrintResourceList(testResource2, "json", true)
return err
})
assert.NoError(t, err)
assert.Equal(t, str, expectJsonSingle)
err = PrintResourceList(testResource, "unknown", false)
assert.Error(t, err)
}

View File

@@ -1,233 +0,0 @@
package commands
import (
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
const (
bashCompletionFunc = `
__argocd_list_apps() {
local -a argocd_out
if argocd_out=($(argocd app list --output name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_list_app_history() {
local app=$1
local -a argocd_out
if argocd_out=($(argocd app history $app --output id 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_app_rollback() {
local -a command
for comp_word in "${COMP_WORDS[@]}"; do
if [[ $comp_word =~ ^-.*$ ]]; then
continue
fi
command+=($comp_word)
done
# fourth arg is app (if present): e.g.- argocd app rollback guestbook
local app=${command[3]}
local id=${command[4]}
if [[ -z $app || $app == $cur ]]; then
__argocd_list_apps
elif [[ -z $id || $id == $cur ]]; then
__argocd_list_app_history $app
fi
}
__argocd_list_servers() {
local -a argocd_out
if argocd_out=($(argocd cluster list --output server 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_list_repos() {
local -a argocd_out
if argocd_out=($(argocd repo list --output url 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_list_projects() {
local -a argocd_out
if argocd_out=($(argocd proj list --output name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_list_namespaces() {
local -a argocd_out
if argocd_out=($(kubectl get namespaces --no-headers 2>/dev/null | cut -f1 -d' ' 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_proj_server_namespace() {
local -a command
for comp_word in "${COMP_WORDS[@]}"; do
if [[ $comp_word =~ ^-.*$ ]]; then
continue
fi
command+=($comp_word)
done
# expect something like this: argocd proj add-destination PROJECT SERVER NAMESPACE
local project=${command[3]}
local server=${command[4]}
local namespace=${command[5]}
if [[ -z $project || $project == $cur ]]; then
__argocd_list_projects
elif [[ -z $server || $server == $cur ]]; then
__argocd_list_servers
elif [[ -z $namespace || $namespace == $cur ]]; then
__argocd_list_namespaces
fi
}
__argocd_list_project_role() {
local project="$1"
local -a argocd_out
if argocd_out=($(argocd proj role list "$project" --output=name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_proj_role(){
local -a command
for comp_word in "${COMP_WORDS[@]}"; do
if [[ $comp_word =~ ^-.*$ ]]; then
continue
fi
command+=($comp_word)
done
# expect something like this: argocd proj role add-policy PROJECT ROLE-NAME
local project=${command[4]}
local role=${command[5]}
if [[ -z $project || $project == $cur ]]; then
__argocd_list_projects
elif [[ -z $role || $role == $cur ]]; then
__argocd_list_project_role $project
fi
}
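# __argocd_custom_func dispatches to the matching completion helper based on the sub-command being completed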
__argocd_custom_func() {
case ${last_command} in
argocd_app_delete | \
argocd_app_diff | \
argocd_app_edit | \
argocd_app_get | \
argocd_app_history | \
argocd_app_manifests | \
argocd_app_patch-resource | \
argocd_app_set | \
argocd_app_sync | \
argocd_app_terminate-op | \
argocd_app_unset | \
argocd_app_wait | \
argocd_app_create)
__argocd_list_apps
return
;;
argocd_app_rollback)
__argocd_app_rollback
return
;;
argocd_cluster_get | \
argocd_cluster_rm | \
argocd_login | \
argocd_cluster_add)
__argocd_list_servers
return
;;
argocd_repo_rm | \
argocd_repo_add)
__argocd_list_repos
return
;;
argocd_proj_add-destination | \
argocd_proj_remove-destination)
__argocd_proj_server_namespace
return
;;
argocd_proj_add-source | \
argocd_proj_remove-source | \
argocd_proj_allow-cluster-resource | \
argocd_proj_allow-namespace-resource | \
argocd_proj_deny-cluster-resource | \
argocd_proj_deny-namespace-resource | \
argocd_proj_delete | \
argocd_proj_edit | \
argocd_proj_get | \
argocd_proj_set | \
argocd_proj_role_list)
__argocd_list_projects
return
;;
argocd_proj_role_remove-policy | \
argocd_proj_role_add-policy | \
argocd_proj_role_create | \
argocd_proj_role_delete | \
argocd_proj_role_get | \
argocd_proj_role_create-token | \
argocd_proj_role_delete-token)
__argocd_proj_role
return
;;
*)
;;
esac
}
`
)
func NewCompletionCommand() *cobra.Command {
var command = &cobra.Command{
Use: "completion SHELL",
Short: "output shell completion code for the specified shell (bash or zsh)",
Long: `Write bash or zsh shell completion code to standard output.
For bash, ensure you have bash completions installed and enabled.
To access completions in your current shell, run
$ source <(argocd completion bash)
Alternatively, write it to a file and source it in .bash_profile
For zsh, output to a file in a directory referenced by the $fpath shell
variable.
`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.HelpFunc()(cmd, args)
os.Exit(1)
}
shell := args[0]
rootCommand := NewCommand()
rootCommand.BashCompletionFunction = bashCompletionFunc
availableCompletions := map[string]func(io.Writer) error{
"bash": rootCommand.GenBashCompletion,
"zsh": rootCommand.GenZshCompletion,
}
completion, ok := availableCompletions[shell]
if !ok {
fmt.Printf("Invalid shell '%s'. The supported shells are bash and zsh.\n", shell)
os.Exit(1)
}
if err := completion(os.Stdout); err != nil {
log.Fatal(err)
}
},
}
return command
}

View File

@@ -8,43 +8,25 @@ import (
"strings"
"text/tabwriter"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util/localconfig"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/localconfig"
)
// NewContextCommand returns a new instance of an `argocd ctx` command
func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var delete bool
var command = &cobra.Command{
Use: "context [CONTEXT]",
Use: "context",
Aliases: []string{"ctx"},
Short: "Switch between contexts",
Run: func(c *cobra.Command, args []string) {
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
errors.CheckError(err)
if delete {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
err := deleteContext(args[0], clientOpts.ConfigPath)
errors.CheckError(err)
return
}
if len(args) == 0 {
printArgoCDContexts(clientOpts.ConfigPath)
return
}
ctxName := args[0]
argoCDDir, err := localconfig.DefaultConfigDir()
errors.CheckError(err)
prevCtxFile := path.Join(argoCDDir, ".prev-ctx")
@@ -54,6 +36,8 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
errors.CheckError(err)
ctxName = string(prevCtxBytes)
}
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
errors.CheckError(err)
if localCfg.CurrentContext == ctxName {
fmt.Printf("Already at context '%s'\n", localCfg.CurrentContext)
return
@@ -63,7 +47,6 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
}
prevCtx := localCfg.CurrentContext
localCfg.CurrentContext = ctxName
err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath)
errors.CheckError(err)
err = ioutil.WriteFile(prevCtxFile, []byte(prevCtx), 0644)
@@ -71,43 +54,9 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
fmt.Printf("Switched to context '%s'\n", localCfg.CurrentContext)
},
}
command.Flags().BoolVar(&delete, "delete", false, "Delete the context instead of switching to it")
return command
}
func deleteContext(context, configPath string) error {
localCfg, err := localconfig.ReadLocalConfig(configPath)
errors.CheckError(err)
if localCfg == nil {
return fmt.Errorf("Nothing to logout from")
}
serverName, ok := localCfg.RemoveContext(context)
if !ok {
return fmt.Errorf("Context %s does not exist", context)
}
_ = localCfg.RemoveUser(context)
_ = localCfg.RemoveServer(serverName)
if localCfg.IsEmpty() {
err = localconfig.DeleteLocalConfig(configPath)
errors.CheckError(err)
} else {
if localCfg.CurrentContext == context {
localCfg.CurrentContext = ""
}
err = localconfig.ValidateLocalConfig(*localCfg)
if err != nil {
return fmt.Errorf("Error in logging out")
}
err = localconfig.WriteLocalConfig(*localCfg, configPath)
errors.CheckError(err)
}
fmt.Printf("Context '%s' deleted\n", context)
return nil
}
func printArgoCDContexts(configPath string) {
localCfg, err := localconfig.ReadLocalConfig(configPath)
errors.CheckError(err)

View File

@@ -1,81 +0,0 @@
package commands
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/argoproj/argo-cd/v2/util/localconfig"
)
const testConfig = `contexts:
- name: argocd1.example.com:443
server: argocd1.example.com:443
user: argocd1.example.com:443
- name: argocd2.example.com:443
server: argocd2.example.com:443
user: argocd2.example.com:443
- name: localhost:8080
server: localhost:8080
user: localhost:8080
current-context: localhost:8080
servers:
- server: argocd1.example.com:443
- server: argocd2.example.com:443
- plain-text: true
server: localhost:8080
users:
- auth-token: vErrYS3c3tReFRe$hToken
name: argocd1.example.com:443
refresh-token: vErrYS3c3tReFRe$hToken
- auth-token: vErrYS3c3tReFRe$hToken
name: argocd2.example.com:443
refresh-token: vErrYS3c3tReFRe$hToken
- auth-token: vErrYS3c3tReFRe$hToken
name: localhost:8080`
const testConfigFilePath = "./testdata/config"
func TestContextDelete(t *testing.T) {
// Write the test config file
err := ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
assert.NoError(t, err)
localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
// Delete a non-current context
err = deleteContext("argocd1.example.com:443", testConfigFilePath)
assert.NoError(t, err)
localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
assert.NotContains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd1.example.com:443", Server: "argocd1.example.com:443", User: "argocd1.example.com:443"})
assert.NotContains(t, localConfig.Servers, localconfig.Server{Server: "argocd1.example.com:443"})
assert.NotContains(t, localConfig.Users, localconfig.User{AuthToken: "vErrYS3c3tReFRe$hToken", Name: "argocd1.example.com:443"})
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd2.example.com:443", Server: "argocd2.example.com:443", User: "argocd2.example.com:443"})
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
// Delete the current context
err = deleteContext("localhost:8080", testConfigFilePath)
assert.NoError(t, err)
localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "")
assert.NotContains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
assert.NotContains(t, localConfig.Servers, localconfig.Server{PlainText: true, Server: "localhost:8080"})
assert.NotContains(t, localConfig.Users, localconfig.User{AuthToken: "vErrYS3c3tReFRe$hToken", Name: "localhost:8080"})
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd2.example.com:443", Server: "argocd2.example.com:443", User: "argocd2.example.com:443"})
// Write the file again so that no conflicts are made in git
err = ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
assert.NoError(t, err)
}

View File

@@ -1,162 +0,0 @@
package commands
import (
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"text/tabwriter"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
gpgkeypkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/gpgkey"
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/errors"
argoio "github.com/argoproj/argo-cd/v2/util/io"
)
// NewGPGCommand returns a new instance of an `argocd gpg` command
func NewGPGCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "gpg",
Short: "Manage GPG keys used for signature verification",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
Example: ``,
}
command.AddCommand(NewGPGListCommand(clientOpts))
command.AddCommand(NewGPGGetCommand(clientOpts))
command.AddCommand(NewGPGAddCommand(clientOpts))
command.AddCommand(NewGPGDeleteCommand(clientOpts))
return command
}
// NewGPGListCommand lists all configured public keys from the server
func NewGPGListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List configured GPG public keys",
Run: func(c *cobra.Command, args []string) {
conn, gpgIf := argocdclient.NewClientOrDie(clientOpts).NewGPGKeyClientOrDie()
defer argoio.Close(conn)
keys, err := gpgIf.List(context.Background(), &gpgkeypkg.GnuPGPublicKeyQuery{})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(keys.Items, output, false)
errors.CheckError(err)
case "wide", "":
printKeyTable(keys.Items)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
return command
}
// NewGPGGetCommand retrieves a single public key from the server
func NewGPGGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "get KEYID",
Short: "Get the GPG public key with ID <KEYID> from the server",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
errors.CheckError(fmt.Errorf("Missing KEYID argument"))
}
conn, gpgIf := argocdclient.NewClientOrDie(clientOpts).NewGPGKeyClientOrDie()
defer argoio.Close(conn)
key, err := gpgIf.Get(context.Background(), &gpgkeypkg.GnuPGPublicKeyQuery{KeyID: args[0]})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(key, output, false)
errors.CheckError(err)
case "wide", "":
fmt.Printf("Key ID: %s\n", key.KeyID)
fmt.Printf("Key fingerprint: %s\n", key.Fingerprint)
fmt.Printf("Key subtype: %s\n", strings.ToUpper(key.SubType))
fmt.Printf("Key owner: %s\n", key.Owner)
fmt.Printf("Key data follows until EOF:\n%s\n", key.KeyData)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
return command
}
// NewGPGAddCommand adds a public key to the server's configuration
func NewGPGAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
fromFile string
)
var command = &cobra.Command{
Use: "add",
Short: "Adds a GPG public key to the server's keyring",
Run: func(c *cobra.Command, args []string) {
if fromFile == "" {
errors.CheckError(fmt.Errorf("--from is mandatory"))
}
keyData, err := ioutil.ReadFile(fromFile)
if err != nil {
errors.CheckError(err)
}
conn, gpgIf := argocdclient.NewClientOrDie(clientOpts).NewGPGKeyClientOrDie()
defer argoio.Close(conn)
resp, err := gpgIf.Create(context.Background(), &gpgkeypkg.GnuPGPublicKeyCreateRequest{Publickey: &appsv1.GnuPGPublicKey{KeyData: string(keyData)}})
errors.CheckError(err)
fmt.Printf("Created %d key(s) from input file", len(resp.Created.Items))
if len(resp.Skipped) > 0 {
fmt.Printf(", and %d key(s) were skipped because they exist already", len(resp.Skipped))
}
fmt.Printf(".\n")
},
}
command.Flags().StringVarP(&fromFile, "from", "f", "", "Path to the file that contains the GPG public key to import")
return command
}
// NewGPGDeleteCommand removes a key from the server's keyring
func NewGPGDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rm KEYID",
Short: "Removes a GPG public key from the server's keyring",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
errors.CheckError(fmt.Errorf("Missing KEYID argument"))
}
conn, gpgIf := argocdclient.NewClientOrDie(clientOpts).NewGPGKeyClientOrDie()
defer argoio.Close(conn)
_, err := gpgIf.Delete(context.Background(), &gpgkeypkg.GnuPGPublicKeyQuery{KeyID: args[0]})
errors.CheckError(err)
fmt.Printf("Deleted key with key ID %s\n", args[0])
},
}
return command
}
// Print table of GPG public key info
func printKeyTable(keys []appsv1.GnuPGPublicKey) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "KEYID\tTYPE\tIDENTITY\n")
for _, k := range keys {
fmt.Fprintf(w, "%s\t%s\t%s\n", k.KeyID, strings.ToUpper(k.SubType), k.Owner)
}
_ = w.Flush()
}

View File

@@ -1,103 +0,0 @@
package headless
import (
"context"
"fmt"
"sync"
"time"
"github.com/go-redis/redis/v8"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
repoapiclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/util/cache"
"github.com/argoproj/argo-cd/v2/util/io"
kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
)
type forwardCacheClient struct {
namespace string
context string
init sync.Once
client cache.CacheClient
err error
}
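// doLazy sets up a port-forward to the Argo CD Redis service on first use (guarded by sync.Once), caches the
// resulting client, and then runs the supplied action against it; an initialization error is returned on every call.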
func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error) error {
c.init.Do(func() {
overrides := clientcmd.ConfigOverrides{
CurrentContext: c.context,
}
redisPort, err := kubeutil.PortForward(6379, c.namespace, &overrides,
"app.kubernetes.io/name=argocd-redis-ha-haproxy", "app.kubernetes.io/name=argocd-redis")
if err != nil {
c.err = err
return
}
redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort)})
c.client = cache.NewRedisCache(redisClient, time.Hour)
})
if c.err != nil {
return c.err
}
return action(c.client)
}
func (c *forwardCacheClient) Set(item *cache.Item) error {
return c.doLazy(func(client cache.CacheClient) error {
return client.Set(item)
})
}
func (c *forwardCacheClient) Get(key string, obj interface{}) error {
return c.doLazy(func(client cache.CacheClient) error {
return client.Get(key, obj)
})
}
func (c *forwardCacheClient) Delete(key string) error {
return c.doLazy(func(client cache.CacheClient) error {
return client.Delete(key)
})
}
func (c *forwardCacheClient) OnUpdated(ctx context.Context, key string, callback func() error) error {
return c.doLazy(func(client cache.CacheClient) error {
return client.OnUpdated(ctx, key, callback)
})
}
func (c *forwardCacheClient) NotifyUpdated(key string) error {
return c.doLazy(func(client cache.CacheClient) error {
return client.NotifyUpdated(key)
})
}
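// forwardRepoClientset lazily port-forwards to the argocd-repo-server pod and reuses the resulting gRPC clientset
// for all subsequent calls.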
type forwardRepoClientset struct {
namespace string
context string
init sync.Once
repoClientset repoapiclient.Clientset
err error
}
func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.RepoServerServiceClient, error) {
c.init.Do(func() {
overrides := clientcmd.ConfigOverrides{
CurrentContext: c.context,
}
repoServerPort, err := kubeutil.PortForward(8081, c.namespace, &overrides, "app.kubernetes.io/name=argocd-repo-server")
if err != nil {
c.err = err
return
}
c.repoClientset = apiclient.NewRepoServerClientset(fmt.Sprintf("localhost:%d", repoServerPort), 60, apiclient.TLSConfiguration{
DisableTLS: false, StrictValidation: false})
})
if c.err != nil {
return nil, nil, c.err
}
return c.repoClientset.NewRepoServerClient()
}

View File

@@ -1,162 +0,0 @@
package headless
import (
"context"
"fmt"
"net"
"os"
"time"
"github.com/alicebob/miniredis/v2"
"github.com/go-redis/redis/v8"
"github.com/golang/protobuf/ptypes/empty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
argoapi "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/server"
servercache "github.com/argoproj/argo-cd/v2/server/cache"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/localconfig"
flag "github.com/spf13/pflag"
)
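// testAPI verifies that the in-process API server is reachable by calling its Version endpoint.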
func testAPI(clientOpts *argoapi.ClientOptions) error {
apiClient, err := argoapi.NewClient(clientOpts)
if err != nil {
return err
}
closer, versionClient, err := apiClient.NewVersionClient()
if err != nil {
return err
}
defer io.Close(closer)
_, err = versionClient.Version(context.Background(), &empty.Empty{})
return err
}
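// retrieveContextIfChanged returns the value of the --context flag only if the user explicitly set it,
// otherwise an empty string.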
func retrieveContextIfChanged(contextFlag *flag.Flag) string {
if contextFlag != nil && contextFlag.Changed {
return contextFlag.Value.String()
}
return ""
}
// InitCommand allows executing command in a headless mode: on the fly starts Argo CD API server and
// changes provided client options to use started API server port
func InitCommand(cmd *cobra.Command, clientOpts *argoapi.ClientOptions, port *int) *cobra.Command {
ctx, cancel := context.WithCancel(context.Background())
flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
clientConfig := cli.AddKubectlFlagsToSet(flags)
// copy k8s persistent flags into argocd command flags
flags.VisitAll(func(flag *pflag.Flag) {
// skip Kubernetes server flags since argocd has its own server flag
if flag.Name == "server" {
return
}
cmd.Flags().AddFlag(flag)
})
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
startInProcessAPI := clientOpts.Core
if !startInProcessAPI {
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
if err != nil {
return err
}
if localCfg != nil {
configCtx, err := localCfg.ResolveContext(clientOpts.Context)
if err != nil {
return err
}
startInProcessAPI = configCtx.Server.Core
}
}
if !startInProcessAPI {
return nil
}
// get rid of logging error handler
runtime.ErrorHandlers = runtime.ErrorHandlers[1:]
cli.SetLogLevel(log.ErrorLevel.String())
log.SetLevel(log.ErrorLevel)
os.Setenv(v1alpha1.EnvVarFakeInClusterConfig, "true")
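// When no port was requested, bind to port 0 so the OS picks a free port, then close the listener
// so the in-process API server can claim that port.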
if port == nil || *port == 0 {
ln, err := net.Listen("tcp", "localhost:0")
if err != nil {
return err
}
port = &ln.Addr().(*net.TCPAddr).Port
io.Close(ln)
}
restConfig, err := clientConfig.ClientConfig()
if err != nil {
return err
}
appClientset, err := appclientset.NewForConfig(restConfig)
if err != nil {
return err
}
kubeClientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return err
}
namespace, _, err := clientConfig.Namespace()
if err != nil {
return err
}
context := retrieveContextIfChanged(cmd.Flag("context"))
mr, err := miniredis.Run()
if err != nil {
return err
}
appstateCache := appstatecache.NewCache(cacheutil.NewCache(&forwardCacheClient{namespace: namespace, context: context}), time.Hour)
srv := server.NewServer(ctx, server.ArgoCDServerOpts{
EnableGZip: false,
Namespace: namespace,
ListenPort: *port,
AppClientset: appClientset,
DisableAuth: true,
RedisClient: redis.NewClient(&redis.Options{Addr: mr.Addr()}),
Cache: servercache.NewCache(appstateCache, 0, 0, 0),
KubeClientset: kubeClientset,
Insecure: true,
ListenHost: "localhost",
RepoClientset: &forwardRepoClientset{namespace: namespace, context: context},
})
go srv.Run(ctx, *port, 0)
clientOpts.ServerAddr = fmt.Sprintf("localhost:%d", *port)
clientOpts.PlainText = true
if !cache.WaitForCacheSync(ctx.Done(), srv.Initialized) {
log.Fatal("Timed out waiting for project cache to sync")
}
tries := 5
for i := 0; i < tries; i++ {
err = testAPI(clientOpts)
if err == nil {
break
}
time.Sleep(time.Second)
}
return err
}
cmd.PostRun = func(cmd *cobra.Command, args []string) {
cancel()
}
return cmd
}

View File

@@ -1,80 +0,0 @@
package headless
import (
"testing"
flag "github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
)
type StringFlag struct {
// The exact value provided on the flag
value string
}
func (f StringFlag) String() string {
return f.value
}
func (f *StringFlag) Set(value string) error {
f.value = value
return nil
}
func (f *StringFlag) Type() string {
return "string"
}
func Test_FlagContextNotChanged(t *testing.T) {
res := retrieveContextIfChanged(&flag.Flag{
Name: "",
Shorthand: "",
Usage: "",
Value: &StringFlag{value: "test"},
DefValue: "",
Changed: false,
NoOptDefVal: "",
Deprecated: "",
Hidden: false,
ShorthandDeprecated: "",
Annotations: nil,
})
assert.Equal(t, "", res)
}
func Test_FlagContextChanged(t *testing.T) {
res := retrieveContextIfChanged(&flag.Flag{
Name: "",
Shorthand: "",
Usage: "",
Value: &StringFlag{value: "test"},
DefValue: "",
Changed: true,
NoOptDefVal: "",
Deprecated: "",
Hidden: false,
ShorthandDeprecated: "",
Annotations: nil,
})
assert.Equal(t, "test", res)
}
func Test_FlagContextNil(t *testing.T) {
res := retrieveContextIfChanged(&flag.Flag{
Name: "",
Shorthand: "",
Usage: "",
Value: nil,
DefValue: "",
Changed: false,
NoOptDefVal: "",
Deprecated: "",
Hidden: false,
ShorthandDeprecated: "",
Annotations: nil,
})
assert.Equal(t, "", res)
}

View File

@@ -2,34 +2,28 @@ package commands
import (
"context"
"crypto/sha256"
"encoding/base64"
"crypto/tls"
"fmt"
"html"
"net"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/coreos/go-oidc"
"github.com/dgrijalva/jwt-go/v4"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/server/session"
"github.com/argoproj/argo-cd/server/settings"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/localconfig"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
sessionpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/session"
settingspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/settings"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
grpc_util "github.com/argoproj/argo-cd/v2/util/grpc"
"github.com/argoproj/argo-cd/v2/util/io"
jwtutil "github.com/argoproj/argo-cd/v2/util/jwt"
"github.com/argoproj/argo-cd/v2/util/localconfig"
oidcutil "github.com/argoproj/argo-cd/v2/util/oidc"
"github.com/argoproj/argo-cd/v2/util/rand"
)
// NewLoginCommand returns a new instance of `argocd login` command
@@ -39,103 +33,70 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
username string
password string
sso bool
ssoPort int
)
var command = &cobra.Command{
Use: "login SERVER",
Short: "Log in to Argo CD",
Long: "Log in to Argo CD",
Example: `# Login to Argo CD using a username and password
argocd login cd.argoproj.io
# Login to Argo CD using SSO
argocd login cd.argoproj.io --sso
# Configure direct access using Kubernetes API server
argocd login cd.argoproj.io --core`,
Run: func(c *cobra.Command, args []string) {
var server string
if len(args) != 1 && !globalClientOpts.PortForward && !globalClientOpts.Core {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
if globalClientOpts.PortForward {
server = "port-forward"
} else if globalClientOpts.Core {
server = "kubernetes"
} else {
server = args[0]
tlsTestResult, err := grpc_util.TestTLS(server)
errors.CheckError(err)
if !tlsTestResult.TLS {
if !globalClientOpts.PlainText {
if !cli.AskToProceed("WARNING: server is not configured with TLS. Proceed (y/n)? ") {
os.Exit(1)
}
globalClientOpts.PlainText = true
server := args[0]
tlsTestResult, err := grpc_util.TestTLS(server)
errors.CheckError(err)
if !tlsTestResult.TLS {
if !globalClientOpts.PlainText {
if !cli.AskToProceed("WARNING: server is not configured with TLS. Proceed (y/n)? ") {
os.Exit(1)
}
} else if tlsTestResult.InsecureErr != nil {
if !globalClientOpts.Insecure {
if !cli.AskToProceed(fmt.Sprintf("WARNING: server certificate had error: %s. Proceed insecurely (y/n)? ", tlsTestResult.InsecureErr)) {
os.Exit(1)
}
globalClientOpts.Insecure = true
globalClientOpts.PlainText = true
}
} else if tlsTestResult.InsecureErr != nil {
if !globalClientOpts.Insecure {
if !cli.AskToProceed(fmt.Sprintf("WARNING: server certificate had error: %s. Proceed insecurely (y/n)? ", tlsTestResult.InsecureErr)) {
os.Exit(1)
}
globalClientOpts.Insecure = true
}
}
clientOpts := argocdclient.ClientOptions{
ConfigPath: "",
ServerAddr: server,
Insecure: globalClientOpts.Insecure,
PlainText: globalClientOpts.PlainText,
ClientCertFile: globalClientOpts.ClientCertFile,
ClientCertKeyFile: globalClientOpts.ClientCertKeyFile,
GRPCWeb: globalClientOpts.GRPCWeb,
GRPCWebRootPath: globalClientOpts.GRPCWebRootPath,
PortForward: globalClientOpts.PortForward,
PortForwardNamespace: globalClientOpts.PortForwardNamespace,
Headers: globalClientOpts.Headers,
ConfigPath: "",
ServerAddr: server,
Insecure: globalClientOpts.Insecure,
PlainText: globalClientOpts.PlainText,
}
acdClient := argocdclient.NewClientOrDie(&clientOpts)
setConn, setIf := acdClient.NewSettingsClientOrDie()
defer util.Close(setConn)
if ctxName == "" {
ctxName = server
if globalClientOpts.GRPCWebRootPath != "" {
rootPath := strings.TrimRight(strings.TrimLeft(globalClientOpts.GRPCWebRootPath, "/"), "/")
ctxName = fmt.Sprintf("%s/%s", server, rootPath)
}
}
// Perform the login
var tokenString string
var refreshToken string
if !globalClientOpts.Core {
acdClient := argocdclient.NewClientOrDie(&clientOpts)
setConn, setIf := acdClient.NewSettingsClientOrDie()
defer io.Close(setConn)
if !sso {
tokenString = passwordLogin(acdClient, username, password)
} else {
ctx := context.Background()
httpClient, err := acdClient.HTTPClient()
errors.CheckError(err)
ctx = oidc.ClientContext(ctx, httpClient)
acdSet, err := setIf.Get(ctx, &settingspkg.SettingsQuery{})
errors.CheckError(err)
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
errors.CheckError(err)
tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider)
}
parser := &jwt.Parser{
ValidationHelper: jwt.NewValidationHelper(jwt.WithoutClaimsValidation(), jwt.WithoutAudienceValidation()),
}
claims := jwt.MapClaims{}
_, _, err := parser.ParseUnverified(tokenString, &claims)
if !sso {
tokenString = passwordLogin(acdClient, username, password)
} else {
acdSet, err := setIf.Get(context.Background(), &settings.SettingsQuery{})
errors.CheckError(err)
fmt.Printf("'%s' logged in successfully\n", userDisplayName(claims))
if !ssoConfigured(acdSet) {
log.Fatalf("ArgoCD instance is not configured with SSO")
}
tokenString, refreshToken = oauth2Login(server, clientOpts.PlainText)
}
parser := &jwt.Parser{
SkipClaimsValidation: true,
}
claims := jwt.MapClaims{}
_, _, err = parser.ParseUnverified(tokenString, &claims)
errors.CheckError(err)
fmt.Printf("'%s' logged in successfully\n", userDisplayName(claims))
// login successful. Persist the config
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
errors.CheckError(err)
@@ -143,12 +104,9 @@ argocd login cd.argoproj.io --core`,
localCfg = &localconfig.LocalConfig{}
}
localCfg.UpsertServer(localconfig.Server{
Server: server,
PlainText: globalClientOpts.PlainText,
Insecure: globalClientOpts.Insecure,
GRPCWeb: globalClientOpts.GRPCWeb,
GRPCWebRootPath: globalClientOpts.GRPCWebRootPath,
Core: globalClientOpts.Core,
Server: server,
PlainText: globalClientOpts.PlainText,
Insecure: globalClientOpts.Insecure,
})
localCfg.UpsertUser(localconfig.User{
Name: ctxName,
@@ -172,167 +130,129 @@ argocd login cd.argoproj.io --core`,
command.Flags().StringVar(&ctxName, "name", "", "name to use for the context")
command.Flags().StringVar(&username, "username", "", "the username of an account to authenticate")
command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate")
command.Flags().BoolVar(&sso, "sso", false, "perform SSO login")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "port to run local OAuth2 login application")
command.Flags().BoolVar(&sso, "sso", false, "Perform SSO login")
return command
}
func userDisplayName(claims jwt.MapClaims) string {
if email := jwtutil.StringField(claims, "email"); email != "" {
return email
if email, ok := claims["email"]; ok && email != nil {
return email.(string)
}
if name := jwtutil.StringField(claims, "name"); name != "" {
return name
if name, ok := claims["name"]; ok && name != nil {
return name.(string)
}
return jwtutil.StringField(claims, "sub")
return claims["sub"].(string)
}
func ssoConfigured(set *settings.Settings) bool {
return set.DexConfig != nil && len(set.DexConfig.Connectors) > 0
}
// getFreePort asks the kernel for a free open port that is ready to use.
func getFreePort() (int, error) {
ln, err := net.Listen("tcp", "[::]:0")
if err != nil {
return 0, err
}
return ln.Addr().(*net.TCPAddr).Port, ln.Close()
}
// oauth2Login opens a browser, runs a temporary HTTP server to delegate OAuth2 login flow and
// returns the JWT token and a refresh token (if supported)
func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCConfig, oauth2conf *oauth2.Config, provider *oidc.Provider) (string, string) {
oauth2conf.RedirectURL = fmt.Sprintf("http://localhost:%d/auth/callback", port)
oidcConf, err := oidcutil.ParseConfig(provider)
errors.CheckError(err)
log.Debug("OIDC Configuration:")
log.Debugf(" supported_scopes: %v", oidcConf.ScopesSupported)
log.Debugf(" response_types_supported: %v", oidcConf.ResponseTypesSupported)
// handledRequests ensures we do not handle more requests than necessary
handledRequests := 0
// completionChan is to signal flow completed. Non-empty string indicates error
completionChan := make(chan string)
// stateNonce is an OAuth2 state nonce
// According to the spec (https://www.rfc-editor.org/rfc/rfc6749#section-10.10), this must be guessable with
// probability <= 2^(-128). The following call generates one of 52^24 random strings, ~= 2^136 possibilities.
stateNonce, err := rand.String(24)
func oauth2Login(host string, plaintext bool) (string, string) {
ctx := context.Background()
port, err := getFreePort()
errors.CheckError(err)
var scheme = "https"
if plaintext {
scheme = "http"
}
conf := &oauth2.Config{
ClientID: common.ArgoCDCLIClientAppID,
Scopes: []string{"openid", "profile", "email", "groups", "offline_access"},
Endpoint: oauth2.Endpoint{
AuthURL: fmt.Sprintf("%s://%s%s/auth", scheme, host, common.DexAPIEndpoint),
TokenURL: fmt.Sprintf("%s://%s%s/token", scheme, host, common.DexAPIEndpoint),
},
RedirectURL: fmt.Sprintf("http://localhost:%d/auth/callback", port),
}
srv := &http.Server{Addr: ":" + strconv.Itoa(port)}
var tokenString string
var refreshToken string
loginCompleted := make(chan struct{})
handleErr := func(w http.ResponseWriter, errMsg string) {
http.Error(w, html.EscapeString(errMsg), http.StatusBadRequest)
completionChan <- errMsg
}
// PKCE implementation of https://tools.ietf.org/html/rfc7636
codeVerifier, err := rand.StringFromCharset(43, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~")
errors.CheckError(err)
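// Per RFC 7636, the S256 code challenge is the unpadded base64url encoding of the SHA-256 digest of the verifier.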
codeChallengeHash := sha256.Sum256([]byte(codeVerifier))
codeChallenge := base64.RawURLEncoding.EncodeToString(codeChallengeHash[:])
// Authorization redirect callback from OAuth2 auth flow.
// Handles both implicit and authorization code flow
callbackHandler := func(w http.ResponseWriter, r *http.Request) {
log.Debugf("Callback: %s", r.URL)
defer func() {
loginCompleted <- struct{}{}
}()
if formErr := r.FormValue("error"); formErr != "" {
handleErr(w, fmt.Sprintf("%s: %s", formErr, r.FormValue("error_description")))
// Authorization redirect callback from OAuth2 auth flow.
if errMsg := r.FormValue("error"); errMsg != "" {
http.Error(w, errMsg+": "+r.FormValue("error_description"), http.StatusBadRequest)
log.Fatal(errMsg)
return
}
handledRequests++
if handledRequests > 2 {
// Since implicit flow will redirect back to ourselves, this counter ensures we do not
// fall into a redirect loop (e.g. user visits the page by hand)
handleErr(w, "Unable to complete login flow: too many redirects")
code := r.FormValue("code")
if code == "" {
errMsg := fmt.Sprintf("no code in request: %q", r.Form)
http.Error(w, errMsg, http.StatusBadRequest)
log.Fatal(errMsg)
return
}
tok, err := conf.Exchange(ctx, code)
errors.CheckError(err)
log.Info("Authentication successful")
if len(r.Form) == 0 {
// If we get here, no form data was set. We presume to be performing an implicit login
// flow where the id_token is contained in a URL fragment, making it inaccessible to be
// read from the request. This javascript will redirect the browser to send the
// fragments as query parameters so our callback handler can read and return token.
fmt.Fprintf(w, `<script>window.location.search = window.location.hash.substring(1)</script>`)
var ok bool
tokenString, ok = tok.Extra("id_token").(string)
if !ok {
errMsg := "no id_token in token response"
http.Error(w, errMsg, http.StatusInternalServerError)
log.Fatal(errMsg)
return
}
if state := r.FormValue("state"); state != stateNonce {
handleErr(w, "Unknown state nonce")
return
}
tokenString = r.FormValue("id_token")
if tokenString == "" {
code := r.FormValue("code")
if code == "" {
handleErr(w, fmt.Sprintf("no code in request: %q", r.Form))
return
}
opts := []oauth2.AuthCodeOption{oauth2.SetAuthURLParam("code_verifier", codeVerifier)}
tok, err := oauth2conf.Exchange(ctx, code, opts...)
if err != nil {
handleErr(w, err.Error())
return
}
var ok bool
tokenString, ok = tok.Extra("id_token").(string)
if !ok {
handleErr(w, "no id_token in token response")
return
}
refreshToken, _ = tok.Extra("refresh_token").(string)
}
refreshToken, _ = tok.Extra("refresh_token").(string)
log.Debugf("Token: %s", tokenString)
log.Debugf("Refresh Token: %s", tokenString)
successPage := `
<div style="height:100px; width:100%!; display:flex; flex-direction: column; justify-content: center; align-items:center; background-color:#2ecc71; color:white; font-size:22"><div>Authentication successful!</div></div>
<p style="margin-top:20px; font-size:18; text-align:center">Authentication was successful, you can now return to CLI. This page will close automatically</p>
<script>window.onload=function(){setTimeout(this.close, 4000)}</script>
`
fmt.Fprint(w, successPage)
completionChan <- ""
fmt.Fprintf(w, successPage)
}
srv := &http.Server{Addr: "localhost:" + strconv.Itoa(port)}
http.HandleFunc("/auth/callback", callbackHandler)
// add transport for self-signed certificate to context
sslcli := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
ctx = context.WithValue(ctx, oauth2.HTTPClient, sslcli)
// Redirect user to login & consent page to ask for permission for the scopes specified above.
fmt.Printf("Opening browser for authentication\n")
var url string
grantType := oidcutil.InferGrantType(oidcConf)
opts := []oauth2.AuthCodeOption{oauth2.AccessTypeOffline}
if claimsRequested := oidcSettings.GetIDTokenClaims(); claimsRequested != nil {
opts = oidcutil.AppendClaimsAuthenticationRequestParameter(opts, claimsRequested)
}
switch grantType {
case oidcutil.GrantTypeAuthorizationCode:
opts = append(opts, oauth2.SetAuthURLParam("code_challenge", codeChallenge))
opts = append(opts, oauth2.SetAuthURLParam("code_challenge_method", "S256"))
url = oauth2conf.AuthCodeURL(stateNonce, opts...)
case oidcutil.GrantTypeImplicit:
url, err = oidcutil.ImplicitFlowURL(oauth2conf, stateNonce, opts...)
errors.CheckError(err)
default:
log.Fatalf("Unsupported grant type: %v", grantType)
}
fmt.Printf("Performing %s flow login: %s\n", grantType, url)
log.Info("Opening browser for authentication")
url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
log.Infof("Authentication URL: %s", url)
time.Sleep(1 * time.Second)
err = open.Start(url)
err = open.Run(url)
errors.CheckError(err)
go func() {
log.Debugf("Listen: %s", srv.Addr)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
log.Fatalf("Temporary HTTP server failed: %s", err)
log.Fatalf("listen: %s\n", err)
}
}()
errMsg := <-completionChan
if errMsg != "" {
log.Fatal(errMsg)
}
fmt.Printf("Authentication successful\n")
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
<-loginCompleted
_ = srv.Shutdown(ctx)
log.Debugf("Token: %s", tokenString)
log.Debugf("Refresh Token: %s", refreshToken)
return tokenString, refreshToken
}
func passwordLogin(acdClient argocdclient.Client, username, password string) string {
username, password = cli.PromptCredentials(username, password)
sessConn, sessionIf := acdClient.NewSessionClientOrDie()
defer io.Close(sessConn)
sessionRequest := sessionpkg.SessionCreateRequest{
defer util.Close(sessConn)
sessionRequest := session.SessionCreateRequest{
Username: username,
Password: password,
}

View File

@@ -1,31 +0,0 @@
package commands
import (
"testing"
"github.com/dgrijalva/jwt-go/v4"
"github.com/stretchr/testify/assert"
)
//
func Test_userDisplayName_email(t *testing.T) {
claims := jwt.MapClaims{"iss": "qux", "sub": "foo", "email": "firstname.lastname@example.com", "groups": []string{"baz"}}
actualName := userDisplayName(claims)
expectedName := "firstname.lastname@example.com"
assert.Equal(t, expectedName, actualName)
}
func Test_userDisplayName_name(t *testing.T) {
claims := jwt.MapClaims{"iss": "qux", "sub": "foo", "name": "Firstname Lastname", "groups": []string{"baz"}}
actualName := userDisplayName(claims)
expectedName := "Firstname Lastname"
assert.Equal(t, expectedName, actualName)
}
func Test_userDisplayName_sub(t *testing.T) {
claims := jwt.MapClaims{"iss": "qux", "sub": "foo", "groups": []string{"baz"}}
actualName := userDisplayName(claims)
expectedName := "foo"
assert.Equal(t, expectedName, actualName)
}

View File

@@ -1,50 +0,0 @@
package commands
import (
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/localconfig"
)
// NewLogoutCommand returns a new instance of `argocd logout` command
func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "logout CONTEXT",
Short: "Log out from Argo CD",
Long: "Log out from Argo CD",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
context := args[0]
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
errors.CheckError(err)
if localCfg == nil {
log.Fatalf("Nothing to logout from")
}
ok := localCfg.RemoveToken(context)
if !ok {
log.Fatalf("Context %s does not exist", context)
}
err = localconfig.ValidateLocalConfig(*localCfg)
if err != nil {
log.Fatalf("Error in logging out: %s", err)
}
err = localconfig.WriteLocalConfig(*localCfg, globalClientOpts.ConfigPath)
errors.CheckError(err)
fmt.Printf("Logged out from '%s'\n", context)
},
}
return command
}

View File

@@ -1,41 +0,0 @@
package commands
import (
"io/ioutil"
"os"
"testing"
"github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/stretchr/testify/assert"
"github.com/argoproj/argo-cd/v2/util/localconfig"
)
func TestLogout(t *testing.T) {
// Write the test config file
err := ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
assert.NoError(t, err)
localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
command := NewLogoutCommand(&apiclient.ClientOptions{ConfigPath: testConfigFilePath})
command.Run(nil, []string{"localhost:8080"})
localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
assert.NotContains(t, localConfig.Users, localconfig.User{AuthToken: "vErrYS3c3tReFRe$hToken", Name: "localhost:8080"})
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd1.example.com:443", Server: "argocd1.example.com:443", User: "argocd1.example.com:443"})
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd2.example.com:443", Server: "argocd2.example.com:443", User: "argocd2.example.com:443"})
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
// Write the file again so that no conflicts are made in git
err = ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
assert.NoError(t, err)
}

View File

@@ -1,37 +1,48 @@
package commands
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
projectpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/project"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/git"
"github.com/argoproj/argo-cd/v2/util/gpg"
argoio "github.com/argoproj/argo-cd/v2/util/io"
"strings"
"context"
"fmt"
"text/tabwriter"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/project"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/git"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/apis/meta/v1"
)
type policyOpts struct {
action string
permission string
object string
type projectOpts struct {
description string
destinations []string
sources []string
}
func (opts *projectOpts) GetDestinations() []v1alpha1.ApplicationDestination {
destinations := make([]v1alpha1.ApplicationDestination, 0)
for _, destStr := range opts.destinations {
parts := strings.Split(destStr, ",")
if len(parts) != 2 {
log.Fatalf("Expected destination of the form: server,namespace. Received: %s", destStr)
} else {
destinations = append(destinations, v1alpha1.ApplicationDestination{
Server: parts[0],
Namespace: parts[1],
})
}
}
return destinations
}
// NewProjectCommand returns a new instance of an `argocd proj` command
@@ -44,74 +55,61 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
os.Exit(1)
},
}
command.AddCommand(NewProjectRoleCommand(clientOpts))
command.AddCommand(NewProjectCreateCommand(clientOpts))
command.AddCommand(NewProjectGetCommand(clientOpts))
command.AddCommand(NewProjectDeleteCommand(clientOpts))
command.AddCommand(NewProjectListCommand(clientOpts))
command.AddCommand(NewProjectSetCommand(clientOpts))
command.AddCommand(NewProjectEditCommand(clientOpts))
command.AddCommand(NewProjectAddSignatureKeyCommand(clientOpts))
command.AddCommand(NewProjectRemoveSignatureKeyCommand(clientOpts))
command.AddCommand(NewProjectAddDestinationCommand(clientOpts))
command.AddCommand(NewProjectRemoveDestinationCommand(clientOpts))
command.AddCommand(NewProjectAddSourceCommand(clientOpts))
command.AddCommand(NewProjectRemoveSourceCommand(clientOpts))
command.AddCommand(NewProjectAllowClusterResourceCommand(clientOpts))
command.AddCommand(NewProjectDenyClusterResourceCommand(clientOpts))
command.AddCommand(NewProjectAllowNamespaceResourceCommand(clientOpts))
command.AddCommand(NewProjectDenyNamespaceResourceCommand(clientOpts))
command.AddCommand(NewProjectWindowsCommand(clientOpts))
command.AddCommand(NewProjectAddOrphanedIgnoreCommand(clientOpts))
command.AddCommand(NewProjectRemoveOrphanedIgnoreCommand(clientOpts))
return command
}
func addPolicyFlags(command *cobra.Command, opts *policyOpts) {
command.Flags().StringVarP(&opts.action, "action", "a", "", "Action to grant/deny permission on (e.g. get, create, list, update, delete)")
command.Flags().StringVarP(&opts.permission, "permission", "p", "allow", "Whether to allow or deny access to object with the action. This can only be 'allow' or 'deny'")
command.Flags().StringVarP(&opts.object, "object", "o", "", "Object within the project to grant/deny access. Use '*' for a wildcard. Will want access to '<project>/<object>'")
}
func humanizeTimestamp(epoch int64) string {
ts := time.Unix(epoch, 0)
return fmt.Sprintf("%s (%s)", ts.Format(time.RFC3339), humanize.Time(ts))
func addProjFlags(command *cobra.Command, opts *projectOpts) {
command.Flags().StringVarP(&opts.description, "description", "", "desc", "Project description")
command.Flags().StringArrayVarP(&opts.destinations, "dest", "d", []string{},
"Allowed deployment destination. Includes comma separated server url and namespace (e.g. https://192.168.99.100:8443,default")
command.Flags().StringArrayVarP(&opts.sources, "src", "s", []string{}, "Allowed deployment source repository URL.")
}
// NewProjectCreateCommand returns a new instance of an `argocd proj create` command
func NewProjectCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts cmdutil.ProjectOpts
fileURL string
upsert bool
opts projectOpts
)
var command = &cobra.Command{
Use: "create PROJECT",
Short: "Create a project",
Run: func(c *cobra.Command, args []string) {
proj, err := cmdutil.ConstructAppProj(fileURL, args, opts, c)
errors.CheckError(err)
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
proj := v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{Name: projName},
Spec: v1alpha1.AppProjectSpec{
Description: opts.description,
Destinations: opts.GetDestinations(),
SourceRepos: opts.sources,
},
}
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
_, err = projIf.Create(context.Background(), &projectpkg.ProjectCreateRequest{Project: proj, Upsert: upsert})
defer util.Close(conn)
_, err := projIf.Create(context.Background(), &project.ProjectCreateRequest{Project: &proj})
errors.CheckError(err)
},
}
command.Flags().BoolVar(&upsert, "upsert", false, "Allows to override a project with the same name even if supplied project spec is different from existing spec")
command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the project")
err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"})
if err != nil {
log.Fatal(err)
}
cmdutil.AddProjFlags(command, &opts)
addProjFlags(command, &opts)
return command
}
// NewProjectSetCommand returns a new instance of an `argocd proj set` command
func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts cmdutil.ProjectOpts
opts projectOpts
)
var command = &cobra.Command{
Use: "set PROJECT",
@@ -123,113 +121,41 @@ func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
if visited := cmdutil.SetProjSpecOptions(c.Flags(), &proj.Spec, &opts); visited == 0 {
visited := 0
c.Flags().Visit(func(f *pflag.Flag) {
visited++
switch f.Name {
case "description":
proj.Spec.Description = opts.description
case "dest":
proj.Spec.Destinations = opts.GetDestinations()
case "src":
proj.Spec.SourceRepos = opts.sources
}
})
if visited == 0 {
log.Error("Please set at least one option to update")
c.HelpFunc()(c, args)
os.Exit(1)
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
cmdutil.AddProjFlags(command, &opts)
return command
}
// NewProjectAddSignatureKeyCommand returns a new instance of an `argocd proj add-signature-key` command
func NewProjectAddSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "add-signature-key PROJECT KEY-ID",
Short: "Add GnuPG signature key to project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
signatureKey := args[1]
if !gpg.IsShortKeyID(signatureKey) && !gpg.IsLongKeyID(signatureKey) {
log.Fatalf("%s is not a valid GnuPG key ID", signatureKey)
}
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for _, key := range proj.Spec.SignatureKeys {
if key.KeyID == signatureKey {
log.Fatal("Specified signature key is already defined in project")
}
}
proj.Spec.SignatureKeys = append(proj.Spec.SignatureKeys, v1alpha1.SignatureKey{KeyID: signatureKey})
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
// NewProjectRemoveSignatureKeyCommand returns a new instance of an `argocd proj remove-signature-key` command
func NewProjectRemoveSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "remove-signature-key PROJECT KEY-ID",
Short: "Remove GnuPG signature key from project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
signatureKey := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
index := -1
for i, key := range proj.Spec.SignatureKeys {
if key.KeyID == signatureKey {
index = i
break
}
}
if index == -1 {
log.Fatal("Specified signature key is not configured for project")
} else {
proj.Spec.SignatureKeys = append(proj.Spec.SignatureKeys[:index], proj.Spec.SignatureKeys[index+1:]...)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
}
addProjFlags(command, &opts)
return command
}
// NewProjectAddDestinationCommand returns a new instance of an `argocd proj add-destination` command
func NewProjectAddDestinationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var nameInsteadServer bool
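// buildApplicationDestination treats the positional argument as a cluster name when --name is set,
// otherwise as a server URL.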
buildApplicationDestination := func(destination string, namespace string, nameInsteadServer bool) v1alpha1.ApplicationDestination {
if nameInsteadServer {
return v1alpha1.ApplicationDestination{Name: destination, Namespace: namespace}
}
return v1alpha1.ApplicationDestination{Server: destination, Namespace: namespace}
}
var command = &cobra.Command{
Use: "add-destination PROJECT SERVER/NAME NAMESPACE",
Use: "add-destination PROJECT SERVER NAMESPACE",
Short: "Add project destination",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
@@ -237,27 +163,24 @@ func NewProjectAddDestinationCommand(clientOpts *argocdclient.ClientOptions) *co
os.Exit(1)
}
projName := args[0]
server := args[1]
namespace := args[2]
destination := buildApplicationDestination(args[1], namespace, nameInsteadServer)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
for _, dest := range proj.Spec.Destinations {
dstServerExist := destination.Server != "" && dest.Server == destination.Server
dstNameExist := destination.Name != "" && dest.Name == destination.Name
if dest.Namespace == namespace && (dstServerExist || dstNameExist) {
if dest.Namespace == namespace && dest.Server == server {
log.Fatal("Specified destination is already defined in project")
}
}
proj.Spec.Destinations = append(proj.Spec.Destinations, destination)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
proj.Spec.Destinations = append(proj.Spec.Destinations, v1alpha1.ApplicationDestination{Server: server, Namespace: namespace})
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
command.Flags().BoolVar(&nameInsteadServer, "name", false, "Use name as destination instead server")
return command
}
@@ -275,9 +198,9 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions)
server := args[1]
namespace := args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
index := -1
@@ -291,7 +214,7 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions)
log.Fatal("Specified destination does not exist in project")
} else {
proj.Spec.Destinations = append(proj.Spec.Destinations[:index], proj.Spec.Destinations[index+1:]...)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
@@ -300,96 +223,6 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions)
return command
}
// NewProjectAddOrphanedIgnoreCommand returns a new instance of an `argocd proj add-orphaned-ignore` command
func NewProjectAddOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
name string
)
var command = &cobra.Command{
Use: "add-orphaned-ignore PROJECT GROUP KIND",
Short: "Add a resource to orphaned ignore list",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
group := args[1]
kind := args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
if proj.Spec.OrphanedResources == nil {
settings := v1alpha1.OrphanedResourcesMonitorSettings{}
settings.Ignore = []v1alpha1.OrphanedResourceKey{{Group: group, Kind: kind, Name: name}}
proj.Spec.OrphanedResources = &settings
} else {
for _, ignore := range proj.Spec.OrphanedResources.Ignore {
if ignore.Group == group && ignore.Kind == kind && ignore.Name == name {
log.Fatal("Specified resource is already defined in the orphaned ignore list of project")
return
}
}
proj.Spec.OrphanedResources.Ignore = append(proj.Spec.OrphanedResources.Ignore, v1alpha1.OrphanedResourceKey{Group: group, Kind: kind, Name: name})
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
command.Flags().StringVar(&name, "name", "", "Resource name pattern")
return command
}
// NewProjectRemoveOrphanedIgnoreCommand returns a new instance of an `argocd proj remove-orphaned-ignore` command
func NewProjectRemoveOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
name string
)
var command = &cobra.Command{
Use: "remove-orphaned-ignore PROJECT GROUP KIND NAME",
Short: "Remove a resource from orphaned ignore list",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
group := args[1]
kind := args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
if proj.Spec.OrphanedResources == nil {
log.Fatal("Specified resource does not exist in the orphaned ignore list of project")
return
}
index := -1
for i, ignore := range proj.Spec.OrphanedResources.Ignore {
if ignore.Group == group && ignore.Kind == kind && ignore.Name == name {
index = i
break
}
}
if index == -1 {
log.Fatal("Specified resource does not exist in the orphaned ignore of project")
} else {
proj.Spec.OrphanedResources.Ignore = append(proj.Spec.OrphanedResources.Ignore[:index], proj.Spec.OrphanedResources.Ignore[index+1:]...)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
}
command.Flags().StringVar(&name, "name", "", "Resource name pattern")
return command
}
// NewProjectAddSourceCommand returns a new instance of an `argocd proj add-src` command
func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
@@ -403,141 +236,24 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
projName := args[0]
url := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
for _, item := range proj.Spec.SourceRepos {
if item == "*" && item == url {
fmt.Printf("Source repository '*' already allowed in project\n")
return
}
if git.SameURL(item, url) {
fmt.Printf("Source repository '%s' already allowed in project\n", item)
return
if item == git.NormalizeGitURL(url) {
log.Fatal("Specified source repository is already defined in project")
}
}
proj.Spec.SourceRepos = append(proj.Spec.SourceRepos, url)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
func modifyResourcesList(list *[]metav1.GroupKind, add bool, listDesc string, group string, kind string) bool {
if add {
for _, item := range *list {
if item.Group == group && item.Kind == kind {
fmt.Printf("Group '%s' and kind '%s' already present in %s resources\n", group, kind, listDesc)
return false
}
}
fmt.Printf("Group '%s' and kind '%s' is added to %s resources\n", group, kind, listDesc)
*list = append(*list, v1.GroupKind{Group: group, Kind: kind})
return true
} else {
index := -1
for i, item := range *list {
if item.Group == group && item.Kind == kind {
index = i
break
}
}
if index == -1 {
fmt.Printf("Group '%s' and kind '%s' not in %s resources\n", group, kind, listDesc)
return false
}
*list = append((*list)[:index], (*list)[index+1:]...)
fmt.Printf("Group '%s' and kind '%s' is removed from %s resources\n", group, kind, listDesc)
return true
}
}
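modifyResourcesList above is a single add/remove toggle over a slice of group/kind pairs: adding appends only when the pair is absent, removing deletes it only when present, and the boolean result tells the caller whether a project update is needed. A minimal, self-contained sketch of the same semantics (the local groupKind type and toggleGroupKind name are invented for illustration and are not part of this changeset):

package main

import "fmt"

// groupKind stands in for metav1.GroupKind for this illustration only.
type groupKind struct{ Group, Kind string }

// toggleGroupKind mirrors modifyResourcesList: add appends the pair if absent,
// remove deletes it if present; the result reports whether the list changed.
func toggleGroupKind(list *[]groupKind, add bool, group, kind string) bool {
	for i, item := range *list {
		if item.Group == group && item.Kind == kind {
			if add {
				return false // already present, nothing to do
			}
			*list = append((*list)[:i], (*list)[i+1:]...)
			return true
		}
	}
	if !add {
		return false // not present, nothing to remove
	}
	*list = append(*list, groupKind{Group: group, Kind: kind})
	return true
}

func main() {
	var allow []groupKind
	fmt.Println(toggleGroupKind(&allow, true, "apps", "Deployment"))  // true: added
	fmt.Println(toggleGroupKind(&allow, true, "apps", "Deployment"))  // false: duplicate
	fmt.Println(toggleGroupKind(&allow, false, "apps", "Deployment")) // true: removed
}

modifyResourceListCmd below only decides which of the four project lists (cluster or namespaced, allow or deny) the toggle is applied to before issuing the project update.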
func modifyResourceListCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, allow bool, namespacedList bool) *cobra.Command {
var (
listType string
defaultList string
)
if namespacedList {
defaultList = "deny"
} else {
defaultList = "allow"
}
var command = &cobra.Command{
Use: cmdUse,
Short: cmdDesc,
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, group, kind := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
var list, allowList, denyList *[]metav1.GroupKind
var listAction, listDesc string
var add bool
if namespacedList {
allowList, denyList = &proj.Spec.NamespaceResourceWhitelist, &proj.Spec.NamespaceResourceBlacklist
listDesc = "namespaced"
} else {
allowList, denyList = &proj.Spec.ClusterResourceWhitelist, &proj.Spec.ClusterResourceBlacklist
listDesc = "cluster"
}
if (listType == "allow") || (listType == "white") {
list = allowList
listAction = "allowed"
add = allow
} else {
list = denyList
listAction = "denied"
add = !allow
}
if modifyResourcesList(list, add, listAction+" "+listDesc, group, kind) {
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
}
command.Flags().StringVarP(&listType, "list", "l", defaultList, "Use deny list or allow list. This can only be 'allow' or 'deny'")
return command
}
// NewProjectAllowNamespaceResourceCommand returns a new instance of an `argocd proj allow-namespace-resource` command
func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-namespace-resource PROJECT GROUP KIND"
desc := "Removes a namespaced API resource from the deny list or add a namespaced API resource to the allow list"
return modifyResourceListCmd(use, desc, clientOpts, true, true)
}
// NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command
func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-namespace-resource PROJECT GROUP KIND"
desc := "Adds a namespaced API resource to the deny list or removes a namespaced API resource from the allow list"
return modifyResourceListCmd(use, desc, clientOpts, false, true)
}
// NewProjectDenyClusterResourceCommand returns a new instance of an `argocd proj deny-cluster-resource` command
func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-cluster-resource PROJECT GROUP KIND"
desc := "Removes a cluster-scoped API resource from the allow list and adds it to deny list"
return modifyResourceListCmd(use, desc, clientOpts, false, false)
}
// NewProjectAllowClusterResourceCommand returns a new instance of an `argocd proj allow-cluster-resource` command
func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-cluster-resource PROJECT GROUP KIND"
desc := "Adds a cluster-scoped API resource to the allow list and removes it from deny list"
return modifyResourceListCmd(use, desc, clientOpts, true, false)
}
// NewProjectRemoveSourceCommand returns a new instance of an `argocd proj remove-src` command
func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
@@ -551,23 +267,23 @@ func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobr
projName := args[0]
url := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
index := -1
for i, item := range proj.Spec.SourceRepos {
if item == url {
if item == git.NormalizeGitURL(url) {
index = i
break
}
}
if index == -1 {
fmt.Printf("Source repository '%s' does not exist in project\n", url)
log.Fatal("Specified source repository does not exist in project")
} else {
proj.Spec.SourceRepos = append(proj.Spec.SourceRepos[:index], proj.Spec.SourceRepos[index+1:]...)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
@@ -587,9 +303,9 @@ func NewProjectDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
os.Exit(1)
}
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
defer util.Close(conn)
for _, name := range args {
_, err := projIf.Delete(context.Background(), &projectpkg.ProjectQuery{Name: name})
_, err := projIf.Delete(context.Background(), &project.ProjectQuery{Name: name})
errors.CheckError(err)
}
},
@@ -597,240 +313,22 @@ func NewProjectDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
return command
}
// Print list of project names
func printProjectNames(projects []v1alpha1.AppProject) {
for _, p := range projects {
fmt.Println(p.Name)
}
}
// Print table of project info
func printProjectTable(projects []v1alpha1.AppProject) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\tSIGNATURE-KEYS\tORPHANED-RESOURCES\n")
for _, p := range projects {
printProjectLine(w, &p)
}
_ = w.Flush()
}
// NewProjectListCommand returns a new instance of an `argocd proj list` command
func NewProjectListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List projects",
Run: func(c *cobra.Command, args []string) {
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
projects, err := projIf.List(context.Background(), &projectpkg.ProjectQuery{})
defer util.Close(conn)
projects, err := projIf.List(context.Background(), &project.ProjectQuery{})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(projects.Items, output, false)
errors.CheckError(err)
case "name":
printProjectNames(projects.Items)
case "wide", "":
printProjectTable(projects.Items)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\n")
for _, p := range projects.Items {
fmt.Fprintf(w, "%s\t%s\t%v\n", p.Name, p.Spec.Description, p.Spec.Destinations)
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|name")
return command
}
func formatOrphanedResources(p *v1alpha1.AppProject) string {
if p.Spec.OrphanedResources == nil {
return "disabled"
}
details := fmt.Sprintf("warn=%v", p.Spec.OrphanedResources.IsWarn())
if len(p.Spec.OrphanedResources.Ignore) > 0 {
details = fmt.Sprintf("%s, ignored %d", details, len(p.Spec.OrphanedResources.Ignore))
}
return fmt.Sprintf("enabled (%s)", details)
}
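formatOrphanedResources collapses the orphaned-resources monitor settings into one table cell: "disabled" when unset, otherwise "enabled (...)" with the warn flag and, when present, the count of ignore entries. A small self-contained sketch of those strings, using a simplified settings struct instead of v1alpha1.OrphanedResourcesMonitorSettings (a plain bool stands in for the real IsWarn() accessor; names here are invented for illustration):

package main

import "fmt"

// orphanedSettings is a simplified stand-in for the monitor settings above.
type orphanedSettings struct {
	Warn   bool
	Ignore []string
}

// formatOrphaned reproduces the formatting rules of formatOrphanedResources.
func formatOrphaned(s *orphanedSettings) string {
	if s == nil {
		return "disabled"
	}
	details := fmt.Sprintf("warn=%v", s.Warn)
	if len(s.Ignore) > 0 {
		details = fmt.Sprintf("%s, ignored %d", details, len(s.Ignore))
	}
	return fmt.Sprintf("enabled (%s)", details)
}

func main() {
	fmt.Println(formatOrphaned(nil))                                         // disabled
	fmt.Println(formatOrphaned(&orphanedSettings{Warn: true}))               // enabled (warn=true)
	fmt.Println(formatOrphaned(&orphanedSettings{Ignore: []string{"cm/*"}})) // enabled (warn=false, ignored 1)
}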
func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
var destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys string
switch len(p.Spec.Destinations) {
case 0:
destinations = "<none>"
case 1:
destinations = fmt.Sprintf("%s,%s", p.Spec.Destinations[0].Server, p.Spec.Destinations[0].Namespace)
default:
destinations = fmt.Sprintf("%d destinations", len(p.Spec.Destinations))
}
switch len(p.Spec.SourceRepos) {
case 0:
sourceRepos = "<none>"
case 1:
sourceRepos = p.Spec.SourceRepos[0]
default:
sourceRepos = fmt.Sprintf("%d repos", len(p.Spec.SourceRepos))
}
switch len(p.Spec.ClusterResourceWhitelist) {
case 0:
clusterWhitelist = "<none>"
case 1:
clusterWhitelist = fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[0].Group, p.Spec.ClusterResourceWhitelist[0].Kind)
default:
clusterWhitelist = fmt.Sprintf("%d resources", len(p.Spec.ClusterResourceWhitelist))
}
switch len(p.Spec.NamespaceResourceBlacklist) {
case 0:
namespaceBlacklist = "<none>"
default:
namespaceBlacklist = fmt.Sprintf("%d resources", len(p.Spec.NamespaceResourceBlacklist))
}
switch len(p.Spec.SignatureKeys) {
case 0:
signatureKeys = "<none>"
default:
signatureKeys = fmt.Sprintf("%d key(s)", len(p.Spec.SignatureKeys))
}
fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys, formatOrphanedResources(p))
}
func printProject(p *v1alpha1.AppProject) {
const printProjFmtStr = "%-29s%s\n"
fmt.Printf(printProjFmtStr, "Name:", p.Name)
fmt.Printf(printProjFmtStr, "Description:", p.Spec.Description)
// Print destinations
dest0 := "<none>"
if len(p.Spec.Destinations) > 0 {
dest0 = fmt.Sprintf("%s,%s", p.Spec.Destinations[0].Server, p.Spec.Destinations[0].Namespace)
}
fmt.Printf(printProjFmtStr, "Destinations:", dest0)
for i := 1; i < len(p.Spec.Destinations); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s,%s", p.Spec.Destinations[i].Server, p.Spec.Destinations[i].Namespace))
}
// Print sources
src0 := "<none>"
if len(p.Spec.SourceRepos) > 0 {
src0 = p.Spec.SourceRepos[0]
}
fmt.Printf(printProjFmtStr, "Repositories:", src0)
for i := 1; i < len(p.Spec.SourceRepos); i++ {
fmt.Printf(printProjFmtStr, "", p.Spec.SourceRepos[i])
}
// Print allowed cluster resources
cwl0 := "<none>"
if len(p.Spec.ClusterResourceWhitelist) > 0 {
cwl0 = fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[0].Group, p.Spec.ClusterResourceWhitelist[0].Kind)
}
fmt.Printf(printProjFmtStr, "Allowed Cluster Resources:", cwl0)
for i := 1; i < len(p.Spec.ClusterResourceWhitelist); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[i].Group, p.Spec.ClusterResourceWhitelist[i].Kind))
}
// Print denied namespaced resources
rbl0 := "<none>"
if len(p.Spec.NamespaceResourceBlacklist) > 0 {
rbl0 = fmt.Sprintf("%s/%s", p.Spec.NamespaceResourceBlacklist[0].Group, p.Spec.NamespaceResourceBlacklist[0].Kind)
}
fmt.Printf(printProjFmtStr, "Denied Namespaced Resources:", rbl0)
for i := 1; i < len(p.Spec.NamespaceResourceBlacklist); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s/%s", p.Spec.NamespaceResourceBlacklist[i].Group, p.Spec.NamespaceResourceBlacklist[i].Kind))
}
// Print required signature keys
signatureKeysStr := "<none>"
if len(p.Spec.SignatureKeys) > 0 {
kids := make([]string, 0)
for _, key := range p.Spec.SignatureKeys {
kids = append(kids, key.KeyID)
}
signatureKeysStr = strings.Join(kids, ", ")
}
fmt.Printf(printProjFmtStr, "Signature keys:", signatureKeysStr)
fmt.Printf(printProjFmtStr, "Orphaned Resources:", formatOrphanedResources(p))
}
// NewProjectGetCommand returns a new instance of an `argocd proj get` command
func NewProjectGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "get PROJECT",
Short: "Get project details",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
p, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResource(p, output)
errors.CheckError(err)
case "wide", "":
printProject(p)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
return command
}
func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "edit PROJECT",
Short: "Edit project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer argoio.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
projData, err := json.Marshal(proj.Spec)
errors.CheckError(err)
projData, err = yaml.JSONToYAML(projData)
errors.CheckError(err)
cli.InteractiveEdit(fmt.Sprintf("%s-*-edit.yaml", projName), projData, func(input []byte) error {
input, err = yaml.YAMLToJSON(input)
if err != nil {
return err
}
updatedSpec := v1alpha1.AppProjectSpec{}
err = json.Unmarshal(input, &updatedSpec)
if err != nil {
return err
}
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
if err != nil {
return err
}
proj.Spec = updatedSpec
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
if err != nil {
return fmt.Errorf("Failed to update project:\n%v", err)
}
return err
})
_ = w.Flush()
},
}
return command

View File

@@ -1,505 +0,0 @@
package commands
import (
"context"
"fmt"
"os"
"strconv"
"text/tabwriter"
"time"
timeutil "github.com/argoproj/pkg/time"
jwtgo "github.com/dgrijalva/jwt-go/v4"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
projectpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/project"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/jwt"
)
const (
policyTemplate = "p, proj:%s:%s, applications, %s, %s/%s, %s"
)
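policyTemplate above expands to a single RBAC policy line of the form `p, proj:<project>:<role>, applications, <action>, <project>/<object>, <permission>`. A minimal sketch of the substitution performed by the add-policy and remove-policy commands below (the project, role, action, object, and permission values here are invented examples):

package main

import "fmt"

// Same format string as policyTemplate above.
const policyTemplate = "p, proj:%s:%s, applications, %s, %s/%s, %s"

func main() {
	// Argument order follows the add-policy command below:
	// project, role, action, project, object, permission.
	policy := fmt.Sprintf(policyTemplate, "my-proj", "ci-role", "get", "my-proj", "*", "allow")
	fmt.Println(policy) // p, proj:my-proj:ci-role, applications, get, my-proj/*, allow
}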
// NewProjectRoleCommand returns a new instance of the `argocd proj role` command
func NewProjectRoleCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
roleCommand := &cobra.Command{
Use: "role",
Short: "Manage a project's roles",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
roleCommand.AddCommand(NewProjectRoleListCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleGetCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleCreateCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleDeleteCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleCreateTokenCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleListTokensCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleDeleteTokenCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleAddPolicyCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleRemovePolicyCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleAddGroupCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleRemoveGroupCommand(clientOpts))
return roleCommand
}
// NewProjectRoleAddPolicyCommand returns a new instance of an `argocd proj role add-policy` command
func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts policyOpts
)
var command = &cobra.Command{
Use: "add-policy PROJECT ROLE-NAME",
Short: "Add a policy to a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
role, roleIndex, err := proj.GetRoleByName(roleName)
errors.CheckError(err)
policy := fmt.Sprintf(policyTemplate, proj.Name, role.Name, opts.action, proj.Name, opts.object, opts.permission)
proj.Spec.Roles[roleIndex].Policies = append(role.Policies, policy)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
addPolicyFlags(command, &opts)
return command
}
// NewProjectRoleRemovePolicyCommand returns a new instance of an `argocd proj role remove-policy` command
func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts policyOpts
)
var command = &cobra.Command{
Use: "remove-policy PROJECT ROLE-NAME",
Short: "Remove a policy from a role within a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
role, roleIndex, err := proj.GetRoleByName(roleName)
errors.CheckError(err)
policyToRemove := fmt.Sprintf(policyTemplate, proj.Name, role.Name, opts.action, proj.Name, opts.object, opts.permission)
duplicateIndex := -1
for i, policy := range role.Policies {
if policy == policyToRemove {
duplicateIndex = i
break
}
}
if duplicateIndex < 0 {
return
}
role.Policies[duplicateIndex] = role.Policies[len(role.Policies)-1]
proj.Spec.Roles[roleIndex].Policies = role.Policies[:len(role.Policies)-1]
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
addPolicyFlags(command, &opts)
return command
}
// NewProjectRoleCreateCommand returns a new instance of an `argocd proj role create` command
func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
description string
)
var command = &cobra.Command{
Use: "create PROJECT ROLE-NAME",
Short: "Create a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
_, _, err = proj.GetRoleByName(roleName)
if err == nil {
fmt.Printf("Role '%s' already exists\n", roleName)
return
}
proj.Spec.Roles = append(proj.Spec.Roles, v1alpha1.ProjectRole{Name: roleName, Description: description})
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Role '%s' created\n", roleName)
},
}
command.Flags().StringVarP(&description, "description", "", "", "Project role description")
return command
}
// NewProjectRoleDeleteCommand returns a new instance of an `argocd proj role delete` command
func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT ROLE-NAME",
Short: "Delete a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
_, index, err := proj.GetRoleByName(roleName)
if err != nil {
fmt.Printf("Role '%s' does not exist in project\n", roleName)
return
}
proj.Spec.Roles[index] = proj.Spec.Roles[len(proj.Spec.Roles)-1]
proj.Spec.Roles = proj.Spec.Roles[:len(proj.Spec.Roles)-1]
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Role '%s' deleted\n", roleName)
},
}
return command
}
func tokenTimeToString(t int64) string {
tokenTimeToString := "Never"
if t > 0 {
tokenTimeToString = time.Unix(t, 0).Format(time.RFC3339)
}
return tokenTimeToString
}
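tokenTimeToString renders a token timestamp for display: a non-positive value means the token never expires, anything else is formatted as RFC 3339. A tiny self-contained sketch of that behavior (the second example value is arbitrary, and the formatted string reflects the local time zone):

package main

import (
	"fmt"
	"time"
)

// Same behavior as tokenTimeToString above: zero or negative means "Never",
// otherwise the Unix timestamp is rendered in RFC 3339 form.
func tokenTimeToString(t int64) string {
	if t > 0 {
		return time.Unix(t, 0).Format(time.RFC3339)
	}
	return "Never"
}

func main() {
	fmt.Println(tokenTimeToString(0))          // Never
	fmt.Println(tokenTimeToString(1700000000)) // e.g. 2023-11-14T22:13:20Z, shifted to the local offset
}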
// NewProjectRoleCreateTokenCommand returns a new instance of an `argocd proj role create-token` command
func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
expiresIn string
outputTokenOnly bool
tokenID string
)
var command = &cobra.Command{
Use: "create-token PROJECT ROLE-NAME",
Short: "Create a project token",
Aliases: []string{"token-create"},
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
if expiresIn == "" {
expiresIn = "0s"
}
duration, err := timeutil.ParseDuration(expiresIn)
errors.CheckError(err)
tokenResponse, err := projIf.CreateToken(context.Background(), &projectpkg.ProjectTokenCreateRequest{
Project: projName,
Role: roleName,
ExpiresIn: int64(duration.Seconds()),
Id: tokenID,
})
errors.CheckError(err)
token, err := jwtgo.Parse(tokenResponse.Token, nil)
if token == nil {
err = fmt.Errorf("received malformed token %v", err)
errors.CheckError(err)
return
}
claims := token.Claims.(jwtgo.MapClaims)
issuedAt, _ := jwt.IssuedAt(claims)
expiresAt := int64(jwt.Float64Field(claims, "exp"))
id := jwt.StringField(claims, "jti")
subject := jwt.StringField(claims, "sub")
if !outputTokenOnly {
fmt.Printf("Create token succeeded for %s.\n", subject)
fmt.Printf(" ID: %s\n Issued At: %s\n Expires At: %s\n",
id, tokenTimeToString(issuedAt), tokenTimeToString(expiresAt),
)
fmt.Println(" Token: " + tokenResponse.Token)
} else {
fmt.Println(tokenResponse.Token)
}
},
}
command.Flags().StringVarP(&expiresIn, "expires-in", "e", "",
"Duration before the token will expire, e.g. \"12h\", \"7d\". (Default: No expiration)",
)
command.Flags().StringVarP(&tokenID, "id", "i", "", "Token unique identifier. (Default: Random UUID)")
command.Flags().BoolVarP(&outputTokenOnly, "token-only", "t", false, "Output token only - for use in scripts.")
return command
}
func NewProjectRoleListTokensCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
useUnixTime bool
)
var command = &cobra.Command{
Use: "list-tokens PROJECT ROLE-NAME",
Short: "List tokens for a given role.",
Aliases: []string{"list-token", "token-list"},
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
role, _, err := proj.GetRoleByName(roleName)
errors.CheckError(err)
if len(role.JWTTokens) == 0 {
fmt.Printf("No tokens for %s.%s\n", projName, roleName)
return
}
writer := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0)
_, err = fmt.Fprintf(writer, "ID\tISSUED AT\tEXPIRES AT\n")
errors.CheckError(err)
tokenRowFormat := "%s\t%v\t%v\n"
for _, token := range role.JWTTokens {
if useUnixTime {
_, _ = fmt.Fprintf(writer, tokenRowFormat, token.ID, token.IssuedAt, token.ExpiresAt)
} else {
_, _ = fmt.Fprintf(writer, tokenRowFormat, token.ID, tokenTimeToString(token.IssuedAt), tokenTimeToString(token.ExpiresAt))
}
}
err = writer.Flush()
errors.CheckError(err)
},
}
command.Flags().BoolVarP(&useUnixTime, "unixtime", "u", false,
"Print timestamps as Unix time instead of converting. Useful for piping into delete-token.",
)
return command
}
// NewProjectRoleDeleteTokenCommand returns a new instance of an `argocd proj role delete-token` command
func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
Short: "Delete a project token",
Aliases: []string{"token-delete", "remove-token"},
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
issuedAt, err := strconv.ParseInt(args[2], 10, 64)
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
_, err = projIf.DeleteToken(context.Background(), &projectpkg.ProjectTokenDeleteRequest{Project: projName, Role: roleName, Iat: issuedAt})
errors.CheckError(err)
},
}
return command
}
// Print list of project role names
func printProjectRoleListName(roles []v1alpha1.ProjectRole) {
for _, role := range roles {
fmt.Println(role.Name)
}
}
// Print table of project roles
func printProjectRoleListTable(roles []v1alpha1.ProjectRole) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ROLE-NAME\tDESCRIPTION\n")
for _, role := range roles {
fmt.Fprintf(w, "%s\t%s\n", role.Name, role.Description)
}
_ = w.Flush()
}
// NewProjectRoleListCommand returns a new instance of an `argocd proj role list` command
func NewProjectRoleListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List all the roles in a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
project, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
switch output {
case "json", "yaml":
err := PrintResourceList(project.Spec.Roles, output, false)
errors.CheckError(err)
case "name":
printProjectRoleListName(project.Spec.Roles)
case "wide", "":
printProjectRoleListTable(project.Spec.Roles)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|name")
return command
}
// NewProjectRoleGetCommand returns a new instance of an `argocd proj role get` command
func NewProjectRoleGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "get PROJECT ROLE-NAME",
Short: "Get the details of a specific role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
role, _, err := proj.GetRoleByName(roleName)
errors.CheckError(err)
printRoleFmtStr := "%-15s%s\n"
fmt.Printf(printRoleFmtStr, "Role Name:", roleName)
fmt.Printf(printRoleFmtStr, "Description:", role.Description)
fmt.Printf("Policies:\n")
fmt.Printf("%s\n", proj.ProjectPoliciesString())
fmt.Printf("JWT Tokens:\n")
// TODO(jessesuen): print groups
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ID\tISSUED-AT\tEXPIRES-AT\n")
for _, token := range proj.Status.JWTTokensByRole[roleName].Items {
expiresAt := "<none>"
if token.ExpiresAt > 0 {
expiresAt = humanizeTimestamp(token.ExpiresAt)
}
fmt.Fprintf(w, "%d\t%s\t%s\n", token.IssuedAt, humanizeTimestamp(token.IssuedAt), expiresAt)
}
_ = w.Flush()
},
}
return command
}
// NewProjectRoleAddGroupCommand returns a new instance of an `argocd proj role add-group` command
func NewProjectRoleAddGroupCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "add-group PROJECT ROLE-NAME GROUP-CLAIM",
Short: "Add a group claim to a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, roleName, groupName := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
updated, err := proj.AddGroupToRole(roleName, groupName)
errors.CheckError(err)
if !updated {
fmt.Printf("Group '%s' already present in role '%s'\n", groupName, roleName)
return
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Group '%s' added to role '%s'\n", groupName, roleName)
},
}
return command
}
// NewProjectRoleRemoveGroupCommand returns a new instance of an `argocd proj role remove-group` command
func NewProjectRoleRemoveGroupCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "remove-group PROJECT ROLE-NAME GROUP-CLAIM",
Short: "Remove a group claim from a role within a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, roleName, groupName := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
updated, err := proj.RemoveGroupFromRole(roleName, groupName)
errors.CheckError(err)
if !updated {
fmt.Printf("Group '%s' not present in role '%s'\n", groupName, roleName)
return
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Group '%s' removed from role '%s'\n", groupName, roleName)
},
}
return command
}

View File

@@ -1,324 +0,0 @@
package commands
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"text/tabwriter"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
projectpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/project"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
)
// NewProjectWindowsCommand returns a new instance of the `argocd proj windows` command
func NewProjectWindowsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
roleCommand := &cobra.Command{
Use: "windows",
Short: "Manage a project's sync windows",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
roleCommand.AddCommand(NewProjectWindowsDisableManualSyncCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsEnableManualSyncCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsAddWindowCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsDeleteCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsListCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsUpdateCommand(clientOpts))
return roleCommand
}
// NewProjectWindowsDisableManualSyncCommand returns a new instance of an `argocd proj windows disable-manual-sync` command
func NewProjectWindowsDisableManualSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "disable-manual-sync PROJECT ID",
Short: "Disable manual sync for a sync window",
Long: "Disable manual sync for a sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
id, err := strconv.Atoi(args[1])
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for i, window := range proj.Spec.SyncWindows {
if id == i {
window.ManualSync = false
}
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
// NewProjectWindowsEnableManualSyncCommand returns a new instance of an `argocd proj windows enable-manual-sync` command
func NewProjectWindowsEnableManualSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "enable-manual-sync PROJECT ID",
Short: "Enable manual sync for a sync window",
Long: "Enable manual sync for a sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
id, err := strconv.Atoi(args[1])
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for i, window := range proj.Spec.SyncWindows {
if id == i {
window.ManualSync = true
}
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
// NewProjectWindowsAddWindowCommand returns a new instance of an `argocd proj windows add` command
func NewProjectWindowsAddWindowCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
kind string
schedule string
duration string
applications []string
namespaces []string
clusters []string
manualSync bool
timeZone string
)
var command = &cobra.Command{
Use: "add PROJECT",
Short: "Add a sync window to a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
err = proj.Spec.AddWindow(kind, schedule, duration, applications, namespaces, clusters, manualSync, timeZone)
errors.CheckError(err)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
command.Flags().StringVarP(&kind, "kind", "k", "", "Sync window kind, either allow or deny")
command.Flags().StringVar(&schedule, "schedule", "", "Sync window schedule in cron format. (e.g. --schedule \"0 22 * * *\")")
command.Flags().StringVar(&duration, "duration", "", "Sync window duration. (e.g. --duration 1h)")
command.Flags().StringSliceVar(&applications, "applications", []string{}, "Applications that the schedule will be applied to. Comma separated, wildcards supported (e.g. --applications prod-\\*,website)")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "Namespaces that the schedule will be applied to. Comma separated, wildcards supported (e.g. --namespaces default,\\*-prod)")
command.Flags().StringSliceVar(&clusters, "clusters", []string{}, "Clusters that the schedule will be applied to. Comma separated, wildcards supported (e.g. --clusters prod,staging)")
command.Flags().BoolVar(&manualSync, "manual-sync", false, "Allow manual syncs for both deny and allow windows")
command.Flags().StringVar(&timeZone, "time-zone", "UTC", "Time zone of the sync window")
return command
}
// NewProjectWindowsDeleteCommand returns a new instance of an `argocd proj windows delete` command
func NewProjectWindowsDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT ID",
Short: "Delete a sync window from a project. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
id, err := strconv.Atoi(args[1])
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
err = proj.Spec.DeleteWindow(id)
errors.CheckError(err)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
// NewProjectWindowsUpdateCommand returns a new instance of an `argocd proj windows update` command
func NewProjectWindowsUpdateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
schedule string
duration string
applications []string
namespaces []string
clusters []string
timeZone string
)
var command = &cobra.Command{
Use: "update PROJECT ID",
Short: "Update a project sync window",
Long: "Update a project sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
id, err := strconv.Atoi(args[1])
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for i, window := range proj.Spec.SyncWindows {
if id == i {
err := window.Update(schedule, duration, applications, namespaces, clusters, timeZone)
if err != nil {
errors.CheckError(err)
}
}
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
command.Flags().StringVar(&schedule, "schedule", "", "Sync window schedule in cron format. (e.g. --schedule \"0 22 * * *\")")
command.Flags().StringVar(&duration, "duration", "", "Sync window duration. (e.g. --duration 1h)")
command.Flags().StringSliceVar(&applications, "applications", []string{}, "Applications that the schedule will be applied to. Comma separated, wildcards supported (e.g. --applications prod-\\*,website)")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "Namespaces that the schedule will be applied to. Comma separated, wildcards supported (e.g. --namespaces default,\\*-prod)")
command.Flags().StringSliceVar(&clusters, "clusters", []string{}, "Clusters that the schedule will be applied to. Comma separated, wildcards supported (e.g. --clusters prod,staging)")
command.Flags().StringVar(&timeZone, "time-zone", "UTC", "Time zone of the sync window. (e.g. --time-zone \"America/New_York\")")
return command
}
// NewProjectWindowsListCommand returns a new instance of an `argocd proj windows list` command
func NewProjectWindowsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List project sync windows",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer io.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(proj.Spec.SyncWindows, output, false)
errors.CheckError(err)
case "wide", "":
printSyncWindows(proj)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
return command
}
// Print table of sync window data
func printSyncWindows(proj *v1alpha1.AppProject) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
var fmtStr string
headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "APPLICATIONS", "NAMESPACES", "CLUSTERS", "MANUALSYNC"}
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
fmt.Fprintf(w, fmtStr, headers...)
if proj.Spec.SyncWindows.HasWindows() {
for i, window := range proj.Spec.SyncWindows {
vals := []interface{}{
strconv.Itoa(i),
formatBoolOutput(window.Active()),
window.Kind,
window.Schedule,
window.Duration,
formatListOutput(window.Applications),
formatListOutput(window.Namespaces),
formatListOutput(window.Clusters),
formatManualOutput(window.ManualSync),
}
fmt.Fprintf(w, fmtStr, vals...)
}
}
_ = w.Flush()
}
func formatListOutput(list []string) string {
var o string
if len(list) == 0 {
o = "-"
} else {
o = strings.Join(list, ",")
}
return o
}
func formatBoolOutput(active bool) string {
var o string
if active {
o = "Active"
} else {
o = "Inactive"
}
return o
}
func formatManualOutput(active bool) string {
var o string
if active {
o = "Enabled"
} else {
o = "Disabled"
}
return o
}

View File

@@ -1,27 +1,23 @@
package commands
import (
"context"
"fmt"
"os"
"github.com/coreos/go-oidc"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
settingspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/settings"
"github.com/argoproj/argo-cd/v2/util/errors"
argoio "github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/localconfig"
"github.com/argoproj/argo-cd/v2/util/session"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util/localconfig"
"github.com/argoproj/argo-cd/util/session"
)
// NewReloginCommand returns a new instance of `argocd relogin` command
func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
password string
ssoPort int
)
var command = &cobra.Command{
Use: "relogin",
@@ -40,38 +36,28 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
configCtx, err := localCfg.ResolveContext(localCfg.CurrentContext)
errors.CheckError(err)
parser := &jwt.Parser{
SkipClaimsValidation: true,
}
claims := jwt.StandardClaims{}
_, _, err = parser.ParseUnverified(configCtx.User.AuthToken, &claims)
errors.CheckError(err)
var tokenString string
var refreshToken string
clientOpts := argocdclient.ClientOptions{
ConfigPath: "",
ServerAddr: configCtx.Server.Server,
Insecure: configCtx.Server.Insecure,
ClientCertFile: globalClientOpts.ClientCertFile,
ClientCertKeyFile: globalClientOpts.ClientCertKeyFile,
GRPCWeb: globalClientOpts.GRPCWeb,
GRPCWebRootPath: globalClientOpts.GRPCWebRootPath,
PlainText: configCtx.Server.PlainText,
Headers: globalClientOpts.Headers,
}
acdClient := argocdclient.NewClientOrDie(&clientOpts)
claims, err := configCtx.User.Claims()
errors.CheckError(err)
if claims.Issuer == session.SessionManagerClaimsIssuer {
fmt.Printf("Relogging in as '%s'\n", localconfig.GetUsername(claims.Subject))
tokenString = passwordLogin(acdClient, localconfig.GetUsername(claims.Subject), password)
clientOpts := argocdclient.ClientOptions{
ConfigPath: "",
ServerAddr: configCtx.Server.Server,
Insecure: configCtx.Server.Insecure,
PlainText: configCtx.Server.PlainText,
}
acdClient := argocdclient.NewClientOrDie(&clientOpts)
fmt.Printf("Relogging in as '%s'\n", claims.Subject)
tokenString = passwordLogin(acdClient, claims.Subject, password)
} else {
fmt.Println("Reinitiating SSO login")
setConn, setIf := acdClient.NewSettingsClientOrDie()
defer argoio.Close(setConn)
ctx := context.Background()
httpClient, err := acdClient.HTTPClient()
errors.CheckError(err)
ctx = oidc.ClientContext(ctx, httpClient)
acdSet, err := setIf.Get(ctx, &settingspkg.SettingsQuery{})
errors.CheckError(err)
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
errors.CheckError(err)
tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider)
tokenString, refreshToken = oauth2Login(configCtx.Server.Server, configCtx.Server.PlainText)
}
localCfg.UpsertUser(localconfig.User{
@@ -85,6 +71,5 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
},
}
command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "port to run local OAuth2 login application")
return command
}

View File

@@ -10,21 +10,20 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
repositorypkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/repository"
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/git"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/git"
)
// NewRepoCommand returns a new instance of an `argocd repo` command
func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "repo",
Short: "Manage repository connection parameters",
Short: "Manage git repository credentials",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
@@ -32,7 +31,6 @@ func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
}
command.AddCommand(NewRepoAddCommand(clientOpts))
command.AddCommand(NewRepoGetCommand(clientOpts))
command.AddCommand(NewRepoListCommand(clientOpts))
command.AddCommand(NewRepoRemoveCommand(clientOpts))
return command
@@ -41,164 +39,56 @@ func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
// NewRepoAddCommand returns a new instance of an `argocd repo add` command
func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
repoOpts cmdutil.RepoOptions
repo appsv1.Repository
upsert bool
sshPrivateKeyPath string
)
// For better readability and easier formatting
var repoAddExamples = ` # Add a Git repository via SSH using a private key for authentication, ignoring the server's host key:
argocd repo add git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa
# Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here
argocd repo add ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa
# Add a private Git repository via HTTPS using username/password and TLS client certificates:
argocd repo add https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key
# Add a private Git repository via HTTPS using username/password without verifying the server's TLS certificate
argocd repo add https://git.example.com/repos/repo --username git --password secret --insecure-skip-server-verification
# Add a public Helm repository named 'stable' via HTTPS
argocd repo add https://charts.helm.sh/stable --type helm --name stable
# Add a private Helm repository named 'stable' via HTTPS
argocd repo add https://charts.helm.sh/stable --type helm --name stable --username test --password test
# Add a private Helm OCI-based repository named 'stable' via HTTPS
argocd repo add helm-oci-registry.cn-zhangjiakou.cr.aliyuncs.com --type helm --name stable --enable-oci --username test --password test
# Add a private Git repository on GitHub.com via GitHub App
argocd repo add https://git.example.com/repos/repo --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem
# Add a private Git repository on GitHub Enterprise via GitHub App
argocd repo add https://ghe.example.com/repos/repo --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem --github-app-enterprise-base-url https://ghe.example.com/api/v3
`
var command = &cobra.Command{
Use: "add REPOURL",
Short: "Add git repository connection parameters",
Example: repoAddExamples,
Use: "add REPO",
Short: "Add git repository credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
// Repository URL
repoOpts.Repo.Repo = args[0]
// Specifying ssh-private-key-path is only valid for SSH repositories
if repoOpts.SshPrivateKeyPath != "" {
if ok, _ := git.IsSSHURL(repoOpts.Repo.Repo); ok {
keyData, err := ioutil.ReadFile(repoOpts.SshPrivateKeyPath)
if err != nil {
log.Fatal(err)
}
repoOpts.Repo.SSHPrivateKey = string(keyData)
} else {
err := fmt.Errorf("--ssh-private-key-path is only supported for SSH repositories.")
errors.CheckError(err)
repo.Repo = args[0]
if sshPrivateKeyPath != "" {
keyData, err := ioutil.ReadFile(sshPrivateKeyPath)
if err != nil {
log.Fatal(err)
}
repo.SSHPrivateKey = string(keyData)
}
// tls-client-cert-path and tls-client-cert-key-path must always be
// specified together
if (repoOpts.TlsClientCertPath != "" && repoOpts.TlsClientCertKeyPath == "") || (repoOpts.TlsClientCertPath == "" && repoOpts.TlsClientCertKeyPath != "") {
err := fmt.Errorf("--tls-client-cert-path and --tls-client-cert-key-path must be specified together")
errors.CheckError(err)
}
// Specifying tls-client-cert-path is only valid for HTTPS repositories
if repoOpts.TlsClientCertPath != "" {
if git.IsHTTPSURL(repoOpts.Repo.Repo) {
tlsCertData, err := ioutil.ReadFile(repoOpts.TlsClientCertPath)
errors.CheckError(err)
tlsCertKey, err := ioutil.ReadFile(repoOpts.TlsClientCertKeyPath)
errors.CheckError(err)
repoOpts.Repo.TLSClientCertData = string(tlsCertData)
repoOpts.Repo.TLSClientCertKey = string(tlsCertKey)
} else {
err := fmt.Errorf("--tls-client-cert-path is only supported for HTTPS repositories")
errors.CheckError(err)
// First test the repo *without* username/password. This gives us a hint on whether this
// is a private repo.
// NOTE: it is important not to run git commands to test git credentials on the user's
// system since it may mess with their git credential store (e.g. osx keychain).
// See issue #315
err := git.TestRepo(repo.Repo, "", "", repo.SSHPrivateKey)
if err != nil {
if git.IsSSHURL(repo.Repo) {
// If we failed using git SSH credentials, then the repo is automatically bad
log.Fatal(err)
}
// If we can't test the repo, it's probably private. Prompt for credentials and
// let the server test it.
repo.Username, repo.Password = cli.PromptCredentials(repo.Username, repo.Password)
}
// Specifying github-app-private-key-path is only valid for HTTPS repositories
if repoOpts.GithubAppPrivateKeyPath != "" {
if git.IsHTTPSURL(repoOpts.Repo.Repo) {
githubAppPrivateKey, err := ioutil.ReadFile(repoOpts.GithubAppPrivateKeyPath)
errors.CheckError(err)
repoOpts.Repo.GithubAppPrivateKey = string(githubAppPrivateKey)
} else {
err := fmt.Errorf("--github-app-private-key-path is only supported for HTTPS repositories")
errors.CheckError(err)
}
}
// Set repository connection properties only when creating repository, not
// when creating repository credentials.
// InsecureIgnoreHostKey is deprecated and only here for backwards compat
repoOpts.Repo.InsecureIgnoreHostKey = repoOpts.InsecureIgnoreHostKey
repoOpts.Repo.Insecure = repoOpts.InsecureSkipServerVerification
repoOpts.Repo.EnableLFS = repoOpts.EnableLfs
repoOpts.Repo.EnableOCI = repoOpts.EnableOci
repoOpts.Repo.GithubAppId = repoOpts.GithubAppId
repoOpts.Repo.GithubAppInstallationId = repoOpts.GithubAppInstallationId
repoOpts.Repo.GitHubAppEnterpriseBaseURL = repoOpts.GitHubAppEnterpriseBaseURL
repoOpts.Repo.Proxy = repoOpts.Proxy
if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" {
errors.CheckError(fmt.Errorf("Must specify --name for repos of type 'helm'"))
}
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
defer io.Close(conn)
// If the user set a username, but didn't supply password via --password,
// then we prompt for it
if repoOpts.Repo.Username != "" && repoOpts.Repo.Password == "" {
repoOpts.Repo.Password = cli.PromptPassword(repoOpts.Repo.Password)
defer util.Close(conn)
repoCreateReq := repository.RepoCreateRequest{
Repo: &repo,
Upsert: upsert,
}
// We let the server check access to the repository before adding it. If
// it is a private repo, but we cannot access it with the credentials
// that were supplied, we bail out.
//
// Skip validation if we are just adding a credentials template, chances
// are high that we do not have the given URL pointing to a valid Git
// repo anyway.
repoAccessReq := repositorypkg.RepoAccessQuery{
Repo: repoOpts.Repo.Repo,
Type: repoOpts.Repo.Type,
Name: repoOpts.Repo.Name,
Username: repoOpts.Repo.Username,
Password: repoOpts.Repo.Password,
SshPrivateKey: repoOpts.Repo.SSHPrivateKey,
TlsClientCertData: repoOpts.Repo.TLSClientCertData,
TlsClientCertKey: repoOpts.Repo.TLSClientCertKey,
Insecure: repoOpts.Repo.IsInsecure(),
EnableOci: repoOpts.Repo.EnableOCI,
GithubAppPrivateKey: repoOpts.Repo.GithubAppPrivateKey,
GithubAppID: repoOpts.Repo.GithubAppId,
GithubAppInstallationID: repoOpts.Repo.GithubAppInstallationId,
GithubAppEnterpriseBaseUrl: repoOpts.Repo.GitHubAppEnterpriseBaseURL,
Proxy: repoOpts.Proxy,
Project: repoOpts.Repo.Project,
}
_, err := repoIf.ValidateAccess(context.Background(), &repoAccessReq)
errors.CheckError(err)
repoCreateReq := repositorypkg.RepoCreateRequest{
Repo: &repoOpts.Repo,
Upsert: repoOpts.Upsert,
}
createdRepo, err := repoIf.Create(context.Background(), &repoCreateReq)
errors.CheckError(err)
fmt.Printf("Repository '%s' added\n", createdRepo.Repo)
fmt.Printf("repository '%s' added\n", createdRepo.Repo)
},
}
command.Flags().BoolVar(&repoOpts.Upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
cmdutil.AddRepoFlags(command, &repoOpts)
command.Flags().StringVar(&repo.Username, "username", "", "username to the repository")
command.Flags().StringVar(&repo.Password, "password", "", "password to the repository")
command.Flags().StringVar(&sshPrivateKeyPath, "sshPrivateKeyPath", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
return command
}
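The add flow above validates repository access on the server before creating the record. The fragment below is a minimal, illustrative sketch of that validate-then-create pattern; it is not part of the diff, reuses this file's context, repositorypkg and appsv1 imports, and assumes the generated client interface is named RepositoryServiceClient.
// Illustrative sketch only: validate access server-side, then create the repository.
func addRepoWithValidation(ctx context.Context, repoIf repositorypkg.RepositoryServiceClient, repo *appsv1.Repository, upsert bool) (*appsv1.Repository, error) {
	// Ask the API server to verify it can reach the repository with the supplied credentials.
	access := repositorypkg.RepoAccessQuery{
		Repo:     repo.Repo,
		Type:     repo.Type,
		Name:     repo.Name,
		Username: repo.Username,
		Password: repo.Password,
		Insecure: repo.IsInsecure(),
	}
	if _, err := repoIf.ValidateAccess(ctx, &access); err != nil {
		return nil, err
	}
	// Only create (or upsert) the repository once access has been confirmed.
	return repoIf.Create(ctx, &repositorypkg.RepoCreateRequest{Repo: repo, Upsert: upsert})
}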
@@ -206,138 +96,40 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rm REPO",
Short: "Remove repository credentials",
Short: "Remove git repository credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
defer io.Close(conn)
defer util.Close(conn)
for _, repoURL := range args {
_, err := repoIf.Delete(context.Background(), &repositorypkg.RepoQuery{Repo: repoURL})
_, err := repoIf.Delete(context.Background(), &repository.RepoQuery{Repo: repoURL})
errors.CheckError(err)
fmt.Printf("Repository '%s' removed\n", repoURL)
}
},
}
return command
}
// Print table of repo info
func printRepoTable(repos appsv1.Repositories) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "TYPE\tNAME\tREPO\tINSECURE\tOCI\tLFS\tCREDS\tSTATUS\tMESSAGE\tPROJECT\n")
for _, r := range repos {
var hasCreds string
if !r.HasCredentials() {
hasCreds = "false"
} else {
if r.InheritedCreds {
hasCreds = "inherited"
} else {
hasCreds = "true"
}
}
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%v\t%v\t%v\t%s\t%s\t%s\t%s\n", r.Type, r.Name, r.Repo, r.IsInsecure(), r.EnableOCI, r.EnableLFS, hasCreds, r.ConnectionState.Status, r.ConnectionState.Message, r.Project)
}
_ = w.Flush()
}
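printRepoTable relies on text/tabwriter for column alignment. A self-contained sketch of that pattern with sample data only:
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Columns are tab-separated; alignment is computed when Flush is called.
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintf(w, "TYPE\tNAME\tREPO\n")
	fmt.Fprintf(w, "%s\t%s\t%s\n", "git", "argo-cd", "https://github.com/argoproj/argo-cd.git")
	fmt.Fprintf(w, "%s\t%s\t%s\n", "helm", "charts", "https://charts.example.com")
	_ = w.Flush()
}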
// Print the list of configured repository URLs
func printRepoUrls(repos appsv1.Repositories) {
for _, r := range repos {
fmt.Println(r.Repo)
}
}
// NewRepoListCommand returns a new instance of an `argocd repo list` command
func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
refresh string
)
var command = &cobra.Command{
Use: "list",
Short: "List configured repositories",
Run: func(c *cobra.Command, args []string) {
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
defer io.Close(conn)
forceRefresh := false
switch refresh {
case "":
case "hard":
forceRefresh = true
default:
err := fmt.Errorf("--refresh must be one of: 'hard'")
errors.CheckError(err)
}
repos, err := repoIf.List(context.Background(), &repositorypkg.RepoQuery{ForceRefresh: forceRefresh})
defer util.Close(conn)
repos, err := repoIf.List(context.Background(), &repository.RepoQuery{})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(repos.Items, output, false)
errors.CheckError(err)
case "url":
printRepoUrls(repos.Items)
// wide is the default
case "wide", "":
printRepoTable(repos.Items)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "REPO\tUSER\tSTATUS\tMESSAGE\n")
for _, r := range repos.Items {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", r.Repo, r.Username, r.ConnectionState.Status, r.ConnectionState.Message)
}
_ = w.Flush()
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status")
return command
}
// NewRepoGetCommand returns a new instance of an `argocd repo get` command
func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
refresh string
)
var command = &cobra.Command{
Use: "get",
Short: "Get a configured repository by URL",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
// Repository URL
repoURL := args[0]
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
defer io.Close(conn)
forceRefresh := false
switch refresh {
case "":
case "hard":
forceRefresh = true
default:
err := fmt.Errorf("--refresh must be one of: 'hard'")
errors.CheckError(err)
}
repo, err := repoIf.Get(context.Background(), &repositorypkg.RepoQuery{Repo: repoURL, ForceRefresh: forceRefresh})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResource(repo, output)
errors.CheckError(err)
case "url":
fmt.Println(repo.Repo)
// wide is the default
case "wide", "":
printRepoTable(appsv1.Repositories{repo})
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status")
return command
}
@@ -1,233 +0,0 @@
package commands
import (
"context"
"fmt"
"io/ioutil"
"os"
"text/tabwriter"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/v2/common"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
repocredspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/repocreds"
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/git"
"github.com/argoproj/argo-cd/v2/util/io"
)
// NewRepoCredsCommand returns a new instance of an `argocd repocreds` command
func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "repocreds",
Short: "Manage repository connection parameters",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
command.AddCommand(NewRepoCredsAddCommand(clientOpts))
command.AddCommand(NewRepoCredsListCommand(clientOpts))
command.AddCommand(NewRepoCredsRemoveCommand(clientOpts))
return command
}
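The parent command above only prints help and exits when invoked without a subcommand. A self-contained sketch of that cobra wiring pattern, with illustrative names:
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Parent command: with no subcommand it shows help and exits non-zero.
	root := &cobra.Command{
		Use: "repocreds",
		Run: func(c *cobra.Command, args []string) {
			c.HelpFunc()(c, args)
			os.Exit(1)
		},
	}
	// Subcommands are attached with AddCommand.
	root.AddCommand(&cobra.Command{
		Use: "list",
		Run: func(c *cobra.Command, args []string) { fmt.Println("listing credentials (stub)") },
	})
	_ = root.Execute()
}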
// NewRepoCredsAddCommand returns a new instance of an `argocd repocreds add` command
func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
repo appsv1.RepoCreds
upsert bool
sshPrivateKeyPath string
tlsClientCertPath string
tlsClientCertKeyPath string
githubAppPrivateKeyPath string
)
// For better readability and easier formatting
var repocredsAddExamples = ` # Add credentials with user/pass authentication to use for all repositories under https://git.example.com/repos
argocd repocreds add https://git.example.com/repos/ --username git --password secret
# Add credentials with SSH private key authentication to use for all repositories under ssh://git@git.example.com/repos
argocd repocreds add ssh://git@git.example.com/repos/ --ssh-private-key-path ~/.ssh/id_rsa
# Add credentials with GitHub App authentication to use for all repositories under https://github.com/repos
argocd repocreds add https://github.com/repos/ --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem
# Add credentials with GitHub App authentication to use for all repositories under https://ghe.example.com/repos
argocd repocreds add https://ghe.example.com/repos/ --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem --github-app-enterprise-base-url https://ghe.example.com/api/v3
  # Add credentials for a Helm OCI registry so that its OCI registry URLs do not need to be added as repos individually.
argocd repocreds add localhost:5000/myrepo --enable-oci --type helm
`
var command = &cobra.Command{
Use: "add REPOURL",
Short: "Add git repository connection parameters",
Example: repocredsAddExamples,
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
// Repository URL
repo.URL = args[0]
// Specifying ssh-private-key-path is only valid for SSH repositories
if sshPrivateKeyPath != "" {
if ok, _ := git.IsSSHURL(repo.URL); ok {
keyData, err := ioutil.ReadFile(sshPrivateKeyPath)
if err != nil {
log.Fatal(err)
}
repo.SSHPrivateKey = string(keyData)
} else {
err := fmt.Errorf("--ssh-private-key-path is only supported for SSH repositories.")
errors.CheckError(err)
}
}
// tls-client-cert-path and tls-client-cert-key-path must always be
// specified together
if (tlsClientCertPath != "" && tlsClientCertKeyPath == "") || (tlsClientCertPath == "" && tlsClientCertKeyPath != "") {
err := fmt.Errorf("--tls-client-cert-path and --tls-client-cert-key-path must be specified together")
errors.CheckError(err)
}
// Specifying tls-client-cert-path is only valid for HTTPS repositories
if tlsClientCertPath != "" {
if git.IsHTTPSURL(repo.URL) {
tlsCertData, err := ioutil.ReadFile(tlsClientCertPath)
errors.CheckError(err)
tlsCertKey, err := ioutil.ReadFile(tlsClientCertKeyPath)
errors.CheckError(err)
repo.TLSClientCertData = string(tlsCertData)
repo.TLSClientCertKey = string(tlsCertKey)
} else {
err := fmt.Errorf("--tls-client-cert-path is only supported for HTTPS repositories")
errors.CheckError(err)
}
}
// Specifying github-app-private-key-path is only valid for HTTPS repositories
if githubAppPrivateKeyPath != "" {
if git.IsHTTPSURL(repo.URL) {
githubAppPrivateKey, err := ioutil.ReadFile(githubAppPrivateKeyPath)
errors.CheckError(err)
repo.GithubAppPrivateKey = string(githubAppPrivateKey)
} else {
err := fmt.Errorf("--github-app-private-key-path is only supported for HTTPS repositories")
errors.CheckError(err)
}
}
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoCredsClientOrDie()
defer io.Close(conn)
// If the user set a username, but didn't supply password via --password,
// then we prompt for it
if repo.Username != "" && repo.Password == "" {
repo.Password = cli.PromptPassword(repo.Password)
}
repoCreateReq := repocredspkg.RepoCredsCreateRequest{
Creds: &repo,
Upsert: upsert,
}
createdRepo, err := repoIf.CreateRepositoryCredentials(context.Background(), &repoCreateReq)
errors.CheckError(err)
fmt.Printf("Repository credentials for '%s' added\n", createdRepo.URL)
},
}
command.Flags().StringVar(&repo.Username, "username", "", "username to the repository")
command.Flags().StringVar(&repo.Password, "password", "", "password to the repository")
command.Flags().StringVar(&sshPrivateKeyPath, "ssh-private-key-path", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().StringVar(&tlsClientCertPath, "tls-client-cert-path", "", "path to the TLS client cert (must be PEM format)")
command.Flags().StringVar(&tlsClientCertKeyPath, "tls-client-cert-key-path", "", "path to the TLS client cert's key path (must be PEM format)")
command.Flags().Int64Var(&repo.GithubAppId, "github-app-id", 0, "id of the GitHub Application")
command.Flags().Int64Var(&repo.GithubAppInstallationId, "github-app-installation-id", 0, "installation id of the GitHub Application")
command.Flags().StringVar(&githubAppPrivateKeyPath, "github-app-private-key-path", "", "private key of the GitHub Application")
command.Flags().StringVar(&repo.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
command.Flags().BoolVar(&repo.EnableOCI, "enable-oci", false, "Specifies whether helm-oci support should be enabled for this repo")
command.Flags().StringVar(&repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"")
return command
}
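The add command above reads the SSH private key from disk only when the repository URL is an SSH URL. A small, hypothetical helper capturing that gate, reusing this file's git, ioutil and fmt imports; it is illustrative and not part of the diff.
// Hypothetical helper: load an SSH private key for a credential template
// only when the URL is actually an SSH URL.
func loadSSHKeyForURL(repoURL, keyPath string) (string, error) {
	if ok, _ := git.IsSSHURL(repoURL); !ok {
		return "", fmt.Errorf("--ssh-private-key-path is only supported for SSH repositories")
	}
	keyData, err := ioutil.ReadFile(keyPath)
	if err != nil {
		return "", err
	}
	return string(keyData), nil
}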
// NewRepoCredsRemoveCommand returns a new instance of an `argocd repocreds rm` command
func NewRepoCredsRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rm CREDSURL",
Short: "Remove repository credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoCredsClientOrDie()
defer io.Close(conn)
for _, repoURL := range args {
_, err := repoIf.DeleteRepositoryCredentials(context.Background(), &repocredspkg.RepoCredsDeleteRequest{Url: repoURL})
errors.CheckError(err)
fmt.Printf("Repository credentials for '%s' removed\n", repoURL)
}
},
}
return command
}
// Print the repository credentials as table
func printRepoCredsTable(repos []appsv1.RepoCreds) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "URL PATTERN\tUSERNAME\tSSH_CREDS\tTLS_CREDS\n")
for _, r := range repos {
if r.Username == "" {
r.Username = "-"
}
fmt.Fprintf(w, "%s\t%s\t%v\t%v\n", r.URL, r.Username, r.SSHPrivateKey != "", r.TLSClientCertData != "")
}
_ = w.Flush()
}
// Print list of repo urls or url patterns for repository credentials
func printRepoCredsUrls(repos []appsv1.RepoCreds) {
for _, r := range repos {
fmt.Println(r.URL)
}
}
// NewRepoCredsListCommand returns a new instance of an `argocd repocreds list` command
func NewRepoCredsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List configured repository credentials",
Run: func(c *cobra.Command, args []string) {
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoCredsClientOrDie()
defer io.Close(conn)
repos, err := repoIf.ListRepositoryCredentials(context.Background(), &repocredspkg.RepoCredsQuery{})
errors.CheckError(err)
switch output {
case "yaml", "json":
err := PrintResourceList(repos.Items, output, false)
errors.CheckError(err)
case "url":
printRepoCredsUrls(repos.Items)
case "wide", "":
printRepoCredsTable(repos.Items)
default:
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
return command
}
@@ -1,28 +1,13 @@
package commands
import (
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util/localconfig"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/admin"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/config"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/localconfig"
)
func init() {
cobra.OnInitialize(initConfig)
}
func initConfig() {
cli.SetLogFormat(cmdutil.LogFormat)
cli.SetLogLevel(cmdutil.LogLevel)
}
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
@@ -32,47 +17,30 @@ func NewCommand() *cobra.Command {
var command = &cobra.Command{
Use: cliName,
Short:            "argocd controls an Argo CD server",
Short: "argocd controls an ArgoCD server",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
DisableAutoGenTag: true,
}
command.AddCommand(NewCompletionCommand())
command.AddCommand(headless.InitCommand(NewVersionCmd(&clientOpts), &clientOpts, nil))
command.AddCommand(headless.InitCommand(NewClusterCommand(&clientOpts, pathOpts), &clientOpts, nil))
command.AddCommand(headless.InitCommand(NewApplicationCommand(&clientOpts), &clientOpts, nil))
command.AddCommand(NewVersionCmd(&clientOpts))
command.AddCommand(NewClusterCommand(&clientOpts, pathOpts))
command.AddCommand(NewApplicationCommand(&clientOpts))
command.AddCommand(NewLoginCommand(&clientOpts))
command.AddCommand(NewReloginCommand(&clientOpts))
command.AddCommand(headless.InitCommand(NewRepoCommand(&clientOpts), &clientOpts, nil))
command.AddCommand(headless.InitCommand(NewRepoCredsCommand(&clientOpts), &clientOpts, nil))
command.AddCommand(NewRepoCommand(&clientOpts))
command.AddCommand(NewContextCommand(&clientOpts))
command.AddCommand(headless.InitCommand(NewProjectCommand(&clientOpts), &clientOpts, nil))
command.AddCommand(headless.InitCommand(NewAccountCommand(&clientOpts), &clientOpts, nil))
command.AddCommand(NewLogoutCommand(&clientOpts))
command.AddCommand(headless.InitCommand(NewCertCommand(&clientOpts), &clientOpts, nil))
command.AddCommand(headless.InitCommand(NewGPGCommand(&clientOpts), &clientOpts, nil))
command.AddCommand(admin.NewAdminCommand())
command.AddCommand(NewProjectCommand(&clientOpts))
command.AddCommand(NewAccountCommand(&clientOpts))
defaultLocalConfigPath, err := localconfig.DefaultLocalConfigPath()
errors.CheckError(err)
command.PersistentFlags().StringVar(&clientOpts.ConfigPath, "config", config.GetFlag("config", defaultLocalConfigPath), "Path to Argo CD config")
command.PersistentFlags().StringVar(&clientOpts.ServerAddr, "server", config.GetFlag("server", ""), "Argo CD server address")
command.PersistentFlags().BoolVar(&clientOpts.PlainText, "plaintext", config.GetBoolFlag("plaintext"), "Disable TLS")
command.PersistentFlags().BoolVar(&clientOpts.Insecure, "insecure", config.GetBoolFlag("insecure"), "Skip server certificate and domain verification")
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", config.GetFlag("server-crt", ""), "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.ClientCertFile, "client-crt", config.GetFlag("client-crt", ""), "Client certificate file")
command.PersistentFlags().StringVar(&clientOpts.ClientCertKeyFile, "client-crt-key", config.GetFlag("client-crt-key", ""), "Client certificate key file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", ""), "Authentication token")
command.PersistentFlags().BoolVar(&clientOpts.GRPCWeb, "grpc-web", config.GetBoolFlag("grpc-web"), "Enables gRPC-web protocol. Useful if Argo CD server is behind a proxy that does not support HTTP2.")
command.PersistentFlags().StringVar(&clientOpts.GRPCWebRootPath, "grpc-web-root-path", config.GetFlag("grpc-web-root-path", ""), "Enables gRPC-web protocol. Useful if Argo CD server is behind a proxy that does not support HTTP2. Set web root.")
command.PersistentFlags().StringVar(&cmdutil.LogFormat, "logformat", config.GetFlag("logformat", "text"), "Set the logging format. One of: text|json")
command.PersistentFlags().StringVar(&cmdutil.LogLevel, "loglevel", config.GetFlag("loglevel", "info"), "Set the logging level. One of: debug|info|warn|error")
command.PersistentFlags().StringSliceVarP(&clientOpts.Headers, "header", "H", []string{}, "Sets an additional header on all requests made by the Argo CD CLI (can be repeated to add multiple headers; also supports comma-separated headers)")
command.PersistentFlags().BoolVar(&clientOpts.PortForward, "port-forward", config.GetBoolFlag("port-forward"), "Connect to a random argocd-server port using port forwarding")
command.PersistentFlags().StringVar(&clientOpts.PortForwardNamespace, "port-forward-namespace", config.GetFlag("port-forward-namespace", ""), "Namespace name which should be used for port forwarding")
command.PersistentFlags().IntVar(&clientOpts.HttpRetryMax, "http-retry-max", 0, "Maximum number of retries to establish http connection to Argo CD server")
command.PersistentFlags().BoolVar(&clientOpts.Core, "core", false, "If set to true then CLI talks directly to Kubernetes instead of talking to Argo CD API server")
command.PersistentFlags().StringVar(&clientOpts.ConfigPath, "config", defaultLocalConfigPath, "Path to ArgoCD config")
command.PersistentFlags().StringVar(&clientOpts.ServerAddr, "server", "", "ArgoCD server address")
command.PersistentFlags().BoolVar(&clientOpts.PlainText, "plaintext", false, "Disable TLS")
command.PersistentFlags().BoolVar(&clientOpts.Insecure, "insecure", false, "Skip server certificate and domain verification")
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", "", "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", "", "Authentication token")
return command
}
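The new root command sources its flag defaults through config.GetFlag; the exact resolution mechanism is outside this diff. As a generic sketch, defaulting a persistent cobra flag from an environment variable (names here are illustrative, not the CLI's actual behavior) could look like this:
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// envOr returns the environment value for key, or fallback when it is unset or empty.
func envOr(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	var serverAddr string
	root := &cobra.Command{
		Use: "argocd",
		Run: func(c *cobra.Command, args []string) {
			fmt.Println("server:", serverAddr)
		},
	}
	// --server falls back to an environment variable when the flag is not passed.
	root.PersistentFlags().StringVar(&serverAddr, "server", envOr("ARGOCD_SERVER", ""), "Argo CD server address")
	_ = root.Execute()
}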
@@ -1,25 +0,0 @@
contexts:
- name: argocd1.example.com:443
server: argocd1.example.com:443
user: argocd1.example.com:443
- name: argocd2.example.com:443
server: argocd2.example.com:443
user: argocd2.example.com:443
- name: localhost:8080
server: localhost:8080
user: localhost:8080
current-context: localhost:8080
servers:
- server: argocd1.example.com:443
- server: argocd2.example.com:443
- plain-text: true
server: localhost:8080
users:
- auth-token: vErrYS3c3tReFRe$hToken
name: argocd1.example.com:443
refresh-token: vErrYS3c3tReFRe$hToken
- auth-token: vErrYS3c3tReFRe$hToken
name: argocd2.example.com:443
refresh-token: vErrYS3c3tReFRe$hToken
- auth-token: vErrYS3c3tReFRe$hToken
name: localhost:8080
@@ -4,152 +4,62 @@ import (
"context"
"fmt"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util"
"github.com/golang/protobuf/ptypes/empty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/v2/common"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/pkg/apiclient/version"
"github.com/argoproj/argo-cd/v2/util/errors"
argoio "github.com/argoproj/argo-cd/v2/util/io"
)
// NewVersionCmd returns a new `version` command to be used as a sub-command to root
func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
short bool
client bool
output string
)
var short bool
var client bool
versionCmd := cobra.Command{
Use: "version",
Short: "Print version information",
Example: ` # Print the full version of client and server to stdout
argocd version
# Print only full version of the client - no connection to server will be made
argocd version --client
# Print the full version of client and server in JSON format
argocd version -o json
# Print only client and server core version strings in YAML format
argocd version --short -o yaml
`,
Short: fmt.Sprintf("Print version information"),
Run: func(cmd *cobra.Command, args []string) {
cv := common.GetVersion()
switch output {
case "yaml", "json":
v := make(map[string]interface{})
if short {
v["client"] = map[string]string{cliName: cv.Version}
} else {
v["client"] = cv
version := argocd.GetVersion()
fmt.Printf("%s: %s\n", cliName, version)
if !short {
fmt.Printf(" BuildDate: %s\n", version.BuildDate)
fmt.Printf(" GitCommit: %s\n", version.GitCommit)
fmt.Printf(" GitTreeState: %s\n", version.GitTreeState)
if version.GitTag != "" {
fmt.Printf(" GitTag: %s\n", version.GitTag)
}
if !client {
sv := getServerVersion(clientOpts)
if short {
v["server"] = map[string]string{"argocd-server": sv.Version}
} else {
v["server"] = sv
}
}
err := PrintResource(v, output)
errors.CheckError(err)
case "wide", "short", "":
printClientVersion(&cv, short || (output == "short"))
if !client {
sv := getServerVersion(clientOpts)
printServerVersion(sv, short || (output == "short"))
}
default:
log.Fatalf("unknown output format: %s", output)
fmt.Printf(" GoVersion: %s\n", version.GoVersion)
fmt.Printf(" Compiler: %s\n", version.Compiler)
fmt.Printf(" Platform: %s\n", version.Platform)
}
if client {
return
}
// Get Server version
conn, versionIf := argocdclient.NewClientOrDie(clientOpts).NewVersionClientOrDie()
defer util.Close(conn)
serverVers, err := versionIf.Version(context.Background(), &empty.Empty{})
errors.CheckError(err)
fmt.Printf("%s: %s\n", "argocd-server", serverVers.Version)
if !short {
fmt.Printf(" BuildDate: %s\n", serverVers.BuildDate)
fmt.Printf(" GitCommit: %s\n", serverVers.GitCommit)
fmt.Printf(" GitTreeState: %s\n", serverVers.GitTreeState)
if version.GitTag != "" {
fmt.Printf(" GitTag: %s\n", serverVers.GitTag)
}
fmt.Printf(" GoVersion: %s\n", serverVers.GoVersion)
fmt.Printf(" Compiler: %s\n", serverVers.Compiler)
fmt.Printf(" Platform: %s\n", serverVers.Platform)
fmt.Printf(" Ksonnet Version: %s\n", serverVers.KsonnetVersion)
}
},
}
versionCmd.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|short")
versionCmd.Flags().BoolVar(&short, "short", false, "print just the version number")
versionCmd.Flags().BoolVar(&client, "client", false, "client version only (no server required)")
return &versionCmd
}
func getServerVersion(options *argocdclient.ClientOptions) *version.VersionMessage {
conn, versionIf := argocdclient.NewClientOrDie(options).NewVersionClientOrDie()
defer argoio.Close(conn)
v, err := versionIf.Version(context.Background(), &empty.Empty{})
errors.CheckError(err)
return v
}
func printClientVersion(version *common.Version, short bool) {
fmt.Printf("%s: %s\n", cliName, version)
if short {
return
}
fmt.Printf(" BuildDate: %s\n", version.BuildDate)
fmt.Printf(" GitCommit: %s\n", version.GitCommit)
fmt.Printf(" GitTreeState: %s\n", version.GitTreeState)
if version.GitTag != "" {
fmt.Printf(" GitTag: %s\n", version.GitTag)
}
fmt.Printf(" GoVersion: %s\n", version.GoVersion)
fmt.Printf(" Compiler: %s\n", version.Compiler)
fmt.Printf(" Platform: %s\n", version.Platform)
}
func printServerVersion(version *version.VersionMessage, short bool) {
fmt.Printf("%s: %s\n", "argocd-server", version.Version)
if short {
return
}
if version.BuildDate != "" {
fmt.Printf(" BuildDate: %s\n", version.BuildDate)
}
if version.GitCommit != "" {
fmt.Printf(" GitCommit: %s\n", version.GitCommit)
}
if version.GitTreeState != "" {
fmt.Printf(" GitTreeState: %s\n", version.GitTreeState)
}
if version.GitTag != "" {
fmt.Printf(" GitTag: %s\n", version.GitTag)
}
if version.GoVersion != "" {
fmt.Printf(" GoVersion: %s\n", version.GoVersion)
}
if version.Compiler != "" {
fmt.Printf(" Compiler: %s\n", version.Compiler)
}
if version.Platform != "" {
fmt.Printf(" Platform: %s\n", version.Platform)
}
if version.KsonnetVersion != "" {
fmt.Printf(" Ksonnet Version: %s\n", version.KsonnetVersion)
}
if version.KustomizeVersion != "" {
fmt.Printf(" Kustomize Version: %s\n", version.KustomizeVersion)
}
if version.HelmVersion != "" {
fmt.Printf(" Helm Version: %s\n", version.HelmVersion)
}
if version.KubectlVersion != "" {
fmt.Printf(" Kubectl Version: %s\n", version.KubectlVersion)
}
if version.JsonnetVersion != "" {
fmt.Printf(" Jsonnet Version: %s\n", version.JsonnetVersion)
}
}
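For -o json or -o yaml, the new version command collects the client (and optionally server) versions in a map before printing. A standalone sketch of that structured-output step using plain encoding/json, with placeholder version strings:
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Gather version info under stable keys, mirroring the map built by the command above.
	v := map[string]interface{}{
		"client": map[string]string{"argocd": "v0.0.0+placeholder"},
		"server": map[string]string{"argocd-server": "v0.0.0+placeholder"},
	}
	out, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}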
16
cmd/argocd/main.go Normal file
@@ -0,0 +1,16 @@
package main
import (
commands "github.com/argoproj/argo-cd/cmd/argocd/commands"
"github.com/argoproj/argo-cd/errors"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
func main() {
err := commands.NewCommand().Execute()
errors.CheckError(err)
}

Some files were not shown because too many files have changed in this diff