Compare commits


5 Commits

Author SHA1 Message Date
Alexander Matyushentsev
e4d0bd3926 Take into account number of unavailable replicas to decide if deployment is healthy or not (#270)
* Take into account number of unavailable replicas to decide if deployment is healthy or not

* Run one controller for all e2e tests to reduce tests duration

* Apply reviewer notes: use logic from kubectl/rollout_status.go to check deployment health
2018-06-07 11:09:38 -07:00
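
For reference, the health rule this commit describes — borrowed from kubectl's rollout_status.go — can be sketched in a few lines of Go. The Status type and function below are illustrative only, not Argo CD's actual health API:

```go
package health

import appsv1 "k8s.io/api/apps/v1"

// Status is an illustrative health value, not Argo CD's real type.
type Status string

const (
	Healthy     Status = "Healthy"
	Progressing Status = "Progressing"
)

// deploymentHealth mirrors the checks kubectl's rollout_status.go performs:
// a Deployment is healthy only once every desired replica has been updated,
// old replicas are gone, and none of the updated replicas are unavailable.
func deploymentHealth(d *appsv1.Deployment) Status {
	replicas := int32(1)
	if d.Spec.Replicas != nil {
		replicas = *d.Spec.Replicas
	}
	if d.Status.UpdatedReplicas < replicas {
		return Progressing // rollout of the new spec is still in progress
	}
	if d.Status.Replicas > d.Status.UpdatedReplicas {
		return Progressing // old replicas are still terminating
	}
	if d.Status.AvailableReplicas < d.Status.UpdatedReplicas {
		return Progressing // the fix in #270: unavailable replicas mean not healthy yet
	}
	return Healthy
}
```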
Jesse Suen
18dc82d14d Remove hard requirement of initializing OIDC app during server startup (resolves #272) 2018-06-07 02:12:36 -07:00
Jesse Suen
e720abb58b Bump version to v0.4.7 2018-06-06 14:28:14 -07:00
Jesse Suen
a2e9a9ee49 Repo names containing underscores were not being accepted (resolves #258) 2018-06-06 14:27:41 -07:00
Jesse Suen
cf3776903d Retry argocd app wait connection errors from EOF watch. Show detailed state changes 2018-06-06 10:45:59 -07:00
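
The retry described in the last commit amounts to re-establishing the watch when the server drops the stream with io.EOF, rather than failing `argocd app wait` outright. A minimal sketch, where watchApp and isTransient are hypothetical helpers rather than Argo CD's actual API:

```go
package watch

import (
	"context"
	"io"
	"time"
)

// watchUntilDone keeps an application watch alive: when the stream ends with
// io.EOF (or another transient connection error), it reconnects instead of
// aborting the wait.
func watchUntilDone(ctx context.Context, watchApp func(context.Context) error, isTransient func(error) bool) error {
	for {
		err := watchApp(ctx)
		switch {
		case ctx.Err() != nil:
			return ctx.Err() // caller cancelled or timed out
		case err == nil:
			return nil // watch completed normally
		case err == io.EOF || isTransient(err):
			time.Sleep(time.Second) // brief pause, then re-establish the watch
		default:
			return err // genuine failure
		}
	}
}
```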
1134 changed files with 292112 additions and 158991 deletions

.argo-ci/ci.yaml Normal file

@@ -0,0 +1,82 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: argo-cd-ci-
spec:
entrypoint: argo-cd-ci
arguments:
parameters:
- name: revision
value: master
- name: repo
value: https://github.com/argoproj/argo-cd.git
templates:
- name: argo-cd-ci
steps:
- - name: build
template: ci-dind
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- make controller-image server-image repo-server-image
- name: test
template: ci-builder
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- dep ensure && make cli lint test test-e2e
- name: ci-builder
inputs:
parameters:
- name: cmd
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["{{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
resources:
requests:
memory: 1024Mi
cpu: 200m
- name: ci-dind
inputs:
parameters:
- name: cmd
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["until docker ps; do sleep 3; done && {{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
env:
- name: DOCKER_HOST
value: 127.0.0.1
resources:
requests:
memory: 1024Mi
cpu: 200m
sidecars:
- name: dind
image: docker:17.10-dind
securityContext:
privileged: true
mirrorVolumeMounts: true

.circleci/config.yml

@@ -1,214 +0,0 @@
version: 2.1
commands:
configure_git:
steps:
- run:
name: Configure Git
command: |
set -x
# must be configured for tests to run
git config --global user.email you@example.com
git config --global user.name "Your Name"
echo "export PATH=/home/circleci/.go_workspace/src/github.com/argoproj/argo-cd/hack:\$PATH" | tee -a $BASH_ENV
echo "export GIT_ASKPASS=git-ask-pass.sh" | tee -a $BASH_ENV
dep_ensure:
steps:
- restore_cache:
keys:
- vendor-v4-{{ checksum "Gopkg.lock" }}
- run:
name: Run dep ensure
command: dep ensure -v
- save_cache:
key: vendor-v4-{{ checksum "Gopkg.lock" }}
paths:
- vendor
install_golang:
steps:
- run:
name: Install Golang v1.12.6
command: |
go get golang.org/dl/go1.12.6
[ -e /home/circleci/sdk/go1.12.6 ] || go1.12.6 download
echo "export GOPATH=/home/circleci/.go_workspace" | tee -a $BASH_ENV
echo "export PATH=/home/circleci/sdk/go1.12.6/bin:\$PATH" | tee -a $BASH_ENV
save_go_cache:
steps:
- save_cache:
key: go-v18-{{ .Branch }}
paths:
- /home/circleci/.go_workspace
- /home/circleci/.cache/go-build
- /home/circleci/sdk/go1.12.6
restore_go_cache:
steps:
- restore_cache:
keys:
- go-v18-{{ .Branch }}
- go-v18-master
- go-v17-{{ .Branch }}
- go-v17-master
jobs:
codegen:
docker:
- image: circleci/golang:1.12
working_directory: /go/src/github.com/argoproj/argo-cd
steps:
- checkout
- restore_cache:
keys: [codegen-v2]
- run: ./hack/install.sh codegen-go-tools
- run: sudo ./hack/install.sh codegen-tools
- run: dep ensure
- save_cache:
key: codegen-v2
paths: [vendor, /tmp/dl, /go/pkg]
- run: helm init --client-only
- run: make codegen-local
- run:
name: Check nothing has changed
command: |
set -xo pipefail
# This makes sure you ran `make pre-commit` before you pushed.
# We exclude the Swagger resources; CircleCI doesn't generate them correctly.
# When this fails, it will create a patch file you can apply locally to fix it.
# To troubleshoot builds: https://argoproj.github.io/argo-cd/developer-guide/ci/
git diff --exit-code -- . ':!Gopkg.lock' ':!assets/swagger.json' | tee codegen.patch
- store_artifacts:
path: codegen.patch
destination: .
test:
working_directory: /home/circleci/.go_workspace/src/github.com/argoproj/argo-cd
machine:
image: circleci/classic:201808-01
steps:
- restore_go_cache
- install_golang
- checkout
- restore_cache:
key: test-dl-v1
- run: sudo ./hack/install.sh kubectl-linux kubectx-linux dep-linux ksonnet-linux helm-linux kustomize-linux
- save_cache:
key: test-dl-v1
paths: [/tmp/dl]
- configure_git
- run: go get github.com/jstemmer/go-junit-report
- dep_ensure
- save_go_cache
- run: make test
- run:
name: Uploading code coverage
command: bash <(curl -s https://codecov.io/bash) -f coverage.out
- store_test_results:
path: test-results
- store_artifacts:
path: test-results
destination: .
e2e:
working_directory: /home/circleci/.go_workspace/src/github.com/argoproj/argo-cd
machine:
image: circleci/classic:201808-01
environment:
ARGOCD_FAKE_IN_CLUSTER: "true"
ARGOCD_SSH_DATA_PATH: "/tmp/argo-e2e/app/config/ssh"
ARGOCD_TLS_DATA_PATH: "/tmp/argo-e2e/app/config/tls"
steps:
- run:
name: Install and start K3S v0.5.0
command: |
curl -sfL https://get.k3s.io | sh -
sudo chmod -R a+rw /etc/rancher/k3s
kubectl version
background: true
environment:
INSTALL_K3S_EXEC: --docker
INSTALL_K3S_VERSION: v0.5.0
- restore_go_cache
- install_golang
- checkout
- restore_cache:
keys: [e2e-dl-v1]
- run: sudo ./hack/install.sh kubectx-linux dep-linux ksonnet-linux helm-linux kustomize-linux
- run: go get github.com/jstemmer/go-junit-report
- save_cache:
key: e2e-dl-v10
paths: [/tmp/dl]
- dep_ensure
- configure_git
- run: make cli
- run:
name: Create namespace
command: |
set -x
kubectl create ns argocd-e2e
kubens argocd-e2e
# install the certificates (not 100% sure we need this)
sudo cp /var/lib/rancher/k3s/server/tls/token-ca.crt /usr/local/share/ca-certificates/k3s.crt
sudo update-ca-certificates
# create the kubecfg, again - not sure we need this
cat /etc/rancher/k3s/k3s.yaml | sed "s/localhost/`hostname`/" | tee ~/.kube/config
echo "127.0.0.1 `hostname`" | sudo tee -a /etc/hosts
- run:
name: Apply manifests
command: kustomize build test/manifests/base | kubectl apply -f -
- run:
name: Start Redis
command: docker run --rm --name argocd-redis -i -p 6379:6379 redis:5.0.3-alpine --save "" --appendonly no
background: true
- run:
name: Start repo server
command: go run ./cmd/argocd-repo-server/main.go --loglevel debug --redis localhost:6379
background: true
- run:
name: Start API server
command: go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:6379 --insecure --dex-server http://localhost:5556 --repo-server localhost:8081 --staticassets ../argo-cd-ui/dist/app
background: true
- run:
name: Start Test Git
command: |
test/fixture/testrepos/start-git.sh
background: true
- run: until curl -v http://localhost:8080/healthz; do sleep 10; done
- run:
name: Start controller
command: go run ./cmd/argocd-application-controller/main.go --loglevel debug --redis localhost:6379 --repo-server localhost:8081 --kubeconfig ~/.kube/config
background: true
- run:
command: PATH=dist:$PATH make test-e2e
environment:
ARGOCD_OPTS: "--server localhost:8080 --plaintext"
ARGOCD_E2E_K3S: "true"
- store_test_results:
path: test-results
- store_artifacts:
path: test-results
destination: .
ui:
docker:
- image: node:11.15.0
working_directory: ~/argo-cd/ui
steps:
- checkout:
path: ~/argo-cd/
- restore_cache:
keys:
- yarn-packages-v4-{{ checksum "yarn.lock" }}
- run: yarn install --frozen-lockfile --ignore-optional --non-interactive
- save_cache:
key: yarn-packages-v4-{{ checksum "yarn.lock" }}
paths: [~/.cache/yarn, node_modules]
- run: yarn test
- run: yarn build
- run: yarn lint
workflows:
version: 2
workflow:
jobs:
- test
- codegen:
requires:
- test
- ui:
requires:
- codegen
- e2e


@@ -1,17 +0,0 @@
ignore:
- "**/*.pb.go"
- "**/*.pb.gw.go"
- "**/*generated.go"
- "**/*generated.deepcopy.go"
- "**/*_test.go"
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
coverage:
status:
# we've found this not to be useful
patch: off
project:
default:
# allow test coverage to drop by 1%, assume that it's typically due to CI problems
threshold: 1

.dockerignore

@@ -1,13 +1,4 @@
# Prevent vendor directory from being copied to ensure we are not pulling unexpected cruft from
# a user's workspace, and are only building off of what is locked by dep.
.vscode/
.idea/
.DS_Store
vendor/
dist/
*.iml
# delve debug binaries
cmd/**/debug
debug.test
coverage.out
ui/node_modules/
vendor
dist


@@ -1,39 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'bug'
assignees: ''
---
Checklist:
* [ ] I've included steps to reproduce the bug.
* [ ] I've pasted the output of `argocd version`.
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
A list of the steps required to reproduce the issue. Best of all, give us the URL to a repository that exhibits this issue.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Version**
```shell
Paste the output from `argocd version` here.
```
**Logs**
```
Paste any relevant application logs here.
```


@@ -1,18 +0,0 @@
---
name: Enhancement proposal
about: Propose an enhancement for this project
title: ''
labels: 'enhancement'
assignees: ''
---
# Summary
What change you think needs making.
# Motivation
Please give examples of your use case, e.g. when would you use this.
# Proposal
How do you think this should be implemented?


@@ -1 +0,0 @@
# See https://github.com/probot/no-response


@@ -1,5 +0,0 @@
Checklist:
* [ ] I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and I feel I've gotten a green light from the community.
* [ ] My build is green ([troubleshooting builds](https://argoproj.github.io/argo-cd/developer-guide/ci/)).
* [ ] Optional. My organisation is added to the README.

.github/stale.yml vendored

@@ -1,4 +0,0 @@
# See https://github.com/probot/stale
# See https://github.com/probot/stale
exemptLabels:
- backlog

.gitignore vendored

@@ -3,10 +3,7 @@
.DS_Store
vendor/
dist/
site/
*.iml
# delve debug binaries
cmd/**/debug
debug.test
coverage.out
test-results

.golangci.yml

@@ -1,22 +0,0 @@
run:
deadline: 2m
skip-files:
- ".*\\.pb\\.go"
skip-dirs:
- pkg/client/
- vendor/
linters:
enable:
- vet
- deadcode
- goimports
- varcheck
- structcheck
- ineffassign
- unconvert
- unparam
linters-settings:
goimports:
local-prefixes: github.com/argoproj/argo-cd
service:
golangci-lint-version: 1.21.0

File diff suppressed because it is too large.

CONTRIBUTING.md Normal file

@@ -0,0 +1,40 @@
## Requirements
Make sure you have the following tools installed: [golang](https://golang.org/), [dep](https://github.com/golang/dep), [protobuf](https://developers.google.com/protocol-buffers/),
[kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
```
$ brew install go dep protobuf kubectl
$ go get -u github.com/golang/protobuf/protoc-gen-go
```
Nice to have [gometalinter](https://github.com/alecthomas/gometalinter) and [goreman](https://github.com/mattn/goreman):
```
$ go get -u gopkg.in/alecthomas/gometalinter.v2 github.com/mattn/goreman && gometalinter.v2 --install
```
## Building
```
$ go get -u github.com/argoproj/argo-cd
$ dep ensure
$ make
```
## Running locally
You need access to a Kubernetes cluster (including [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) or [docker edge](https://docs.docker.com/docker-for-mac/install/)) in order to run Argo CD on your laptop:
* install kubectl: `brew install kubectl`
* make sure `kubectl` is connected to your cluster (e.g. `kubectl get pods` should work).
* install the application CRD using the following command:
```
$ kubectl create -f install/manifests/01_application-crd.yaml
```
* start Argo CD services using [goreman](https://github.com/mattn/goreman):
```
$ goreman start
```

COPYRIGHT Normal file

@@ -0,0 +1,13 @@
Copyright 2017-2018 The Argo Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Dockerfile

@@ -1,127 +0,0 @@
ARG BASE_IMAGE=debian:10-slim
####################################################################################################
# Builder image
# Initial stage which prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM golang:1.12.6 as builder
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
RUN apt-get update && apt-get install -y \
openssh-server \
nginx \
fcgiwrap \
git \
git-lfs \
make \
wget \
gcc \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
WORKDIR /tmp
ADD hack/install.sh .
ADD hack/installers installers
RUN ./install.sh dep-linux
RUN ./install.sh packr-linux
RUN ./install.sh kubectl-linux
RUN ./install.sh ksonnet-linux
RUN ./install.sh helm-linux
RUN ./install.sh kustomize-linux
RUN ./install.sh aws-iam-authenticator-linux
####################################################################################################
# Argo CD Base - used as the base for both the release and dev argocd images
####################################################################################################
FROM $BASE_IMAGE as argocd-base
USER root
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
RUN groupadd -g 999 argocd && \
useradd -r -u 999 -g argocd argocd && \
mkdir -p /home/argocd && \
chown argocd:0 /home/argocd && \
chmod g=u /home/argocd && \
chmod g=u /etc/passwd && \
apt-get update && \
apt-get install -y git git-lfs && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/kubectl
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
COPY --from=builder /usr/local/bin/aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
# script to add current (possibly arbitrary) user to /etc/passwd at runtime
# (if it's not already there, to be openshift friendly)
COPY uid_entrypoint.sh /usr/local/bin/uid_entrypoint.sh
# support for mounting configuration from a configmap
RUN mkdir -p /app/config/ssh && \
touch /app/config/ssh/ssh_known_hosts && \
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
RUN mkdir -p /app/config/tls
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=argocd
USER argocd
WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM node:11.15.0 as argocd-ui
WORKDIR /src
ADD ["ui/package.json", "ui/yarn.lock", "./"]
RUN yarn install
ADD ["ui/", "."]
ARG ARGO_VERSION=latest
ENV ARGO_VERSION=$ARGO_VERSION
RUN NODE_ENV='production' yarn build
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM golang:1.12.6 as argocd-build
COPY --from=builder /usr/local/bin/dep /usr/local/bin/dep
COPY --from=builder /usr/local/bin/packr /usr/local/bin/packr
# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep
# to install all the packages of our dep lock file
COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml
COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock
RUN cd ${GOPATH}/src/dummy && \
dep ensure -vendor-only && \
mv vendor/* ${GOPATH}/src/ && \
rmdir vendor
# Perform the build
WORKDIR /go/src/github.com/argoproj/argo-cd
COPY . .
RUN make cli server controller repo-server argocd-util && \
make CLI_NAME=argocd-darwin-amd64 GOOS=darwin cli
####################################################################################################
# Final image
####################################################################################################
FROM argocd-base
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/
COPY --from=argocd-ui ./src/dist/app /shared/app

Dockerfile-argocd Normal file

@@ -0,0 +1,83 @@
FROM debian:9.3 as builder
RUN apt-get update && apt-get install -y \
git \
make \
wget \
gcc \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Install go
ENV GO_VERSION 1.9.3
ENV GO_ARCH amd64
ENV GOPATH /root/go
ENV PATH ${GOPATH}/bin:/usr/local/go/bin:${PATH}
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \
rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz
# Install protoc, dep, packr
ENV PROTOBUF_VERSION 3.5.1
RUN cd /usr/local && \
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-*.zip && \
wget https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -O /usr/local/bin/dep && \
chmod +x /usr/local/bin/dep && \
wget https://github.com/gobuffalo/packr/releases/download/v1.10.4/packr_1.10.4_linux_amd64.tar.gz && \
tar -vxf packr*.tar.gz -C /tmp/ && \
mv /tmp/packr /usr/local/bin/packr
# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep
# to install all the packages of our dep lock file
COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml
COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock
RUN cd ${GOPATH}/src/dummy && \
dep ensure -vendor-only && \
mv vendor/* ${GOPATH}/src/ && \
rmdir vendor
# Perform the build
WORKDIR /root/go/src/github.com/argoproj/argo-cd
COPY . .
ARG MAKE_TARGET="cli server controller repo-server argocd-util"
RUN make ${MAKE_TARGET}
##############################################################
# This stage will pull in or build any CLI tooling we need for our final image
FROM golang:1.10 as cli-tooling
# NOTE: we frequently switch between tip of master ksonnet vs. official builds. Comment/uncomment
# the corresponding section to switch between the two options:
# Option 1: build ksonnet ourselves
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /ks
# Option 2: use official tagged ksonnet release
ENV KSONNET_VERSION=0.10.2
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /ks
RUN curl -o /kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /kubectl
##############################################################
FROM debian:9.3
RUN apt-get update && apt-get install -y git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=cli-tooling /ks /usr/local/bin/ks
COPY --from=cli-tooling /kubectl /usr/local/bin/kubectl
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=root
COPY --from=builder /root/go/src/github.com/argoproj/argo-cd/dist/* /
ARG BINARY
CMD /${BINARY}

Dockerfile-ci-builder Normal file

@@ -0,0 +1,22 @@
FROM golang:1.9.2
WORKDIR /tmp
RUN curl -O https://get.docker.com/builds/Linux/x86_64/docker-1.13.1.tgz && \
tar -xzf docker-1.13.1.tgz && \
mv docker/docker /usr/local/bin/docker && \
rm -rf ./docker && \
go get -u github.com/golang/dep/cmd/dep && \
go get -u gopkg.in/alecthomas/gometalinter.v2 && \
gometalinter.v2 --install
# Install kubectl
RUN curl -o /kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /kubectl && mv /kubectl /usr/local/bin/kubectl
# Install ksonnet
ENV KSONNET_VERSION=0.10.2
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks && \
rm -rf /tmp/ks_${KSONNET_VERSION}

Dockerfile.dev

@@ -1,6 +0,0 @@
####################################################################################################
# argocd-dev
####################################################################################################
FROM argocd-base
COPY argocd* /usr/local/bin/
COPY --from=argocd-ui ./src/dist/app /shared/app

Gopkg.lock generated

File diff suppressed because it is too large.

Gopkg.toml

@@ -1,82 +1,48 @@
# Packages should only be added to the following list when we use them *outside* of our go code.
# (e.g. we want to build the binary to invoke as part of the build process, such as in
# generate-proto.sh). Normal use of golang packages should be added via `dep ensure`, and pinned
# with a [[constraint]] or [[override]] when version is important.
required = [
"github.com/golang/protobuf/protoc-gen-go",
"github.com/gogo/protobuf/protoc-gen-gofast",
"github.com/gogo/protobuf/protoc-gen-gogofast",
"k8s.io/code-generator/cmd/go-to-protobuf",
"k8s.io/kube-openapi/cmd/openapi-gen",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger",
"golang.org/x/sync/errgroup",
"k8s.io/code-generator/cmd/go-to-protobuf",
]
[[constraint]]
name = "google.golang.org/grpc"
version = "1.15.0"
[[constraint]]
name = "github.com/gogo/protobuf"
version = "1.1.1"
# override github.com/grpc-ecosystem/go-grpc-middleware's constraint on master
[[override]]
name = "github.com/golang/protobuf"
version = "1.2.0"
version = "1.9.2"
[[constraint]]
name = "github.com/grpc-ecosystem/grpc-gateway"
version = "v1.3.1"
# prometheus does not believe in semversioning yet
[[constraint]]
name = "github.com/prometheus/client_golang"
revision = "7858729281ec582767b20e0d696b6041d995d5e0"
# override ksonnet's release-1.8 dependency
[[override]]
branch = "release-1.14"
name = "k8s.io/api"
[[override]]
branch = "release-1.14"
name = "k8s.io/kubernetes"
[[override]]
branch = "release-1.14"
name = "k8s.io/code-generator"
[[override]]
branch = "release-1.14"
branch = "release-1.9"
name = "k8s.io/apimachinery"
[[override]]
branch = "release-11.0"
[[constraint]]
branch = "release-1.9"
name = "k8s.io/api"
[[constraint]]
name = "k8s.io/apiextensions-apiserver"
branch = "release-1.9"
[[constraint]]
branch = "release-1.9"
name = "k8s.io/code-generator"
[[constraint]]
branch = "release-6.0"
name = "k8s.io/client-go"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.2"
version = "1.2.1"
[[constraint]]
name = "github.com/gobuffalo/packr"
version = "v1.11.0"
name = "github.com/ksonnet/ksonnet"
version = "v0.10.1"
[[constraint]]
branch = "master"
name = "github.com/argoproj/pkg"
[[constraint]]
branch = "master"
name = "github.com/yudai/gojsondiff"
[[constraint]]
name = "github.com/spf13/cobra"
revision = "fe5e611709b0c57fa4a89136deaa8e1d4004d053"
# TODO: move off of k8s.io/kube-openapi and use controller-tools for CRD spec generation
# (override argoproj/argo contraint on master)
# override ksonnet's logrus dependency
[[override]]
revision = "411b2483e5034420675ebcdd4a55fc76fe5e55cf"
name = "k8s.io/kube-openapi"
name = "github.com/sirupsen/logrus"
version = "v1.0.3"

LICENSE

@@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-2018 The Argo Authors
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Makefile

@@ -1,4 +1,4 @@
PACKAGE=github.com/argoproj/argo-cd/common
PACKAGE=github.com/argoproj/argo-cd
CURRENT_DIR=$(shell pwd)
DIST_DIR=${CURRENT_DIR}/dist
CLI_NAME=argocd
@@ -9,21 +9,6 @@ GIT_COMMIT=$(shell git rev-parse HEAD)
GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
PACKR_CMD=$(shell if [ "`which packr`" ]; then echo "packr"; else echo "go run vendor/github.com/gobuffalo/packr/packr/main.go"; fi)
VOLUME_MOUNT=$(shell [[ $(go env GOOS)=="darwin" ]] && echo ":delegated" || echo "")
define run-in-dev-tool
docker run --rm -it -u $(shell id -u) -e HOME=/home/user -v ${CURRENT_DIR}:/go/src/github.com/argoproj/argo-cd${VOLUME_MOUNT} -w /go/src/github.com/argoproj/argo-cd argocd-dev-tools bash -c "GOPATH=/go $(1)"
endef
PATH:=$(PATH):$(PWD)/hack
# docker image publishing options
DOCKER_PUSH?=false
IMAGE_NAMESPACE?=
# perform static compilation
STATIC_BUILD?=true
# build development images
DEV_IMAGE?=false
override LDFLAGS += \
-X ${PACKAGE}.version=${VERSION} \
@@ -31,14 +16,19 @@ override LDFLAGS += \
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}
ifeq (${STATIC_BUILD}, true)
override LDFLAGS += -extldflags "-static"
endif
# docker image publishing options
DOCKER_PUSH=false
IMAGE_TAG=latest
ifneq (${GIT_TAG},)
IMAGE_TAG=${GIT_TAG}
LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
endif
ifneq (${IMAGE_NAMESPACE},)
override LDFLAGS += -X ${PACKAGE}/install.imageNamespace=${IMAGE_NAMESPACE}
endif
ifneq (${IMAGE_TAG},)
override LDFLAGS += -X ${PACKAGE}/install.imageTag=${IMAGE_TAG}
endif
ifeq (${DOCKER_PUSH},true)
ifndef IMAGE_NAMESPACE
@@ -51,173 +41,102 @@ IMAGE_PREFIX=${IMAGE_NAMESPACE}/
endif
.PHONY: all
all: cli image argocd-util
all: cli server-image controller-image repo-server-image argocd-util
.PHONY: protogen
protogen:
./hack/generate-proto.sh
.PHONY: openapigen
openapigen:
./hack/update-openapi.sh
.PHONY: clientgen
clientgen:
./hack/update-codegen.sh
.PHONY: codegen-local
codegen-local: protogen clientgen openapigen manifests-local
.PHONY: codegen
codegen: dev-tools-image
$(call run-in-dev-tool,make codegen-local)
codegen: protogen clientgen
# NOTE: we use packr to do the build instead of go, since we embed .yaml files into the go binary.
# This enables ease of maintenance of the yaml files.
.PHONY: cli
cli: clean-debug
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
cli:
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
.PHONY: release-cli
release-cli: clean-debug image
docker create --name tmp-argocd-linux $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)
docker cp tmp-argocd-linux:/usr/local/bin/argocd ${DIST_DIR}/argocd-linux-amd64
docker cp tmp-argocd-linux:/usr/local/bin/argocd-darwin-amd64 ${DIST_DIR}/argocd-darwin-amd64
.PHONY: cli-linux
cli-linux:
docker build --iidfile /tmp/argocd-linux-id --target builder --build-arg MAKE_TARGET="cli IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-linux-amd64" -f Dockerfile-argocd .
docker create --name tmp-argocd-linux `cat /tmp/argocd-linux-id`
docker cp tmp-argocd-linux:/root/go/src/github.com/argoproj/argo-cd/dist/argocd-linux-amd64 dist/
docker rm tmp-argocd-linux
.PHONY: cli-darwin
cli-darwin:
docker build --iidfile /tmp/argocd-darwin-id --target builder --build-arg MAKE_TARGET="cli GOOS=darwin IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-darwin-amd64" -f Dockerfile-argocd .
docker create --name tmp-argocd-darwin `cat /tmp/argocd-darwin-id`
docker cp tmp-argocd-darwin:/root/go/src/github.com/argoproj/argo-cd/dist/argocd-darwin-amd64 dist/
docker rm tmp-argocd-darwin
.PHONY: argocd-util
argocd-util: clean-debug
# Build argocd-util as a statically linked binary, so it could run within the alpine-based dex container (argoproj/argo-cd#844)
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
argocd-util:
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
.PHONY: dev-tools-image
dev-tools-image:
cd hack && docker build -t argocd-dev-tools . -f Dockerfile.dev-tools
.PHONY: manifests-local
manifests-local:
./hack/update-manifests.sh
.PHONY: manifests
manifests:
$(call run-in-dev-tool,make manifests-local IMAGE_TAG='${IMAGE_TAG}')
# NOTE: we use packr to do the build instead of go, since we embed swagger files and policy.csv
# files into the go binary
.PHONY: server
server: clean-debug
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
server:
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
.PHONY: server-image
server-image:
docker build --build-arg BINARY=argocd-server -t $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) ; fi
.PHONY: repo-server
repo-server:
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
.PHONY: repo-server-image
repo-server-image:
docker build --build-arg BINARY=argocd-repo-server -t $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) ; fi
.PHONY: controller
controller:
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
.PHONY: packr
packr:
go build -o ${DIST_DIR}/packr ./vendor/github.com/gobuffalo/packr/packr/
.PHONY: controller-image
controller-image:
docker build --build-arg BINARY=argocd-application-controller -t $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) ; fi
.PHONY: image
ifeq ($(DEV_IMAGE), true)
# The "dev" image builds the binaries from the users desktop environment (instead of in Docker)
# which speeds up builds. Dockerfile.dev needs to be copied into dist to perform the build, since
# the dist directory is under .dockerignore.
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
image: packr
docker build -t argocd-base --target argocd-base .
docker build -t argocd-ui --target argocd-ui .
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd/argocd
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-darwin-amd64 ./cmd/argocd
cp Dockerfile.dev dist
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
else
image:
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
endif
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
.PHONY: cli-image
cli-image:
docker build --build-arg BINARY=argocd -t $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) ; fi
.PHONY: builder-image
builder-image:
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) --target builder .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) ; fi
.PHONY: dep
dep:
dep ensure -v
.PHONY: dep-ensure
dep-ensure:
dep ensure -no-vendor
.PHONY: install-lint-tools
install-lint-tools:
./hack/install.sh lint-tools
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) -f Dockerfile-ci-builder .
.PHONY: lint
lint:
golangci-lint --version
golangci-lint run --fix --verbose
.PHONY: build
build:
go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
gometalinter.v2 --config gometalinter.json ./...
.PHONY: test
test:
./hack/test.sh -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`
go test `go list ./... | grep -v "github.com/argoproj/argo-cd/test/e2e"`
.PHONY: test-e2e
test-e2e:
./hack/test.sh -timeout 15m ./test/e2e
.PHONY: start-e2e
start-e2e: cli
killall goreman || true
# check we can connect to Docker to start Redis
docker version
kubectl create ns argocd-e2e || true
kubectl config set-context --current --namespace=argocd-e2e
kustomize build test/manifests/base | kubectl apply -f -
# set paths for locally managed ssh known hosts and tls certs data
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
ARGOCD_E2E_DISABLE_AUTH=false \
ARGOCD_ZJWT_FEATURE_FLAG=always \
goreman start
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes
.PHONY: clean-debug
clean-debug:
-find ${CURRENT_DIR} -name debug.test | xargs rm -f
go test ./test/e2e
.PHONY: clean
clean: clean-debug
clean:
-rm -rf ${CURRENT_DIR}/dist
.PHONY: start
start:
killall goreman || true
# check we can connect to Docker to start Redis
docker version
kubectl create ns argocd || true
kubens argocd
ARGOCD_ZJWT_FEATURE_FLAG=always \
goreman start
.PHONY: pre-commit
pre-commit: dep-ensure codegen build lint test
.PHONY: precheckin
precheckin: test lint
.PHONY: release-precheck
release-precheck: manifests
release-precheck:
@if [ "$(GIT_TREE_STATE)" != "clean" ]; then echo 'git tree state is $(GIT_TREE_STATE)' ; exit 1; fi
@if [ -z "$(GIT_TAG)" ]; then echo 'commit must be tagged to perform release' ; exit 1; fi
@if [ "$(GIT_TAG)" != "v`cat VERSION`" ]; then echo 'VERSION does not match git tag'; exit 1; fi
.PHONY: release
release: pre-commit release-precheck image release-cli
release: release-precheck precheckin cli-darwin cli-linux server-image controller-image repo-server-image cli-image
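
The packr notes in the Makefile above refer to embedding YAML and policy files into the Go binaries at build time. A minimal sketch of that pattern, assuming packr's v1 API and an illustrative asset path:

```go
package main

import (
	"fmt"

	"github.com/gobuffalo/packr"
)

func main() {
	// At build time the packr CLI scans for NewBox calls and generates code
	// that embeds the directory contents, so the YAML ships inside the binary.
	box := packr.NewBox("./assets") // illustrative path
	fmt.Println(box.String("builtin-policy.csv"))
}
```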

OWNERS

@@ -1,12 +1,8 @@
owners:
- alexec
- alexmt
- jessesuen
reviewers:
- jannfis
approvers:
- alexec
- alexmt
- jessesuen
- merenbach

Procfile

@@ -1,7 +1,5 @@
controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-application-controller/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --staticassets ui/dist/app"
dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/dexidp/dex:v2.14.0 serve /dex.yaml"
redis: docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:5.0.3-alpine --save "" --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}
repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-repo-server/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
git-server: test/fixture/testrepos/start-git.sh
controller: go run ./cmd/argocd-application-controller/main.go --app-resync 10
api-server: go run ./cmd/argocd-server/main.go --insecure --disable-auth
repo-server: go run ./cmd/argocd-repo-server/main.go --loglevel debug
dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p 5556:5556 -p 5557:5557 -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/coreos/dex:v2.10.0 serve /dex.yaml"
redis: docker run --rm -p 6379:6379 redis:3.2.11

README.md

@@ -1,61 +1,91 @@
[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
[![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd)
[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-cd?label=argo-cd)](https://github.com/argoproj/argo-cd/releases/latest)
# Argo CD - Declarative Continuous Delivery for Kubernetes
# Argo CD - GitOps Continuous Delivery for Kubernetes
## What is Argo CD?
Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.
![Argo CD UI](docs/assets/argocd-ui.gif)
Argo CD is a declarative, continuous delivery service based on ksonnet for Kubernetes.
## Why Argo CD?
Application definitions, configurations, and environments should be declarative and version controlled.
Application deployment and lifecycle management should be automated, auditable, and easy to understand.
## Getting Started
## Who uses Argo CD?
Follow our [getting started guide](docs/getting_started.md).
Organizations below are **officially** using Argo CD. Please send a PR with your organization name if you are using Argo CD.
## How it works
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [Codility](https://www.codility.com/)
1. [Commonbond](https://commonbond.co/)
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
1. [END.](https://www.endclothing.com/)
1. [Future PLC](https://www.futureplc.com/)
1. [GMETRI](https://gmetri.com/)
1. [Intuit](https://www.intuit.com/)
1. [KintoHub](https://www.kintohub.com/)
1. [KompiTech GmbH](https://www.kompitech.com/)
1. [Lytt](https://www.lytt.co/)
1. [Mambu](https://www.mambu.com/)
1. [Mirantis](https://mirantis.com/)
1. [OpenSaaS Studio](https://opensaas.studio)
1. [Optoro](https://www.optoro.com/)
1. [Riskified](https://www.riskified.com/)
1. [Saildrone](https://www.saildrone.com/)
1. [Tesla](https://tesla.com/)
1. [tZERO](https://www.tzero.com/)
1. [Ticketmaster](https://ticketmaster.com)
1. [Yieldlab](https://www.yieldlab.de/)
1. [UBIO](https://ub.io/)
1. [Volvo Cars](https://www.volvocars.com/)
Argo CD uses git repositories as the source of truth for defining the desired application state as
well as the target deployment environments. Kubernetes manifests are specified as
[ksonnet](https://ksonnet.io) applications. Argo CD automates the deployment of the desired
application states in the specified target environments.
## Documentation
![Argo CD Architecture](docs/argocd_architecture.png)
To learn more about Argo CD [go to the complete documentation](https://argoproj.github.io/argo-cd/).
Application deployments can track updates to branches, tags, or pinned to a specific version of
manifests at a git commit. See [tracking strategies](docs/tracking_strategies.md) for additional
details about the different tracking strategies available.
## Community Blogs and Presentations
Argo CD is implemented as a kubernetes controller which continuously monitors running applications
and compares the current, live state against the desired target state (as specified in the git repo).
A deployed application whose live state deviates from the target state is considered out-of-sync.
Argo CD reports & visualizes the differences as well as providing facilities to automatically or
manually sync the live state back to the desired target state. Any modifications made to the desired
target state in the git repo can be automatically applied and reflected in the specified target
environments.
1. [Comparison of Argo CD, Spinnaker, Jenkins X, and Tekton](https://www.inovex.de/blog/spinnaker-vs-argo-cd-vs-tekton-vs-jenkins-x/)
1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager 3.1.2](https://medium.com/ibm-cloud/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2-4395af317359)
1. [GitOps for Kubeflow using Argo CD](https://www.kubeflow.org/docs/use-cases/gitops-for-kubeflow/)
1. [GitOps Toolsets on Kubernetes with CircleCI and Argo CD](https://www.digitalocean.com/community/tutorials/webinar-series-gitops-tool-sets-on-kubernetes-with-circleci-and-argo-cd)
1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager](https://www.ibm.com/blogs/bluemix/2019/02/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2/)
1. [CI/CD in Light Speed with K8s and Argo CD](https://www.youtube.com/watch?v=OdzH82VpMwI&feature=youtu.be)
1. [Machine Learning as Code](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU). Among other things, describes how Kubeflow uses Argo CD to implement GitOps for ML
1. [Argo CD - GitOps Continuous Delivery for Kubernetes](https://www.youtube.com/watch?v=aWDIQMbp1cc&feature=youtu.be&t=1m4s)
For additional details, see [architecture overview](docs/architecture.md).
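
The comparison loop described above can be sketched abstractly: render the target state from git, read the live state from the cluster, and flag any deviation as out-of-sync. Everything below is illustrative, not the controller's real types:

```go
package reconcile

import "reflect"

// SyncStatus is illustrative, not Argo CD's actual type.
type SyncStatus string

const (
	Synced    SyncStatus = "Synced"
	OutOfSync SyncStatus = "OutOfSync"
)

// compareState flags any deviation between the desired objects rendered from
// git and the live objects read from the cluster (keyed here by kind/name).
func compareState(target, live map[string]interface{}) SyncStatus {
	for key, desired := range target {
		if actual, ok := live[key]; !ok || !reflect.DeepEqual(desired, actual) {
			return OutOfSync
		}
	}
	return Synced
}
```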
## Features
* Automated deployment of applications to specified target environments
* Continuous monitoring of deployed applications
* Automated or manual syncing of applications to their target state
* Web and CLI based visualization of applications and differences between live vs. target state
* Rollback/Roll-anywhere to any application state committed in the git repository
* SSO Integration (OIDC, LDAP, SAML 2.0, GitLab, Microsoft, LinkedIn)
* Webhook Integration (GitHub, BitBucket, GitLab)
## What is ksonnet?
* [Jsonnet](http://jsonnet.org), the basis for ksonnet, is a domain specific configuration language,
which provides extreme flexibility for composing and manipulating JSON/YAML specifications.
* [Ksonnet](http://ksonnet.io) goes one step further by applying Jsonnet principles to Kubernetes
manifests. It provides an opinionated file & directory structure to organize applications into
reusable components, parameters, and environments. Environments can be hierarchical, which promotes
both re-use and granular customization of application and environment specifications.
## Why ksonnet?
Application configuration management is a hard problem and grows rapidly in complexity as you deploy
more applications across more and more environments. Current templating systems, such as Jinja
and Golang templating, are unnatural ways to maintain Kubernetes manifests and are not well suited to
capturing subtle configuration differences between environments. Their ability to compose and re-use
application and environment configurations is also very limited.
Imagine we have a single guestbook application deployed in the following environments:
| Environment | K8s Version | Application Image | DB Connection String | Environment Vars | Sidecars |
|---------------|-------------|------------------------|-----------------------|------------------|---------------|
| minikube      | 1.10.0      | jesse/guestbook:latest | sql://localhost/db    | DEBUG=true       |               |
| dev | 1.9.0 | app/guestbook:latest | sql://dev-test/db | DEBUG=true | |
| staging | 1.8.0 | app/guestbook:e3c0263 | sql://staging/db | | istio,dnsmasq |
| us-west-1 | 1.8.0 | app/guestbook:abc1234 | sql://prod/db | FOO_FEATURE=true | istio,dnsmasq |
| us-west-2 | 1.8.0 | app/guestbook:abc1234 | sql://prod/db | | istio,dnsmasq |
| us-east-1 | 1.9.0 | app/guestbook:abc1234 | sql://prod/db | BAR_FEATURE=true | istio,dnsmasq |
Ksonnet:
* Enables composition and re-use of common YAML specifications
* Allows overrides, additions, and subtractions of YAML sub-components specific to each environment
* Guarantees proper generation of K8s manifests suitable for the corresponding Kubernetes API version
* Provides [kubernetes-specific jsonnet libraries](https://github.com/ksonnet/ksonnet-lib) to enable
concise definition of kubernetes manifests
## Development Status
* Argo CD is in early development
## Roadmap
* PreSync, PostSync, OutOfSync hooks
* Customized application actions as Argo workflows
* Blue/Green & canary upgrades

VERSION

@@ -1 +1 @@
1.3.6
0.4.7


@@ -1,22 +0,0 @@
(deleted file: a 131×20 status-badge SVG, 5.6 KiB, consisting of left/right colored panels, an embedded Argo logo PNG, and four text placeholders)


@@ -1,35 +0,0 @@
# Built-in policy which defines two roles: role:readonly and role:admin,
# and additionally assigns the admin user to the role:admin role.
# There are two policy formats:
# 1. Applications (which belong to a project):
# p, <user/group>, <resource>, <action>, <project>/<object>
# 2. All other resources:
# p, <user/group>, <resource>, <action>, <object>
p, role:readonly, applications, get, */*, allow
p, role:readonly, certificates, get, *, allow
p, role:readonly, clusters, get, *, allow
p, role:readonly, repositories, get, *, allow
p, role:readonly, projects, get, *, allow
p, role:admin, applications, create, */*, allow
p, role:admin, applications, update, */*, allow
p, role:admin, applications, delete, */*, allow
p, role:admin, applications, sync, */*, allow
p, role:admin, applications, override, */*, allow
p, role:admin, applications, action/*, */*, allow
p, role:admin, certificates, create, *, allow
p, role:admin, certificates, update, *, allow
p, role:admin, certificates, delete, *, allow
p, role:admin, clusters, create, *, allow
p, role:admin, clusters, update, *, allow
p, role:admin, clusters, delete, *, allow
p, role:admin, repositories, create, *, allow
p, role:admin, repositories, update, *, allow
p, role:admin, repositories, delete, *, allow
p, role:admin, projects, create, *, allow
p, role:admin, projects, update, *, allow
p, role:admin, projects, delete, *, allow
g, role:admin, role:readonly
g, admin, role:admin


@@ -1,14 +0,0 @@
[request_definition]
r = sub, res, act, obj
[policy_definition]
p = sub, res, act, obj, eft
[role_definition]
g = _, _
[policy_effect]
e = some(where (p.eft == allow)) && !some(where (p.eft == deny))
[matchers]
m = g(r.sub, p.sub) && keyMatch(r.res, p.res) && keyMatch(r.act, p.act) && keyMatch(r.obj, p.obj)
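
Together with the built-in policy above, this model is evaluated by casbin. A minimal sketch assuming casbin's v1 Go API, with the file names illustrative and request arguments in the model's request_definition order (sub, res, act, obj):

```go
package main

import (
	"fmt"

	"github.com/casbin/casbin"
)

func main() {
	e := casbin.NewEnforcer("model.conf", "builtin-policy.csv")
	// admin is bound to role:admin, which in turn inherits role:readonly.
	fmt.Println(e.Enforce("admin", "applications", "sync", "default/guestbook")) // true
	fmt.Println(e.Enforce("someone", "clusters", "delete", "prod"))              // false: no matching policy
}
```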

File diff suppressed because it is too large.

cmd/argocd-application-controller/main.go

@@ -2,10 +2,18 @@ package main
import (
"context"
"flag"
"fmt"
"os"
"strconv"
"time"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
@@ -14,18 +22,8 @@ import (
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
"github.com/argoproj/argo-cd/reposerver"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver/apiclient"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
"github.com/argoproj/argo-cd/util/stats"
)
const (
@@ -37,30 +35,29 @@ const (
func newCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
repoServerAddress string
repoServerTimeoutSeconds int
selfHealTimeoutSeconds int
statusProcessors int
operationProcessors int
logLevel string
glogLevel int
metricsPort int
kubectlParallelismLimit int64
cacheSrc func() (*cache.Cache, error)
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
repoServerAddress string
statusProcessors int
operationProcessors int
logLevel string
glogLevel int
)
var command = cobra.Command{
Use: cliName,
Short: "application-controller is a controller to operate on applications CRD",
RunE: func(c *cobra.Command, args []string) error {
cli.SetLogLevel(logLevel)
cli.SetGLogLevel(glogLevel)
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
// Set the glog level for the k8s go-client
_ = flag.CommandLine.Parse([]string{})
_ = flag.Lookup("logtostderr").Value.Set("true")
_ = flag.Lookup("v").Value.Set(strconv.Itoa(glogLevel))
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
kubeClient := kubernetes.NewForConfigOrDie(config)
appClient := appclientset.NewForConfigOrDie(config)
@@ -68,37 +65,31 @@ func newCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
// TODO (amatyushentsev): Use config map to store controller configuration
controllerConfig := controller.ApplicationControllerConfig{
Namespace: namespace,
InstanceID: "",
}
db := db.NewDB(namespace, kubeClient)
resyncDuration := time.Duration(appResyncPeriod) * time.Second
repoClientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds)
appStateManager := controller.NewAppStateManager(db, appClient, reposerver.NewRepositoryServerClientset(repoServerAddress), namespace)
appHealthManager := controller.NewAppHealthManager(db, namespace)
appController := controller.NewApplicationController(
namespace,
kubeClient,
appClient,
db,
appStateManager,
appHealthManager,
resyncDuration,
&controllerConfig)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cache, err := cacheSrc()
errors.CheckError(err)
settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)
kubectl := &kube.KubectlCmd{}
appController, err := controller.NewApplicationController(
namespace,
settingsMgr,
kubeClient,
appClient,
repoClientset,
cache,
kubectl,
resyncDuration,
time.Duration(selfHealTimeoutSeconds)*time.Second,
metricsPort,
kubectlParallelismLimit)
errors.CheckError(err)
log.Infof("Application Controller (version: %s) starting (namespace: %s)", common.GetVersion(), namespace)
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
log.Infof("Application Controller (version: %s) starting (namespace: %s)", argocd.GetVersion(), namespace)
go appController.Run(ctx, statusProcessors, operationProcessors)
// Wait forever
select {}
},
@@ -106,17 +97,11 @@ func newCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", defaultAppResyncPeriod, "Time period in seconds for application resync.")
command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", 60, "Repo server RPC call timeout seconds.")
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().IntVar(&statusProcessors, "status-processors", 1, "Number of application status processors")
command.Flags().IntVar(&operationProcessors, "operation-processors", 1, "Number of application operation processors")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", 5, "Specifies timeout between application self heal attempts")
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", 20, "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
cacheSrc = cache.AddCacheFlagsToCmd(&command)
return &command
}
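
Taken together, the flags added in this diff could be exercised with an invocation along these lines (a sketch only; the binary name, addresses, and values are illustrative, not recommendations):

argocd-application-controller \
  --repo-server argocd-repo-server:8081 \
  --repo-server-timeout-seconds 60 \
  --status-processors 20 \
  --operation-processors 10 \
  --kubectl-parallelism-limit 20 \
  --self-heal-timeout-seconds 5 \
  --metrics-port 8082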


@@ -3,64 +3,48 @@ package main
import (
"fmt"
"net"
"net/http"
"os"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/metrics"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/stats"
"github.com/argoproj/argo-cd/util/tls"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/ksonnet"
"github.com/go-redis/redis"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
const (
// CLIName is the name of the CLI
cliName = "argocd-repo-server"
port = 8081
)
func newCommand() *cobra.Command {
var (
logLevel string
parallelismLimit int64
listenPort int
metricsPort int
cacheSrc func() (*cache.Cache, error)
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
logLevel string
)
var command = cobra.Command{
Use: cliName,
Short: "Run argocd-repo-server",
RunE: func(c *cobra.Command, args []string) error {
cli.SetLogLevel(logLevel)
tlsConfigCustomizer, err := tlsConfigCustomizerSrc()
errors.CheckError(err)
cache, err := cacheSrc()
errors.CheckError(err)
metricsServer := metrics.NewMetricsServer()
server, err := reposerver.NewServer(metricsServer, cache, tlsConfigCustomizer, parallelismLimit)
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
server := reposerver.NewServer(git.NewFactory(), newCache())
grpc := server.CreateGRPC()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", listenPort))
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
errors.CheckError(err)
http.Handle("/metrics", metricsServer.GetHandler())
go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf(":%d", metricsPort), nil)) }()
ksVers, err := ksonnet.KsonnetVersion()
errors.CheckError(err)
log.Infof("argocd-repo-server %s serving on %s", common.GetVersion(), listener.Addr())
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
log.Infof("argocd-repo-server %s serving on %s", argocd.GetVersion(), listener.Addr())
log.Infof("ksonnet version: %s", ksVers)
err = grpc.Serve(listener)
errors.CheckError(err)
return nil
@@ -68,14 +52,19 @@ func newCommand() *cobra.Command {
}
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().Int64Var(&parallelismLimit, "parallelismlimit", 0, "Limit on number of concurrent manifest generation requests. Any value less than 1 means no limit.")
command.Flags().IntVar(&listenPort, "port", common.DefaultPortRepoServer, "Listen on given port for incoming connections")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortRepoServerMetrics, "Start metrics server on given port")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = cache.AddCacheFlagsToCmd(&command)
return &command
}
func newCache() cache.Cache {
//return cache.NewInMemoryCache(repository.DefaultRepoCacheExpiration)
client := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "",
DB: 0,
})
return cache.NewRedisCache(client, repository.DefaultRepoCacheExpiration)
}
func main() {
if err := newCommand().Execute(); err != nil {
fmt.Println(err)
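
A recurring pattern in these diffs is the replacement of direct construction (such as the newCache helper shown above) with "source" closures like cacheSrc and tlsConfigCustomizerSrc returned by AddCacheFlagsToCmd and AddTLSFlagsToCmd: the helper registers flags on the command and hands back a constructor that reads the final flag values only when RunE executes. A minimal, self-contained sketch of that pattern, using hypothetical names rather than the real util packages:

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

// cache is a stand-in for the real cache type; illustrative only.
type cache struct{ redisAddr string }

// addCacheFlags registers cache-related flags on the command and returns a
// closure that builds the cache only after cobra has parsed the command line.
func addCacheFlags(cmd *cobra.Command) func() (*cache, error) {
    var redisAddr string
    cmd.Flags().StringVar(&redisAddr, "redis", "localhost:6379", "Redis server address")
    return func() (*cache, error) {
        if redisAddr == "" {
            return nil, fmt.Errorf("--redis must not be empty")
        }
        return &cache{redisAddr: redisAddr}, nil
    }
}

func main() {
    var cacheSrc func() (*cache, error)
    command := &cobra.Command{
        Use: "example",
        RunE: func(c *cobra.Command, args []string) error {
            // Flags are parsed by now, so the closure sees their final values.
            store, err := cacheSrc()
            if err != nil {
                return err
            }
            fmt.Printf("cache backed by redis at %s\n", store.redisAddr)
            return nil
        },
    }
    cacheSrc = addCacheFlags(command)
    if err := command.Execute(); err != nil {
        fmt.Println(err)
    }
}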


@@ -2,91 +2,62 @@ package commands
import (
"context"
"time"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/server"
"github.com/argoproj/argo-cd/util/cli"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver/apiclient"
"github.com/argoproj/argo-cd/server"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/stats"
"github.com/argoproj/argo-cd/util/tls"
)
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
insecure bool
listenPort int
metricsPort int
logLevel string
glogLevel int
clientConfig clientcmd.ClientConfig
repoServerTimeoutSeconds int
staticAssetsDir string
baseHRef string
repoServerAddress string
dexServerAddress string
disableAuth bool
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
cacheSrc func() (*cache.Cache, error)
insecure bool
logLevel string
clientConfig clientcmd.ClientConfig
staticAssetsDir string
repoServerAddress string
disableAuth bool
)
var command = &cobra.Command{
Use: cliName,
Short: "Run the argocd API server",
Long: "Run the argocd API server",
Run: func(c *cobra.Command, args []string) {
cli.SetLogLevel(logLevel)
cli.SetGLogLevel(glogLevel)
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
tlsConfigCustomizer, err := tlsConfigCustomizerSrc()
errors.CheckError(err)
cache, err := cacheSrc()
errors.CheckError(err)
kubeclientset := kubernetes.NewForConfigOrDie(config)
appclientset := appclientset.NewForConfigOrDie(config)
repoclientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds)
repoclientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
argoCDOpts := server.ArgoCDServerOpts{
Insecure: insecure,
ListenPort: listenPort,
MetricsPort: metricsPort,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
BaseHRef: baseHRef,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
DexServerAddr: dexServerAddress,
DisableAuth: disableAuth,
TLSConfigCustomizer: tlsConfigCustomizer,
Cache: cache,
Insecure: insecure,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
DisableAuth: disableAuth,
}
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
argocd := server.NewServer(argoCDOpts)
for {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
argocd := server.NewServer(ctx, argoCDOpts)
argocd.Run(ctx, listenPort, metricsPort)
argocd.Run(ctx, 8080)
cancel()
}
},
@@ -95,17 +66,9 @@ func NewCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().BoolVar(&insecure, "insecure", false, "Run server without TLS")
command.Flags().StringVar(&staticAssetsDir, "staticassets", "", "Static assets directory path")
command.Flags().StringVar(&baseHRef, "basehref", "/", "Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address")
command.Flags().StringVar(&dexServerAddress, "dex-server", common.DefaultDexServerAddr, "Dex server address")
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().BoolVar(&disableAuth, "disable-auth", false, "Disable client authentication")
command.AddCommand(cli.NewVersionCmd(cliName))
command.Flags().IntVar(&listenPort, "port", common.DefaultPortAPIServer, "Listen on given port")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDAPIServerMetrics, "Start metrics on given port")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", 60, "Repo server RPC call timeout seconds.")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
cacheSrc = cache.AddCacheFlagsToCmd(command)
return command
}
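
The API server flags introduced here might be combined as follows (an illustrative sketch, not a canonical deployment command; addresses and paths are assumptions):

argocd-server \
  --insecure \
  --basehref /argocd \
  --repo-server argocd-repo-server:8081 \
  --dex-server http://argocd-dex-server:5556 \
  --port 8080 \
  --metrics-port 8083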


@@ -1,38 +1,21 @@
package main
import (
"bufio"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"regexp"
"syscall"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/dex"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
@@ -43,15 +26,6 @@ import (
const (
// CLIName is the name of the CLI
cliName = "argocd-util"
// YamlSeparator separates sections of a YAML file
yamlSeparator = "---\n"
)
var (
configMapResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}
secretResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
applicationsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applications"}
appprojectsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "appprojects"}
)
// NewCommand returns a new instance of an argocd command
@@ -62,7 +36,7 @@ func NewCommand() *cobra.Command {
var command = &cobra.Command{
Use: cliName,
Short: "argocd-util has internal tools used by Argo CD",
Short: "argocd-util has internal tools used by ArgoCD",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
@@ -71,10 +45,6 @@ func NewCommand() *cobra.Command {
command.AddCommand(cli.NewVersionCmd(cliName))
command.AddCommand(NewRunDexCommand())
command.AddCommand(NewGenDexConfigCommand())
command.AddCommand(NewImportCommand())
command.AddCommand(NewExportCommand())
command.AddCommand(NewClusterConfig())
command.AddCommand(NewProjectsCommand())
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
return command
@@ -86,7 +56,7 @@ func NewRunDexCommand() *cobra.Command {
)
var command = cobra.Command{
Use: "rundex",
Short: "Runs dex generating a config using settings from the Argo CD configmap and secret",
Short: "Runs dex generating a config using settings from the ArgoCD configmap and secret",
RunE: func(c *cobra.Command, args []string) error {
_, err := exec.LookPath("dex")
errors.CheckError(err)
@@ -95,22 +65,24 @@ func NewRunDexCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
prevSettings, err := settingsMgr.GetSettings()
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
updateCh := make(chan *settings.ArgoCDSettings, 1)
ctx := context.Background()
settingsMgr.StartNotifier(ctx, settings)
updateCh := make(chan struct{}, 1)
settingsMgr.Subscribe(updateCh)
for {
var cmd *exec.Cmd
dexCfgBytes, err := dex.GenerateDexConfigYAML(prevSettings)
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
errors.CheckError(err)
if len(dexCfgBytes) == 0 {
log.Infof("dex is not configured")
} else {
err = ioutil.WriteFile("/tmp/dex.yaml", dexCfgBytes, 0644)
errors.CheckError(err)
log.Info(redactor(string(dexCfgBytes)))
log.Info(string(dexCfgBytes))
cmd = exec.Command("dex", "serve", "/tmp/dex.yaml")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -120,11 +92,10 @@ func NewRunDexCommand() *cobra.Command {
// loop until the dex config changes
for {
newSettings := <-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(newSettings)
<-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
errors.CheckError(err)
if string(newDexCfgBytes) != string(dexCfgBytes) {
prevSettings = newSettings
log.Infof("dex config modified. restarting dex")
if cmd != nil && cmd.Process != nil {
err = cmd.Process.Signal(syscall.SIGTERM)
@@ -152,14 +123,14 @@ func NewGenDexConfigCommand() *cobra.Command {
)
var command = cobra.Command{
Use: "gendexcfg",
Short: "Generates a dex config from Argo CD settings",
Short: "Generates a dex config from ArgoCD settings",
RunE: func(c *cobra.Command, args []string) error {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
@@ -169,29 +140,7 @@ func NewGenDexConfigCommand() *cobra.Command {
return nil
}
if out == "" {
dexCfg := make(map[string]interface{})
err := yaml.Unmarshal(dexCfgBytes, &dexCfg)
errors.CheckError(err)
if staticClientsInterface, ok := dexCfg["staticClients"]; ok {
if staticClients, ok := staticClientsInterface.([]interface{}); ok {
for i := range staticClients {
staticClient := staticClients[i]
if mappings, ok := staticClient.(map[string]interface{}); ok {
for key := range mappings {
if key == "secret" {
mappings[key] = "******"
}
}
staticClients[i] = mappings
}
}
dexCfg["staticClients"] = staticClients
}
}
errors.CheckError(err)
maskedDexCfgBytes, err := yaml.Marshal(dexCfg)
errors.CheckError(err)
fmt.Print(string(maskedDexCfgBytes))
fmt.Printf(string(dexCfgBytes))
} else {
err = ioutil.WriteFile(out, dexCfgBytes, 0644)
errors.CheckError(err)
@@ -205,341 +154,6 @@ func NewGenDexConfigCommand() *cobra.Command {
return &command
}
// NewImportCommand defines a new command for importing Kubernetes and Argo CD resources.
func NewImportCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
prune bool
dryRun bool
)
var command = cobra.Command{
Use: "import SOURCE",
Short: "Import Argo CD data from stdin (specify `-') or a file",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = 100
config.Burst = 50
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
acdClients := newArgoCDClientsets(config, namespace)
var input []byte
if in := args[0]; in == "-" {
input, err = ioutil.ReadAll(os.Stdin)
} else {
input, err = ioutil.ReadFile(in)
}
errors.CheckError(err)
var dryRunMsg string
if dryRun {
dryRunMsg = " (dry run)"
}
// pruneObjects tracks live objects and their current resource versions. Any remaining
// items in this map indicate that the resource should be pruned, since it no longer appears
// in the backup
pruneObjects := make(map[kube.ResourceKey]string)
configMaps, err := acdClients.configMaps.List(metav1.ListOptions{})
errors.CheckError(err)
for _, cm := range configMaps.Items {
cmName := cm.GetName()
if cmName == common.ArgoCDConfigMapName || cmName == common.ArgoCDRBACConfigMapName {
pruneObjects[kube.ResourceKey{Group: "", Kind: "ConfigMap", Name: cm.GetName()}] = cm.GetResourceVersion()
}
}
secrets, err := acdClients.secrets.List(metav1.ListOptions{})
errors.CheckError(err)
for _, secret := range secrets.Items {
if isArgoCDSecret(nil, secret) {
pruneObjects[kube.ResourceKey{Group: "", Kind: "Secret", Name: secret.GetName()}] = secret.GetResourceVersion()
}
}
applications, err := acdClients.applications.List(metav1.ListOptions{})
errors.CheckError(err)
for _, app := range applications.Items {
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "Application", Name: app.GetName()}] = app.GetResourceVersion()
}
projects, err := acdClients.projects.List(metav1.ListOptions{})
errors.CheckError(err)
for _, proj := range projects.Items {
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "AppProject", Name: proj.GetName()}] = proj.GetResourceVersion()
}
// Create or replace existing object
objs, err := kube.SplitYAML(string(input))
errors.CheckError(err)
for _, obj := range objs {
gvk := obj.GroupVersionKind()
key := kube.ResourceKey{Group: gvk.Group, Kind: gvk.Kind, Name: obj.GetName()}
resourceVersion, exists := pruneObjects[key]
delete(pruneObjects, key)
var dynClient dynamic.ResourceInterface
switch obj.GetKind() {
case "Secret":
dynClient = acdClients.secrets
case "ConfigMap":
dynClient = acdClients.configMaps
case "AppProject":
dynClient = acdClients.projects
case "Application":
dynClient = acdClients.applications
}
if !exists {
if !dryRun {
_, err = dynClient.Create(obj, metav1.CreateOptions{})
errors.CheckError(err)
}
fmt.Printf("%s/%s %s created%s\n", gvk.Group, gvk.Kind, obj.GetName(), dryRunMsg)
} else {
if !dryRun {
obj.SetResourceVersion(resourceVersion)
_, err = dynClient.Update(obj, metav1.UpdateOptions{})
errors.CheckError(err)
}
fmt.Printf("%s/%s %s replaced%s\n", gvk.Group, gvk.Kind, obj.GetName(), dryRunMsg)
}
}
// Delete objects not in backup
for key := range pruneObjects {
if prune {
var dynClient dynamic.ResourceInterface
switch key.Kind {
case "Secret":
dynClient = acdClients.secrets
case "AppProject":
dynClient = acdClients.projects
case "Application":
dynClient = acdClients.applications
default:
log.Fatalf("Unexpected kind '%s' in prune list", key.Kind)
}
if !dryRun {
err = dynClient.Delete(key.Name, &metav1.DeleteOptions{})
errors.CheckError(err)
}
fmt.Printf("%s/%s %s pruned%s\n", key.Group, key.Kind, key.Name, dryRunMsg)
} else {
fmt.Printf("%s/%s %s needs pruning\n", key.Group, key.Kind, key.Name)
}
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().BoolVar(&dryRun, "dry-run", false, "Print what will be performed")
command.Flags().BoolVar(&prune, "prune", false, "Prune secrets, applications and projects which do not appear in the backup")
return &command
}
type argoCDClientsets struct {
configMaps dynamic.ResourceInterface
secrets dynamic.ResourceInterface
applications dynamic.ResourceInterface
projects dynamic.ResourceInterface
}
func newArgoCDClientsets(config *rest.Config, namespace string) *argoCDClientsets {
dynamicIf, err := dynamic.NewForConfig(config)
errors.CheckError(err)
return &argoCDClientsets{
configMaps: dynamicIf.Resource(configMapResource).Namespace(namespace),
secrets: dynamicIf.Resource(secretResource).Namespace(namespace),
applications: dynamicIf.Resource(applicationsResource).Namespace(namespace),
projects: dynamicIf.Resource(appprojectsResource).Namespace(namespace),
}
}
// NewExportCommand defines a new command for exporting Kubernetes and Argo CD resources.
func NewExportCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
out string
)
var command = cobra.Command{
Use: "export",
Short: "Export all Argo CD data to stdout (default) or a file",
Run: func(c *cobra.Command, args []string) {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
var writer io.Writer
if out == "-" {
writer = os.Stdout
} else {
f, err := os.Create(out)
errors.CheckError(err)
defer util.Close(f)
writer = bufio.NewWriter(f)
}
acdClients := newArgoCDClientsets(config, namespace)
acdConfigMap, err := acdClients.configMaps.Get(common.ArgoCDConfigMapName, metav1.GetOptions{})
errors.CheckError(err)
export(writer, *acdConfigMap)
acdRBACConfigMap, err := acdClients.configMaps.Get(common.ArgoCDRBACConfigMapName, metav1.GetOptions{})
errors.CheckError(err)
export(writer, *acdRBACConfigMap)
acdKnownHostsConfigMap, err := acdClients.configMaps.Get(common.ArgoCDKnownHostsConfigMapName, metav1.GetOptions{})
errors.CheckError(err)
export(writer, *acdKnownHostsConfigMap)
acdTLSCertsConfigMap, err := acdClients.configMaps.Get(common.ArgoCDTLSCertsConfigMapName, metav1.GetOptions{})
errors.CheckError(err)
export(writer, *acdTLSCertsConfigMap)
referencedSecrets := getReferencedSecrets(*acdConfigMap)
secrets, err := acdClients.secrets.List(metav1.ListOptions{})
errors.CheckError(err)
for _, secret := range secrets.Items {
if isArgoCDSecret(referencedSecrets, secret) {
export(writer, secret)
}
}
projects, err := acdClients.projects.List(metav1.ListOptions{})
errors.CheckError(err)
for _, proj := range projects.Items {
export(writer, proj)
}
applications, err := acdClients.applications.List(metav1.ListOptions{})
errors.CheckError(err)
for _, app := range applications.Items {
export(writer, app)
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout")
return &command
}
// getReferencedSecrets examines the argocd-cm config for any referenced repo secrets and returns a
// map of all referenced secrets.
func getReferencedSecrets(un unstructured.Unstructured) map[string]bool {
var cm apiv1.ConfigMap
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &cm)
errors.CheckError(err)
referencedSecrets := make(map[string]bool)
if reposRAW, ok := cm.Data["repositories"]; ok {
repoCreds := make([]settings.RepoCredentials, 0)
err := yaml.Unmarshal([]byte(reposRAW), &repoCreds)
errors.CheckError(err)
for _, cred := range repoCreds {
if cred.PasswordSecret != nil {
referencedSecrets[cred.PasswordSecret.Name] = true
}
if cred.SSHPrivateKeySecret != nil {
referencedSecrets[cred.SSHPrivateKeySecret.Name] = true
}
if cred.UsernameSecret != nil {
referencedSecrets[cred.UsernameSecret.Name] = true
}
if cred.TLSClientCertDataSecret != nil {
referencedSecrets[cred.TLSClientCertDataSecret.Name] = true
}
if cred.TLSClientCertKeySecret != nil {
referencedSecrets[cred.TLSClientCertKeySecret.Name] = true
}
}
}
return referencedSecrets
}
// isArgoCDSecret returns whether or not the given secret is a part of Argo CD configuration
// (e.g. argocd-secret, repo credentials, or cluster credentials)
func isArgoCDSecret(repoSecretRefs map[string]bool, un unstructured.Unstructured) bool {
secretName := un.GetName()
if secretName == common.ArgoCDSecretName {
return true
}
if repoSecretRefs != nil {
if _, ok := repoSecretRefs[secretName]; ok {
return true
}
}
if labels := un.GetLabels(); labels != nil {
if _, ok := labels[common.LabelKeySecretType]; ok {
return true
}
}
if annotations := un.GetAnnotations(); annotations != nil {
if annotations[common.AnnotationKeyManagedBy] == common.AnnotationValueManagedByArgoCD {
return true
}
}
return false
}
// export writes the unstructured object and removes extraneous cruft from output before writing
func export(w io.Writer, un unstructured.Unstructured) {
name := un.GetName()
finalizers := un.GetFinalizers()
apiVersion := un.GetAPIVersion()
kind := un.GetKind()
labels := un.GetLabels()
annotations := un.GetAnnotations()
unstructured.RemoveNestedField(un.Object, "metadata")
un.SetName(name)
un.SetFinalizers(finalizers)
un.SetAPIVersion(apiVersion)
un.SetKind(kind)
un.SetLabels(labels)
un.SetAnnotations(annotations)
data, err := yaml.Marshal(un.Object)
errors.CheckError(err)
_, err = w.Write(data)
errors.CheckError(err)
_, err = w.Write([]byte(yamlSeparator))
errors.CheckError(err)
}
// NewClusterConfig returns a new instance of `argocd-util kubeconfig` command
func NewClusterConfig() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
)
var command = &cobra.Command{
Use: "kubeconfig CLUSTER_URL OUTPUT_PATH",
Short: "Generates kubeconfig for the specified cluster",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
serverUrl := args[0]
output := args[1]
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeclientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
cluster, err := db.NewDB(namespace, settings.NewSettingsManager(context.Background(), kubeclientset, namespace), kubeclientset).GetCluster(context.Background(), serverUrl)
errors.CheckError(err)
err = kube.WriteKubeConfig(cluster.RESTConfig(), namespace, output)
errors.CheckError(err)
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}
func redactor(dirtyString string) string {
dirtyString = regexp.MustCompile("(clientSecret: )[^ \n]*").ReplaceAllString(dirtyString, "$1********")
return regexp.MustCompile("(secret: )[^ \n]*").ReplaceAllString(dirtyString, "$1********")
}
func main() {
if err := NewCommand().Execute(); err != nil {
fmt.Println(err)
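
The removed export and import commands formed a backup/restore pair; a typical round trip, with a hypothetical file name and the flags defined above, would be:

argocd-util export --out backup.yaml
argocd-util import backup.yaml --dry-run
argocd-util import backup.yaml --prune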


@@ -1,73 +0,0 @@
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
var textToRedact = `
- config:
clientID: aabbccddeeff00112233
clientSecret: $dex.github.clientSecret
orgs:
- name: your-github-org
redirectURI: https://argocd.example.com/api/dex/callback
id: github
name: GitHub
type: github
grpc:
addr: 0.0.0.0:5557
issuer: https://argocd.example.com/api/dex
oauth2:
skipApprovalScreen: true
staticClients:
- id: argo-cd
name: Argo CD
redirectURIs:
- https://argocd.example.com/auth/callback
secret: Dis9M-GA11oTwZVQQWdDklPQw-sWXZkWJFyyEhMs
- id: argo-cd-cli
name: Argo CD CLI
public: true
redirectURIs:
- http://localhost
storage:
type: memory
web:
http: 0.0.0.0:5556`
var expectedRedaction = `
- config:
clientID: aabbccddeeff00112233
clientSecret: ********
orgs:
- name: your-github-org
redirectURI: https://argocd.example.com/api/dex/callback
id: github
name: GitHub
type: github
grpc:
addr: 0.0.0.0:5557
issuer: https://argocd.example.com/api/dex
oauth2:
skipApprovalScreen: true
staticClients:
- id: argo-cd
name: Argo CD
redirectURIs:
- https://argocd.example.com/auth/callback
secret: ********
- id: argo-cd-cli
name: Argo CD CLI
public: true
redirectURIs:
- http://localhost
storage:
type: memory
web:
http: 0.0.0.0:5556`
func TestSecretsRedactor(t *testing.T) {
assert.Equal(t, expectedRedaction, redactor(textToRedact))
}


@@ -1,192 +0,0 @@
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
appclient "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/typed/application/v1alpha1"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/diff"
"github.com/argoproj/argo-cd/util/kube"
"github.com/spf13/cobra"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
)
func NewProjectsCommand() *cobra.Command {
var command = &cobra.Command{
Use: "projects",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewUpdatePolicyRuleCommand())
return command
}
func globMatch(pattern string, val string) bool {
if pattern == "*" {
return true
}
if ok, err := filepath.Match(pattern, val); ok && err == nil {
return true
}
return false
}
func getModification(modification string, resource string, scope string, permission string) (func(string, string) string, error) {
switch modification {
case "set":
if scope == "" {
return nil, fmt.Errorf("Flag --scope cannot be empty if permission should be set in role")
}
if permission == "" {
return nil, fmt.Errorf("Flag --permission cannot be empty if permission should be set in role")
}
return func(proj string, action string) string {
return fmt.Sprintf("%s, %s, %s/%s, %s", resource, action, proj, scope, permission)
}, nil
case "remove":
return func(proj string, action string) string {
return ""
}, nil
}
return nil, fmt.Errorf("modification %s is not supported", modification)
}
func saveProject(updated v1alpha1.AppProject, orig v1alpha1.AppProject, projectsIf appclient.AppProjectInterface, dryRun bool) error {
fmt.Printf("===== %s ======\n", updated.Name)
target, err := kube.ToUnstructured(&updated)
errors.CheckError(err)
live, err := kube.ToUnstructured(&orig)
if err != nil {
return err
}
_ = diff.PrintDiff(updated.Name, target, live)
if !dryRun {
_, err = projectsIf.Update(&updated)
if err != nil {
return err
}
}
return nil
}
func formatPolicy(proj string, role string, permission string) string {
return fmt.Sprintf("p, proj:%s:%s, %s", proj, role, permission)
}
func split(input string, delimiter string) []string {
parts := strings.Split(input, delimiter)
for i := range parts {
parts[i] = strings.TrimSpace(parts[i])
}
return parts
}
func NewUpdatePolicyRuleCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
resource string
scope string
rolePattern string
permission string
dryRun bool
)
var command = &cobra.Command{
Use: "update-role-policy PROJECT_GLOB MODIFICATION ACTION",
Short: "Implement bulk project role update. Useful to back-fill existing project policies or remove obsolete actions.",
Example: ` # Add policy that allows executing any action (action/*) to roles whose name matches *deployer* in all projects
argocd-util projects update-role-policy '*' set 'action/*' --role '*deployer*' --resource applications --scope '*' --permission allow
# Remove policy that manages running (action/*) from all roles whose name matches *deployer* in all projects
argocd-util projects update-role-policy '*' remove override --role '*deployer*'
`,
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projectGlob := args[0]
modificationType := args[1]
action := args[2]
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = 100
config.Burst = 50
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
appclients := appclientset.NewForConfigOrDie(config)
modification, err := getModification(modificationType, resource, scope, permission)
errors.CheckError(err)
projIf := appclients.ArgoprojV1alpha1().AppProjects(namespace)
err = updateProjects(projIf, projectGlob, rolePattern, action, modification, dryRun)
errors.CheckError(err)
},
}
command.Flags().StringVar(&resource, "resource", "", "Resource e.g. 'applications'")
command.Flags().StringVar(&scope, "scope", "", "Resource scope e.g. '*'")
command.Flags().StringVar(&rolePattern, "role", "*", "Role name pattern e.g. '*deployer*'")
command.Flags().StringVar(&permission, "permission", "", "Action permission")
command.Flags().BoolVar(&dryRun, "dry-run", true, "Dry run")
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}
func updateProjects(projIf appclient.AppProjectInterface, projectGlob string, rolePattern string, action string, modification func(string, string) string, dryRun bool) error {
projects, err := projIf.List(v1.ListOptions{})
if err != nil {
return err
}
for _, proj := range projects.Items {
if !globMatch(projectGlob, proj.Name) {
continue
}
origProj := proj.DeepCopy()
updated := false
for i, role := range proj.Spec.Roles {
if !globMatch(rolePattern, role.Name) {
continue
}
actionPolicyIndex := -1
for i := range role.Policies {
parts := split(role.Policies[i], ",")
if len(parts) != 6 || parts[3] != action {
continue
}
actionPolicyIndex = i
break
}
policyPermission := modification(proj.Name, action)
if actionPolicyIndex == -1 && policyPermission != "" {
updated = true
role.Policies = append(role.Policies, formatPolicy(proj.Name, role.Name, policyPermission))
} else if actionPolicyIndex > -1 && policyPermission == "" {
updated = true
role.Policies = append(role.Policies[:actionPolicyIndex], role.Policies[actionPolicyIndex+1:]...)
} else if actionPolicyIndex > -1 && policyPermission != "" {
updated = true
role.Policies[actionPolicyIndex] = formatPolicy(proj.Name, role.Name, policyPermission)
}
proj.Spec.Roles[i] = role
}
if updated {
err = saveProject(proj, *origProj, projIf, dryRun)
if err != nil {
return err
}
}
}
return nil
}
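
Tracing updateProjects with concrete, made-up inputs: for project default, role deployer, action action/restart, and a set modification built from --resource applications --scope '*' --permission allow, the modification returns the fragment "applications, action/restart, default/*, allow", which formatPolicy expands to the full rule "p, proj:default:deployer, applications, action/restart, default/*, allow".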


@@ -1,78 +0,0 @@
package main
import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
)
const (
namespace = "default"
)
func newProj(name string, roleNames ...string) *v1alpha1.AppProject {
var roles []v1alpha1.ProjectRole
for i := range roleNames {
roles = append(roles, v1alpha1.ProjectRole{Name: roleNames[i]})
}
return &v1alpha1.AppProject{ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: namespace,
}, Spec: v1alpha1.AppProjectSpec{
Roles: roles,
}}
}
func TestUpdateProjects_FindMatchingProject(t *testing.T) {
clientset := fake.NewSimpleClientset(newProj("foo", "test"), newProj("bar", "test"))
modification, err := getModification("set", "*", "*", "allow")
assert.NoError(t, err)
err = updateProjects(clientset.ArgoprojV1alpha1().AppProjects(namespace), "ba*", "*", "set", modification, false)
assert.NoError(t, err)
fooProj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get("foo", v1.GetOptions{})
assert.NoError(t, err)
assert.Len(t, fooProj.Spec.Roles[0].Policies, 0)
barProj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get("bar", v1.GetOptions{})
assert.NoError(t, err)
assert.EqualValues(t, barProj.Spec.Roles[0].Policies, []string{"p, proj:bar:test, *, set, bar/*, allow"})
}
func TestUpdateProjects_FindMatchingRole(t *testing.T) {
clientset := fake.NewSimpleClientset(newProj("proj", "foo", "bar"))
modification, err := getModification("set", "*", "*", "allow")
assert.NoError(t, err)
err = updateProjects(clientset.ArgoprojV1alpha1().AppProjects(namespace), "*", "fo*", "set", modification, false)
assert.NoError(t, err)
proj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get("proj", v1.GetOptions{})
assert.NoError(t, err)
assert.EqualValues(t, proj.Spec.Roles[0].Policies, []string{"p, proj:proj:foo, *, set, proj/*, allow"})
assert.Len(t, proj.Spec.Roles[1].Policies, 0)
}
func TestGetModification_SetPolicy(t *testing.T) {
modification, err := getModification("set", "*", "*", "allow")
assert.NoError(t, err)
policy := modification("proj", "myaction")
assert.Equal(t, "*, myaction, proj/*, allow", policy)
}
func TestGetModification_RemovePolicy(t *testing.T) {
modification, err := getModification("remove", "*", "*", "allow")
assert.NoError(t, err)
policy := modification("proj", "myaction")
assert.Equal(t, "", policy)
}
func TestGetModification_NotSupported(t *testing.T) {
_, err := getModification("bar", "*", "*", "allow")
assert.Errorf(t, err, "modification bar is not supported")
}


@@ -1,146 +0,0 @@
package commands
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"syscall"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
accountpkg "github.com/argoproj/argo-cd/pkg/apiclient/account"
"github.com/argoproj/argo-cd/pkg/apiclient/session"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/localconfig"
)
func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "account",
Short: "Manage account settings",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
command.AddCommand(NewAccountUpdatePasswordCommand(clientOpts))
command.AddCommand(NewAccountGetUserInfoCommand(clientOpts))
return command
}
func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
currentPassword string
newPassword string
)
var command = &cobra.Command{
Use: "update-password",
Short: "Update password",
Run: func(c *cobra.Command, args []string) {
if len(args) != 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
if currentPassword == "" {
fmt.Print("*** Enter current password: ")
password, err := terminal.ReadPassword(syscall.Stdin)
errors.CheckError(err)
currentPassword = string(password)
fmt.Print("\n")
}
if newPassword == "" {
var err error
newPassword, err = cli.ReadAndConfirmPassword()
errors.CheckError(err)
}
updatePasswordRequest := accountpkg.UpdatePasswordRequest{
NewPassword: newPassword,
CurrentPassword: currentPassword,
}
acdClient := argocdclient.NewClientOrDie(clientOpts)
conn, usrIf := acdClient.NewAccountClientOrDie()
defer util.Close(conn)
ctx := context.Background()
_, err := usrIf.UpdatePassword(ctx, &updatePasswordRequest)
errors.CheckError(err)
fmt.Printf("Password updated\n")
// Get a new JWT token after updating the password
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
errors.CheckError(err)
configCtx, err := localCfg.ResolveContext(clientOpts.Context)
errors.CheckError(err)
claims, err := configCtx.User.Claims()
errors.CheckError(err)
tokenString := passwordLogin(acdClient, claims.Subject, newPassword)
localCfg.UpsertUser(localconfig.User{
Name: localCfg.CurrentContext,
AuthToken: tokenString,
})
err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath)
errors.CheckError(err)
fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext)
},
}
command.Flags().StringVar(&currentPassword, "current-password", "", "current password you wish to change")
command.Flags().StringVar(&newPassword, "new-password", "", "new password you want to update to")
return command
}
func NewAccountGetUserInfoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "get-user-info",
Short: "Get user info",
Run: func(c *cobra.Command, args []string) {
if len(args) != 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, client := argocdclient.NewClientOrDie(clientOpts).NewSessionClientOrDie()
defer util.Close(conn)
ctx := context.Background()
response, err := client.GetUserInfo(ctx, &session.GetUserInfoRequest{})
errors.CheckError(err)
switch output {
case "yaml":
yamlBytes, err := yaml.Marshal(response)
errors.CheckError(err)
fmt.Println(string(yamlBytes))
case "json":
jsonBytes, err := json.MarshalIndent(response, "", " ")
errors.CheckError(err)
fmt.Println(string(jsonBytes))
case "":
fmt.Printf("Logged In: %v\n", response.LoggedIn)
if response.LoggedIn {
fmt.Printf("Username: %s\n", response.Username)
fmt.Printf("Issuer: %s\n", response.Iss)
fmt.Printf("Groups: %v\n", strings.Join(response.Groups, ","))
}
default:
log.Fatalf("Unknown output format: %s", output)
}
},
}
command.Flags().StringVarP(&output, "output", "o", "", "Output format. One of: yaml, json")
return command
}
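
For reference, the removed account commands could be invoked along these lines (the output format name comes from the --output flag defined above):

argocd account update-password
argocd account get-user-info --output json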

File diff suppressed because it is too large


@@ -1,175 +0,0 @@
package commands
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"text/tabwriter"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
applicationpkg "github.com/argoproj/argo-cd/pkg/apiclient/application"
"github.com/argoproj/argo-cd/util"
)
type DisplayedAction struct {
Group string
Kind string
Name string
Action string
Disabled bool
}
// NewApplicationResourceActionsCommand returns a new instance of an `argocd app actions` command
func NewApplicationResourceActionsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "actions",
Short: "Manage Resource actions",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
command.AddCommand(NewApplicationResourceActionsListCommand(clientOpts))
command.AddCommand(NewApplicationResourceActionsRunCommand(clientOpts))
return command
}
// NewApplicationResourceActionsListCommand returns a new instance of an `argocd app actions list` command
func NewApplicationResourceActionsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var namespace string
var kind string
var group string
var resourceName string
var output string
var command = &cobra.Command{
Use: "list APPNAME",
Short: "Lists available actions on a resource",
}
command.Run = func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
appName := args[0]
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
ctx := context.Background()
resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName})
errors.CheckError(err)
filteredObjects := filterResources(command, resources.Items, group, kind, namespace, resourceName, true)
var availableActions []DisplayedAction
for i := range filteredObjects {
obj := filteredObjects[i]
gvk := obj.GroupVersionKind()
availActionsForResource, err := appIf.ListResourceActions(ctx, &applicationpkg.ApplicationResourceRequest{
Name: &appName,
Namespace: obj.GetNamespace(),
ResourceName: obj.GetName(),
Group: gvk.Group,
Kind: gvk.Kind,
})
errors.CheckError(err)
for _, action := range availActionsForResource.Actions {
displayAction := DisplayedAction{
Group: gvk.Group,
Kind: gvk.Kind,
Name: obj.GetName(),
Action: action.Name,
Disabled: action.Disabled,
}
availableActions = append(availableActions, displayAction)
}
}
switch output {
case "yaml":
yamlBytes, err := yaml.Marshal(availableActions)
errors.CheckError(err)
fmt.Println(string(yamlBytes))
case "json":
jsonBytes, err := json.MarshalIndent(availableActions, "", " ")
errors.CheckError(err)
fmt.Println(string(jsonBytes))
case "":
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "GROUP\tKIND\tNAME\tACTION\tDISABLED\n")
fmt.Println()
for _, action := range availableActions {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", action.Group, action.Kind, action.Name, action.Action, strconv.FormatBool(action.Disabled))
}
_ = w.Flush()
}
}
command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource")
command.Flags().StringVar(&kind, "kind", "", "Kind")
command.Flags().StringVar(&group, "group", "", "Group")
command.Flags().StringVar(&namespace, "namespace", "", "Namespace")
command.Flags().StringVarP(&output, "out", "o", "", "Output format. One of: yaml, json")
return command
}
// NewApplicationResourceActionsRunCommand returns a new instance of an `argocd app actions run` command
func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var namespace string
var resourceName string
var kind string
var group string
var all bool
var command = &cobra.Command{
Use: "run APPNAME ACTION",
Short: "Runs an available action on resource(s)",
}
command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource")
command.Flags().StringVar(&namespace, "namespace", "", "Namespace")
command.Flags().StringVar(&kind, "kind", "", "Kind")
command.Flags().StringVar(&group, "group", "", "Group")
errors.CheckError(command.MarkFlagRequired("kind"))
command.Flags().BoolVar(&all, "all", false, "Indicates whether to run the action on multiple matching resources")
command.Run = func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
appName := args[0]
actionName := args[1]
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
ctx := context.Background()
resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName})
errors.CheckError(err)
filteredObjects := filterResources(command, resources.Items, group, kind, namespace, resourceName, all)
var resGroup = filteredObjects[0].GroupVersionKind().Group
for i := range filteredObjects[1:] {
if filteredObjects[i].GroupVersionKind().Group != resGroup {
log.Fatal("Ambiguous resource group. Use flag --group to specify resource group explicitly.")
}
}
for i := range filteredObjects {
obj := filteredObjects[i]
gvk := obj.GroupVersionKind()
objResourceName := obj.GetName()
_, err := appIf.RunResourceAction(context.Background(), &applicationpkg.ResourceActionRunRequest{
Name: &appName,
Namespace: obj.GetNamespace(),
ResourceName: objResourceName,
Group: gvk.Group,
Kind: gvk.Kind,
Action: actionName,
})
errors.CheckError(err)
}
}
return command
}
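
The actions subcommands removed here could be invoked along these lines (my-app and the restart action are placeholders; --kind is mandatory for run, and --all permits acting on multiple matching resources):

argocd app actions list my-app
argocd app actions run my-app restart --kind Deployment --all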


@@ -1,55 +0,0 @@
package commands
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)
func TestParseLabels(t *testing.T) {
validLabels := []string{"key=value", "foo=bar", "intuit=inc"}
result, err := parseLabels(validLabels)
assert.NoError(t, err)
assert.Len(t, result, 3)
invalidLabels := []string{"key=value", "too=many=equals"}
_, err = parseLabels(invalidLabels)
assert.Error(t, err)
emptyLabels := []string{}
result, err = parseLabels(emptyLabels)
assert.NoError(t, err)
assert.Len(t, result, 0)
}
func Test_setHelmOpt(t *testing.T) {
t.Run("Zero", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{})
assert.Nil(t, src.Helm)
})
t.Run("ValueFiles", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{valueFiles: []string{"foo"}})
assert.Equal(t, []string{"foo"}, src.Helm.ValueFiles)
})
t.Run("ReleaseName", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{releaseName: "foo"})
assert.Equal(t, "foo", src.Helm.ReleaseName)
})
t.Run("HelmSets", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{helmSets: []string{"foo=bar"}})
assert.Equal(t, []v1alpha1.HelmParameter{{Name: "foo", Value: "bar"}}, src.Helm.Parameters)
})
t.Run("HelmSetStrings", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{helmSetStrings: []string{"foo=bar"}})
assert.Equal(t, []v1alpha1.HelmParameter{{Name: "foo", Value: "bar", ForceString: true}}, src.Helm.Parameters)
})
}


@@ -1,290 +0,0 @@
package commands
import (
"context"
"fmt"
"os"
"sort"
"strings"
"text/tabwriter"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
certificatepkg "github.com/argoproj/argo-cd/pkg/apiclient/certificate"
appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
certutil "github.com/argoproj/argo-cd/util/cert"
"crypto/x509"
)
// NewCertCommand returns a new instance of an `argocd cert` command
func NewCertCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "cert",
Short: "Manage repository certificates and SSH known hosts entries",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
command.AddCommand(NewCertAddSSHCommand(clientOpts))
command.AddCommand(NewCertAddTLSCommand(clientOpts))
command.AddCommand(NewCertListCommand(clientOpts))
command.AddCommand(NewCertRemoveCommand(clientOpts))
return command
}
func NewCertAddTLSCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
fromFile string
upsert bool
)
var command = &cobra.Command{
Use: "add-tls SERVERNAME",
Short: "Add TLS certificate data for connecting to repository server SERVERNAME",
Run: func(c *cobra.Command, args []string) {
conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie()
defer util.Close(conn)
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
var certificateArray []string
var err error
if fromFile != "" {
fmt.Printf("Reading TLS certificate data in PEM format from '%s'\n", fromFile)
certificateArray, err = certutil.ParseTLSCertificatesFromPath(fromFile)
} else {
fmt.Println("Enter TLS certificate data in PEM format. Press CTRL-D when finished.")
certificateArray, err = certutil.ParseTLSCertificatesFromStream(os.Stdin)
}
errors.CheckError(err)
certificateList := make([]appsv1.RepositoryCertificate, 0)
subjectMap := make(map[string]*x509.Certificate)
for _, entry := range certificateArray {
// We want to make sure to only send valid certificate data to the
// server, so we decode the certificate into X509 structure before
// further processing it.
x509cert, err := certutil.DecodePEMCertificateToX509(entry)
errors.CheckError(err)
// TODO: We need a better way to detect duplicates sent in the stream,
// maybe by using fingerprints? For now, no two certs with the same
// subject may be sent.
if subjectMap[x509cert.Subject.String()] != nil {
fmt.Printf("ERROR: Cert with subject '%s' already seen in the input stream.\n", x509cert.Subject.String())
continue
} else {
subjectMap[x509cert.Subject.String()] = x509cert
}
}
serverName := args[0]
if len(certificateArray) > 0 {
certificateList = append(certificateList, appsv1.RepositoryCertificate{
ServerName: serverName,
CertType: "https",
CertData: []byte(strings.Join(certificateArray, "\n")),
})
certificates, err := certIf.CreateCertificate(context.Background(), &certificatepkg.RepositoryCertificateCreateRequest{
Certificates: &appsv1.RepositoryCertificateList{
Items: certificateList,
},
Upsert: upsert,
})
errors.CheckError(err)
fmt.Printf("Created entry with %d PEM certificates for repository server %s\n", len(certificates.Items), serverName)
} else {
fmt.Printf("No valid certificates have been detected in the stream.\n")
}
},
}
command.Flags().StringVar(&fromFile, "from", "", "read TLS certificate data from file (default is to read from stdin)")
command.Flags().BoolVar(&upsert, "upsert", false, "Replace existing TLS certificate if certificate is different in input")
return command
}
// NewCertAddSSHCommand returns a new instance of an `argocd cert add-ssh` command
func NewCertAddSSHCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
fromFile string
batchProcess bool
upsert bool
certificates []appsv1.RepositoryCertificate
)
var command = &cobra.Command{
Use: "add-ssh --batch",
Short: "Add SSH known host entries for repository servers",
Run: func(c *cobra.Command, args []string) {
conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie()
defer util.Close(conn)
var sshKnownHostsLists []string
var err error
// --batch is a flag, but it is mandatory for now.
if batchProcess {
if fromFile != "" {
fmt.Printf("Reading SSH known hosts entries from file '%s'\n", fromFile)
sshKnownHostsLists, err = certutil.ParseSSHKnownHostsFromPath(fromFile)
} else {
fmt.Println("Enter SSH known hosts entries, one per line. Press CTRL-D when finished.")
sshKnownHostsLists, err = certutil.ParseSSHKnownHostsFromStream(os.Stdin)
}
} else {
err = fmt.Errorf("You need to specify --batch or specify --help for usage instructions")
}
errors.CheckError(err)
if len(sshKnownHostsLists) == 0 {
errors.CheckError(fmt.Errorf("No valid SSH known hosts data found."))
}
for _, knownHostsEntry := range sshKnownHostsLists {
hostname, certSubType, certData, err := certutil.TokenizeSSHKnownHostsEntry(knownHostsEntry)
errors.CheckError(err)
_, _, err = certutil.KnownHostsLineToPublicKey(knownHostsEntry)
errors.CheckError(err)
certificate := appsv1.RepositoryCertificate{
ServerName: hostname,
CertType: "ssh",
CertSubType: certSubType,
CertData: certData,
}
certificates = append(certificates, certificate)
}
certList := &appsv1.RepositoryCertificateList{Items: certificates}
response, err := certIf.CreateCertificate(context.Background(), &certificatepkg.RepositoryCertificateCreateRequest{
Certificates: certList,
Upsert: upsert,
})
errors.CheckError(err)
fmt.Printf("Successfully created %d SSH known host entries\n", len(response.Items))
},
}
command.Flags().StringVar(&fromFile, "from", "", "Read SSH known hosts data from file (default is to read from stdin)")
command.Flags().BoolVar(&batchProcess, "batch", false, "Perform batch processing by reading in SSH known hosts data (mandatory flag)")
command.Flags().BoolVar(&upsert, "upsert", false, "Replace existing SSH server public host keys if key is different in input")
return command
}
// NewCertRemoveCommand returns a new instance of an `argocd cert rm` command
func NewCertRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
certType string
certSubType string
certQuery certificatepkg.RepositoryCertificateQuery
)
var command = &cobra.Command{
Use: "rm REPOSERVER",
Short: "Remove certificate of TYPE for REPOSERVER",
Run: func(c *cobra.Command, args []string) {
if len(args) < 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie()
defer util.Close(conn)
hostNamePattern := args[0]
// Prevent the user from specifying a wildcard as hostname as a precautionary
// measure -- the user could still use "?*" or any other pattern to
// remove all certificates, but it is less likely to happen by
// accident.
if hostNamePattern == "*" {
err := fmt.Errorf("A single wildcard is not allowed as REPOSERVER name.")
errors.CheckError(err)
}
certQuery = certificatepkg.RepositoryCertificateQuery{
HostNamePattern: hostNamePattern,
CertType: certType,
CertSubType: certSubType,
}
removed, err := certIf.DeleteCertificate(context.Background(), &certQuery)
errors.CheckError(err)
if len(removed.Items) > 0 {
for _, cert := range removed.Items {
fmt.Printf("Removed cert for '%s' of type '%s' (subtype '%s')\n", cert.ServerName, cert.CertType, cert.CertSubType)
}
} else {
fmt.Println("No certificates were removed (none matched the given patterns)")
}
},
}
command.Flags().StringVar(&certType, "cert-type", "", "Only remove certs of given type (ssh, https)")
command.Flags().StringVar(&certSubType, "cert-sub-type", "", "Only remove certs of given sub-type (only for ssh)")
return command
}
// NewCertListCommand returns a new instance of an `argocd cert list` command
func NewCertListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
certType string
hostNamePattern string
sortOrder string
)
var command = &cobra.Command{
Use: "list",
Short: "List configured certificates",
Run: func(c *cobra.Command, args []string) {
if certType != "" {
switch certType {
case "ssh":
case "https":
default:
fmt.Println("cert-type must be either ssh or https")
os.Exit(1)
}
}
conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie()
defer util.Close(conn)
certificates, err := certIf.ListCertificates(context.Background(), &certificatepkg.RepositoryCertificateQuery{HostNamePattern: hostNamePattern, CertType: certType})
errors.CheckError(err)
printCertTable(certificates.Items, sortOrder)
},
}
command.Flags().StringVar(&sortOrder, "sort", "", "set display sort order, valid: 'hostname', 'type'")
command.Flags().StringVar(&certType, "cert-type", "", "only list certificates of given type, valid: 'ssh','https'")
command.Flags().StringVar(&hostNamePattern, "hostname-pattern", "", "only list certificates for hosts matching given glob-pattern")
return command
}
// Print table of certificate info
func printCertTable(certs []appsv1.RepositoryCertificate, sortOrder string) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "HOSTNAME\tTYPE\tSUBTYPE\tINFO\n")
if sortOrder == "hostname" || sortOrder == "" {
sort.Slice(certs, func(i, j int) bool {
return certs[i].ServerName < certs[j].ServerName
})
} else if sortOrder == "type" {
sort.Slice(certs, func(i, j int) bool {
return certs[i].CertType < certs[j].CertType
})
}
for _, c := range certs {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", c.ServerName, c.CertType, c.CertSubType, c.CertInfo)
}
_ = w.Flush()
}

View File

@@ -9,20 +9,17 @@ import (
"strings"
"text/tabwriter"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
clusterpkg "github.com/argoproj/argo-cd/pkg/apiclient/cluster"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/clusterauth"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// NewClusterCommand returns a new instance of an `argocd cluster` command
@@ -40,19 +37,11 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
command.AddCommand(NewClusterGetCommand(clientOpts))
command.AddCommand(NewClusterListCommand(clientOpts))
command.AddCommand(NewClusterRemoveCommand(clientOpts))
command.AddCommand(NewClusterRotateAuthCommand(clientOpts))
return command
}
// NewClusterAddCommand returns a new instance of an `argocd cluster add` command
func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
var (
inCluster bool
upsert bool
awsRoleArn string
awsClusterName string
systemNamespace string
)
var command = &cobra.Command{
Use: "add",
Short: fmt.Sprintf("%s cluster add CONTEXT", cliName),
@@ -69,7 +58,6 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
if clstContext == nil {
log.Fatalf("Context %s does not exist in kubeconfig", args[0])
}
overrides := clientcmd.ConfigOverrides{
Context: *clstContext,
}
@@ -77,41 +65,18 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
managerBearerToken := ""
var awsAuthConf *argoappv1.AWSAuthConfig
if awsClusterName != "" {
awsAuthConf = &argoappv1.AWSAuthConfig{
ClusterName: awsClusterName,
RoleARN: awsRoleArn,
}
} else {
// Install RBAC resources for managing the cluster
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, systemNamespace)
errors.CheckError(err)
}
// Install RBAC resources for managing the cluster
managerBearerToken := common.InstallClusterManagerRBAC(conf)
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
clst := NewCluster(args[0], conf, managerBearerToken, awsAuthConf)
if inCluster {
clst.Server = common.KubernetesInternalAPIServerAddr
}
clstCreateReq := clusterpkg.ClusterCreateRequest{
Cluster: clst,
Upsert: upsert,
}
clst, err = clusterIf.Create(context.Background(), &clstCreateReq)
clst := NewCluster(args[0], conf, managerBearerToken)
clst, err = clusterIf.Create(context.Background(), clst)
errors.CheckError(err)
fmt.Printf("Cluster '%s' added\n", clst.Name)
},
}
command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file")
command.Flags().BoolVar(&inCluster, "in-cluster", false, "Indicates Argo CD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing cluster with the same name even if the spec differs")
command.Flags().StringVar(&awsClusterName, "aws-cluster-name", "", "AWS cluster name; if set, aws-iam-authenticator will be used to access the cluster")
command.Flags().StringVar(&awsRoleArn, "aws-role-arn", "", "Optional AWS role ARN. If set, the AWS IAM Authenticator assumes this role to perform cluster operations instead of using the default AWS credential provider chain.")
command.Flags().StringVar(&systemNamespace, "system-namespace", common.DefaultSystemNamespace, "Use different system namespace")
return command
}
@@ -131,20 +96,9 @@ func printKubeContexts(ca clientcmd.ConfigAccess) {
}
sort.Strings(contextNames)
if config.Clusters == nil {
return
}
for _, name := range contextNames {
// ignore malformed kube config entries
context := config.Contexts[name]
if context == nil {
continue
}
cluster := config.Clusters[context.Cluster]
if cluster == nil {
continue
}
prefix := " "
if config.CurrentContext == name {
prefix = "*"
@@ -154,12 +108,24 @@ func printKubeContexts(ca clientcmd.ConfigAccess) {
}
}
func NewCluster(name string, conf *rest.Config, managerBearerToken string, awsAuthConf *argoappv1.AWSAuthConfig) *argoappv1.Cluster {
func NewCluster(name string, conf *rest.Config, managerBearerToken string) *argoappv1.Cluster {
tlsClientConfig := argoappv1.TLSClientConfig{
Insecure: conf.TLSClientConfig.Insecure,
ServerName: conf.TLSClientConfig.ServerName,
CertData: conf.TLSClientConfig.CertData,
KeyData: conf.TLSClientConfig.KeyData,
CAData: conf.TLSClientConfig.CAData,
}
if len(conf.TLSClientConfig.CertData) == 0 && conf.TLSClientConfig.CertFile != "" {
data, err := ioutil.ReadFile(conf.TLSClientConfig.CertFile)
errors.CheckError(err)
tlsClientConfig.CertData = data
}
if len(conf.TLSClientConfig.KeyData) == 0 && conf.TLSClientConfig.KeyFile != "" {
data, err := ioutil.ReadFile(conf.TLSClientConfig.KeyFile)
errors.CheckError(err)
tlsClientConfig.KeyData = data
}
if len(conf.TLSClientConfig.CAData) == 0 && conf.TLSClientConfig.CAFile != "" {
data, err := ioutil.ReadFile(conf.TLSClientConfig.CAFile)
errors.CheckError(err)
@@ -171,7 +137,6 @@ func NewCluster(name string, conf *rest.Config, managerBearerToken string, awsAu
Config: argoappv1.ClusterConfig{
BearerToken: managerBearerToken,
TLSClientConfig: tlsClientConfig,
AWSAuthConfig: awsAuthConf,
},
}
return &clst
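// A minimal refactoring sketch (hypothetical helper, not in this diff): the
// inline-data-or-file fallback repeated above for cert, key, and CA data,
// extracted into one place.
func tlsBytes(inline []byte, file string) []byte {
	// Prefer inline data; fall back to reading the referenced file, if any.
	if len(inline) > 0 || file == "" {
		return inline
	}
	data, err := ioutil.ReadFile(file)
	errors.CheckError(err)
	return data
}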
@@ -180,7 +145,7 @@ func NewCluster(name string, conf *rest.Config, managerBearerToken string, awsAu
// NewClusterGetCommand returns a new instance of an `argocd cluster get` command
func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "get CLUSTER",
Use: "get",
Short: "Get cluster information",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
@@ -190,7 +155,7 @@ func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
for _, clusterName := range args {
clst, err := clusterIf.Get(context.Background(), &clusterpkg.ClusterQuery{Server: clusterName})
clst, err := clusterIf.Get(context.Background(), &cluster.ClusterQuery{Server: clusterName})
errors.CheckError(err)
yamlBytes, err := yaml.Marshal(clst)
errors.CheckError(err)
@@ -204,7 +169,7 @@ func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
// NewClusterRemoveCommand returns a new instance of an `argocd cluster rm` command
func NewClusterRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rm CLUSTER",
Use: "rm",
Short: "Remove cluster credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
@@ -213,15 +178,10 @@ func NewClusterRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
}
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
// clientset, err := kubernetes.NewForConfig(conf)
// errors.CheckError(err)
for _, clusterName := range args {
// TODO(jessesuen): find the right context and remove manager RBAC artifacts
// err := clusterauth.UninstallClusterManagerRBAC(clientset)
// errors.CheckError(err)
_, err := clusterIf.Delete(context.Background(), &clusterpkg.ClusterQuery{Server: clusterName})
// common.UninstallClusterManagerRBAC(conf)
_, err := clusterIf.Delete(context.Background(), &cluster.ClusterQuery{Server: clusterName})
errors.CheckError(err)
}
},
@@ -229,65 +189,22 @@ func NewClusterRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
return command
}
// Print table of cluster information
func printClusterTable(clusters []argoappv1.Cluster) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "SERVER\tNAME\tVERSION\tSTATUS\tMESSAGE\n")
for _, c := range clusters {
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", c.Server, c.Name, c.ServerVersion, c.ConnectionState.Status, c.ConnectionState.Message)
}
_ = w.Flush()
}
// Print list of cluster servers
func printClusterServers(clusters []argoappv1.Cluster) {
for _, c := range clusters {
fmt.Println(c.Server)
}
}
// NewClusterListCommand returns a new instance of an `argocd cluster list` command
func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List configured clusters",
Run: func(c *cobra.Command, args []string) {
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
clusters, err := clusterIf.List(context.Background(), &clusterpkg.ClusterQuery{})
clusters, err := clusterIf.List(context.Background(), &cluster.ClusterQuery{})
errors.CheckError(err)
if output == "server" {
printClusterServers(clusters.Items)
} else {
printClusterTable(clusters.Items)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "SERVER\tNAME\tMESSAGE\n")
for _, c := range clusters.Items {
fmt.Fprintf(w, "%s\t%s\t%s\n", c.Server, c.Name, c.Message)
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: wide|server")
return command
}
// NewClusterRotateAuthCommand returns a new instance of an `argocd cluster rotate-auth` command
func NewClusterRotateAuthCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rotate-auth CLUSTER",
Short: fmt.Sprintf("%s cluster rotate-auth CLUSTER", cliName),
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
clusterQuery := clusterpkg.ClusterQuery{
Server: args[0],
}
_, err := clusterIf.RotateAuth(context.Background(), &clusterQuery)
errors.CheckError(err)
fmt.Printf("Cluster '%s' rotated auth\n", clusterQuery.Server)
_ = w.Flush()
},
}
return command

View File

@@ -1,31 +0,0 @@
package commands
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)
func Test_printClusterTable(t *testing.T) {
printClusterTable([]v1alpha1.Cluster{
{
Server: "my-server",
Name: "my-name",
Config: v1alpha1.ClusterConfig{
Username: "my-username",
Password: "my-password",
BearerToken: "my-bearer-token",
TLSClientConfig: v1alpha1.TLSClientConfig{},
AWSAuthConfig: nil,
},
ConnectionState: v1alpha1.ConnectionState{
Status: "my-status",
Message: "my-message",
ModifiedAt: &metav1.Time{},
},
ServerVersion: "my-version",
},
})
}

View File

@@ -2,8 +2,4 @@ package commands
const (
cliName = "argocd"
// DefaultSSOLocalPort is the localhost port to listen on for the temporary web server performing
// the OAuth2 login flow.
DefaultSSOLocalPort = 8085
)

View File

@@ -1,233 +0,0 @@
package commands
import (
"fmt"
"io"
"log"
"os"
"github.com/spf13/cobra"
)
const (
bashCompletionFunc = `
__argocd_list_apps() {
local -a argocd_out
if argocd_out=($(argocd app list --output name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_list_app_history() {
local app=$1
local -a argocd_out
if argocd_out=($(argocd app history $app --output id 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_app_rollback() {
local -a command
for comp_word in "${COMP_WORDS[@]}"; do
if [[ $comp_word =~ ^-.*$ ]]; then
continue
fi
command+=($comp_word)
done
# fourth arg is app (if present): e.g.- argocd app rollback guestbook
local app=${command[3]}
local id=${command[4]}
if [[ -z $app || $app == $cur ]]; then
__argocd_list_apps
elif [[ -z $id || $id == $cur ]]; then
__argocd_list_app_history $app
fi
}
__argocd_list_servers() {
local -a argocd_out
if argocd_out=($(argocd cluster list --output server 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_list_repos() {
local -a argocd_out
if argocd_out=($(argocd repo list --output url 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_list_projects() {
local -a argocd_out
if argocd_out=($(argocd proj list --output name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_list_namespaces() {
local -a argocd_out
if argocd_out=($(kubectl get namespaces --no-headers 2>/dev/null | cut -f1 -d' ' 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_proj_server_namespace() {
local -a command
for comp_word in "${COMP_WORDS[@]}"; do
if [[ $comp_word =~ ^-.*$ ]]; then
continue
fi
command+=($comp_word)
done
# expect something like this: argocd proj add-destination PROJECT SERVER NAMESPACE
local project=${command[3]}
local server=${command[4]}
local namespace=${command[5]}
if [[ -z $project || $project == $cur ]]; then
__argocd_list_projects
elif [[ -z $server || $server == $cur ]]; then
__argocd_list_servers
elif [[ -z $namespace || $namespace == $cur ]]; then
__argocd_list_namespaces
fi
}
__argocd_list_project_role() {
local project="$1"
local -a argocd_out
if argocd_out=($(argocd proj role list "$project" --output=name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argocd_out[*]}" -- "$cur" ) )
fi
}
__argocd_proj_role(){
local -a command
for comp_word in "${COMP_WORDS[@]}"; do
if [[ $comp_word =~ ^-.*$ ]]; then
continue
fi
command+=($comp_word)
done
# expect something like this: argocd proj role add-policy PROJECT ROLE-NAME
local project=${command[4]}
local role=${command[5]}
if [[ -z $project || $project == $cur ]]; then
__argocd_list_projects
elif [[ -z $role || $role == $cur ]]; then
__argocd_list_project_role $project
fi
}
__argocd_custom_func() {
case ${last_command} in
argocd_app_delete | \
argocd_app_diff | \
argocd_app_edit | \
argocd_app_get | \
argocd_app_history | \
argocd_app_manifests | \
argocd_app_patch-resource | \
argocd_app_set | \
argocd_app_sync | \
argocd_app_terminate-op | \
argocd_app_unset | \
argocd_app_wait | \
argocd_app_create)
__argocd_list_apps
return
;;
argocd_app_rollback)
__argocd_app_rollback
return
;;
argocd_cluster_get | \
argocd_cluster_rm | \
argocd_login | \
argocd_cluster_add)
__argocd_list_servers
return
;;
argocd_repo_rm | \
argocd_repo_add)
__argocd_list_repos
return
;;
argocd_proj_add-destination | \
argocd_proj_remove-destination)
__argocd_proj_server_namespace
return
;;
argocd_proj_add-source | \
argocd_proj_remove-source | \
argocd_proj_allow-cluster-resource | \
argocd_proj_allow-namespace-resource | \
argocd_proj_deny-cluster-resource | \
argocd_proj_deny-namespace-resource | \
argocd_proj_delete | \
argocd_proj_edit | \
argocd_proj_get | \
argocd_proj_set | \
argocd_proj_role_list)
__argocd_list_projects
return
;;
argocd_proj_role_remove-policy | \
argocd_proj_role_add-policy | \
argocd_proj_role_create | \
argocd_proj_role_delete | \
argocd_proj_role_get | \
argocd_proj_role_create-token | \
argocd_proj_role_delete-token)
__argocd_proj_role
return
;;
*)
;;
esac
}
`
)
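// Note on how this script is wired in: NewCompletionCommand below assigns it to
// rootCommand.BashCompletionFunction, and the bash completion script that cobra
// generates invokes __argocd_custom_func (the "__<root-command>_custom_func"
// convention) when it needs completions for positional arguments; that function
// then dispatches on ${last_command} as defined above.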
func NewCompletionCommand() *cobra.Command {
var command = &cobra.Command{
Use: "completion SHELL",
Short: "output shell completion code for the specified shell (bash or zsh)",
Long: `Write bash or zsh shell completion code to standard output.
For bash, ensure you have bash completions installed and enabled.
To access completions in your current shell, run
$ source <(argocd completion bash)
Alternatively, write it to a file and source it in .bash_profile
For zsh, write the output to a file in a directory referenced by the $fpath
shell variable.
`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.HelpFunc()(cmd, args)
os.Exit(1)
}
shell := args[0]
rootCommand := NewCommand()
rootCommand.BashCompletionFunction = bashCompletionFunc
availableCompletions := map[string]func(io.Writer) error{
"bash": rootCommand.GenBashCompletion,
"zsh": rootCommand.GenZshCompletion,
}
completion, ok := availableCompletions[shell]
if !ok {
fmt.Printf("Invalid shell '%s'. The supported shells are bash and zsh.\n", shell)
os.Exit(1)
}
if err := completion(os.Stdout); err != nil {
log.Fatal(err)
}
},
}
return command
}

View File

@@ -8,48 +8,25 @@ import (
"strings"
"text/tabwriter"
"github.com/spf13/pflag"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util/localconfig"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewContextCommand returns a new instance of an `argocd ctx` command
func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var delete bool
var command = &cobra.Command{
Use: "context",
Aliases: []string{"ctx"},
Short: "Switch between contexts",
Run: func(c *cobra.Command, args []string) {
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
errors.CheckError(err)
deletePresentContext := false
c.Flags().Visit(func(f *pflag.Flag) {
if f.Name == "delete" {
deletePresentContext = true
}
})
if len(args) == 0 {
if deletePresentContext {
err := deleteContext(localCfg.CurrentContext, clientOpts.ConfigPath)
errors.CheckError(err)
return
} else {
printArgoCDContexts(clientOpts.ConfigPath)
return
}
printArgoCDContexts(clientOpts.ConfigPath)
return
}
ctxName := args[0]
argoCDDir, err := localconfig.DefaultConfigDir()
errors.CheckError(err)
prevCtxFile := path.Join(argoCDDir, ".prev-ctx")
@@ -59,6 +36,8 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
errors.CheckError(err)
ctxName = string(prevCtxBytes)
}
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
errors.CheckError(err)
if localCfg.CurrentContext == ctxName {
fmt.Printf("Already at context '%s'\n", localCfg.CurrentContext)
return
@@ -68,7 +47,6 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
}
prevCtx := localCfg.CurrentContext
localCfg.CurrentContext = ctxName
err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath)
errors.CheckError(err)
err = ioutil.WriteFile(prevCtxFile, []byte(prevCtx), 0644)
@@ -76,43 +54,9 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
fmt.Printf("Switched to context '%s'\n", localCfg.CurrentContext)
},
}
command.Flags().BoolVar(&delete, "delete", false, "Delete the context instead of switching to it")
return command
}
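// Note: the ".prev-ctx" file written above records the context that was active
// before a switch. Reading it back (see the prevCtxBytes hunk) lets the CLI
// jump straight back to the previous context -- presumably via a "-" shorthand
// argument, though the triggering condition is outside the visible hunks.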
func deleteContext(context, configPath string) error {
localCfg, err := localconfig.ReadLocalConfig(configPath)
errors.CheckError(err)
if localCfg == nil {
return fmt.Errorf("Nothing to logout from")
}
serverName, ok := localCfg.RemoveContext(context)
if !ok {
return fmt.Errorf("Context %s does not exist", context)
}
_ = localCfg.RemoveUser(context)
_ = localCfg.RemoveServer(serverName)
if localCfg.IsEmpty() {
err = localconfig.DeleteLocalConfig(configPath)
errors.CheckError(err)
} else {
if localCfg.CurrentContext == context {
localCfg.CurrentContext = localCfg.Contexts[0].Name
}
err = localconfig.ValidateLocalConfig(*localCfg)
if err != nil {
return fmt.Errorf("Error in logging out")
}
err = localconfig.WriteLocalConfig(*localCfg, configPath)
errors.CheckError(err)
}
fmt.Printf("Context '%s' deleted\n", context)
return nil
}
func printArgoCDContexts(configPath string) {
localCfg, err := localconfig.ReadLocalConfig(configPath)
errors.CheckError(err)

View File

@@ -1,60 +0,0 @@
package commands
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/argoproj/argo-cd/util/localconfig"
)
const testConfig = `contexts:
- name: argocd.example.com:443
server: argocd.example.com:443
user: argocd.example.com:443
- name: localhost:8080
server: localhost:8080
user: localhost:8080
current-context: localhost:8080
servers:
- server: argocd.example.com:443
- plain-text: true
server: localhost:8080
users:
- auth-token: vErrYS3c3tReFRe$hToken
name: argocd.example.com:443
refresh-token: vErrYS3c3tReFRe$hToken
- auth-token: vErrYS3c3tReFRe$hToken
name: localhost:8080`
const testConfigFilePath = "./testdata/config"
func TestContextDelete(t *testing.T) {
// Write the test config file
err := ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
assert.NoError(t, err)
localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
err = deleteContext("localhost:8080", testConfigFilePath)
assert.NoError(t, err)
localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "argocd.example.com:443")
assert.NotContains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
assert.NotContains(t, localConfig.Servers, localconfig.Server{PlainText: true, Server: "localhost:8080"})
assert.NotContains(t, localConfig.Users, localconfig.User{AuthToken: "vErrYS3c3tReFRe$hToken", Name: "localhost:8080"})
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd.example.com:443", Server: "argocd.example.com:443", User: "argocd.example.com:443"})
// Write the file again so that no conflicts are made in git
err = ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
assert.NoError(t, err)
}

View File

@@ -0,0 +1,75 @@
package commands
import (
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/install"
"github.com/argoproj/argo-cd/util/cli"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
)
// NewInstallCommand returns a new instance of `argocd install` command
func NewInstallCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
installOpts install.InstallOptions
)
var command = &cobra.Command{
Use: "install",
Short: "Install Argo CD",
Long: "Install Argo CD",
Run: func(c *cobra.Command, args []string) {
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, wasSpecified, err := clientConfig.Namespace()
errors.CheckError(err)
if wasSpecified {
installOpts.Namespace = namespace
}
installer, err := install.NewInstaller(conf, installOpts)
errors.CheckError(err)
installer.Install()
},
}
command.Flags().BoolVar(&installOpts.Upgrade, "upgrade", false, "upgrade controller/ui deployments and configmap if already installed")
command.Flags().BoolVar(&installOpts.DryRun, "dry-run", false, "print the kubernetes manifests to stdout instead of installing")
command.Flags().StringVar(&installOpts.SuperuserPassword, "superuser-password", "", "password for super user")
command.Flags().StringVar(&installOpts.ControllerImage, "controller-image", install.DefaultControllerImage, "use a specified controller image")
command.Flags().StringVar(&installOpts.ServerImage, "server-image", install.DefaultServerImage, "use a specified api server image")
command.Flags().StringVar(&installOpts.UIImage, "ui-image", install.DefaultUIImage, "use a specified ui image")
command.Flags().StringVar(&installOpts.RepoServerImage, "repo-server-image", install.DefaultRepoServerImage, "use a specified repo server image")
command.Flags().StringVar(&installOpts.ImagePullPolicy, "image-pull-policy", "", "set the image pull policy of the pod specs")
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.AddCommand(newSettingsCommand())
return command
}
// newSettingsCommand returns a new instance of `argocd install settings` command
func newSettingsCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
installOpts install.InstallOptions
)
var command = &cobra.Command{
Use: "settings",
Short: "Creates or updates ArgoCD settings",
Long: "Creates or updates ArgoCD settings",
Run: func(c *cobra.Command, args []string) {
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, wasSpecified, err := clientConfig.Namespace()
errors.CheckError(err)
if wasSpecified {
installOpts.Namespace = namespace
}
installer, err := install.NewInstaller(conf, installOpts)
errors.CheckError(err)
installer.InstallSettings()
},
}
command.Flags().BoolVar(&installOpts.UpdateSuperuser, "update-superuser", false, "force updating the superuser password")
command.Flags().StringVar(&installOpts.SuperuserPassword, "superuser-password", "", "password for super user")
command.Flags().BoolVar(&installOpts.UpdateSignature, "update-signature", false, "force updating the server-side token signing signature")
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}

View File

@@ -2,29 +2,28 @@ package commands
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"os"
"strconv"
"time"
"github.com/coreos/go-oidc"
"github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
sessionpkg "github.com/argoproj/argo-cd/pkg/apiclient/session"
settingspkg "github.com/argoproj/argo-cd/pkg/apiclient/settings"
"github.com/argoproj/argo-cd/server/session"
"github.com/argoproj/argo-cd/server/settings"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/localconfig"
oidcutil "github.com/argoproj/argo-cd/util/oidc"
"github.com/argoproj/argo-cd/util/rand"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
)
// NewLoginCommand returns a new instance of `argocd login` command
@@ -34,7 +33,6 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
username string
password string
sso bool
ssoPort int
)
var command = &cobra.Command{
Use: "login SERVER",
@@ -68,7 +66,6 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
ServerAddr: server,
Insecure: globalClientOpts.Insecure,
PlainText: globalClientOpts.PlainText,
GRPCWeb: globalClientOpts.GRPCWeb,
}
acdClient := argocdclient.NewClientOrDie(&clientOpts)
setConn, setIf := acdClient.NewSettingsClientOrDie()
@@ -80,19 +77,23 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
// Perform the login
var tokenString string
var refreshToken string
if !sso {
tokenString = passwordLogin(acdClient, username, password)
} else {
ctx := context.Background()
httpClient, err := acdClient.HTTPClient()
acdSet, err := setIf.Get(context.Background(), &settings.SettingsQuery{})
errors.CheckError(err)
ctx = oidc.ClientContext(ctx, httpClient)
acdSet, err := setIf.Get(ctx, &settingspkg.SettingsQuery{})
errors.CheckError(err)
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
errors.CheckError(err)
tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider)
if !ssoConfigured(acdSet) {
log.Fatalf("ArgoCD instance is not configured with SSO")
}
tokenString = oauth2Login(server, clientOpts.PlainText)
// The token we just received from the OAuth2 flow was issued by dex. ArgoCD
// currently does not back dex with any kind of persistent storage (it runs
// in-memory). As a result, this token cannot be used in any permanent capacity:
// a restart of dex produces a different signing key, invalidating existing
// sessions. Instead, we turn around and ask ArgoCD (which *does* persist its
// signing keys) to re-sign the token, and that re-signed token is what we store
// in the config. Should we ever back dex with a database layer, the next line
// can be removed.
tokenString = tokenLogin(acdClient, tokenString)
}
parser := &jwt.Parser{
@@ -113,12 +114,10 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
Server: server,
PlainText: globalClientOpts.PlainText,
Insecure: globalClientOpts.Insecure,
GRPCWeb: globalClientOpts.GRPCWeb,
})
localCfg.UpsertUser(localconfig.User{
Name: ctxName,
AuthToken: tokenString,
RefreshToken: refreshToken,
Name: ctxName,
AuthToken: tokenString,
})
if ctxName == "" {
ctxName = server
@@ -137,8 +136,7 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
command.Flags().StringVar(&ctxName, "name", "", "name to use for the context")
command.Flags().StringVar(&username, "username", "", "the username of an account to authenticate")
command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate")
command.Flags().BoolVar(&sso, "sso", false, "perform SSO login")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "port to run local OAuth2 login application")
command.Flags().BoolVar(&sso, "sso", false, "Perform SSO login")
return command
}
@@ -152,112 +150,94 @@ func userDisplayName(claims jwt.MapClaims) string {
return claims["sub"].(string)
}
// oauth2Login opens a browser, runs a temporary HTTP server to delegate OAuth2 login flow and
// returns the JWT token and a refresh token (if supported)
func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCConfig, oauth2conf *oauth2.Config, provider *oidc.Provider) (string, string) {
oauth2conf.RedirectURL = fmt.Sprintf("http://localhost:%d/auth/callback", port)
oidcConf, err := oidcutil.ParseConfig(provider)
errors.CheckError(err)
log.Debug("OIDC Configuration:")
log.Debugf(" supported_scopes: %v", oidcConf.ScopesSupported)
log.Debugf(" response_types_supported: %v", oidcConf.ResponseTypesSupported)
func ssoConfigured(set *settings.Settings) bool {
return set.DexConfig != nil && len(set.DexConfig.Connectors) > 0
}
// handledRequests ensures we do not handle more requests than necessary
handledRequests := 0
// completionChan is to signal flow completed. Non-empty string indicates error
completionChan := make(chan string)
// stateNonce is an OAuth2 state nonce
stateNonce := rand.RandString(10)
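// The state nonce is included in the authorization request and must be echoed
// back unchanged by the provider; the callback handler below rejects responses
// carrying any other value. This is the standard OAuth2 CSRF defense, and it
// matters here because the callback listener is a plain localhost HTTP server.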
var tokenString string
var refreshToken string
handleErr := func(w http.ResponseWriter, errMsg string) {
http.Error(w, errMsg, http.StatusBadRequest)
completionChan <- errMsg
// getFreePort asks the kernel for a free open port that is ready to use.
func getFreePort() (int, error) {
ln, err := net.Listen("tcp", "[::]:0")
if err != nil {
return 0, err
}
return ln.Addr().(*net.TCPAddr).Port, ln.Close()
}
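// Caveat: the port is released by ln.Close() before the caller re-binds it, so
// another process could in principle claim it in between. For a short-lived
// localhost callback server that race is generally acceptable. Typical usage,
// mirroring the code below:
//
//	port, err := getFreePort()
//	errors.CheckError(err)
//	srv := &http.Server{Addr: ":" + strconv.Itoa(port)}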
// oauth2Login opens a browser, runs a temporary HTTP server to delegate OAuth2 login flow and returns the JWT token
func oauth2Login(host string, plaintext bool) string {
ctx := context.Background()
port, err := getFreePort()
errors.CheckError(err)
var scheme = "https"
if plaintext {
scheme = "http"
}
conf := &oauth2.Config{
ClientID: common.ArgoCDCLIClientAppID,
Scopes: []string{"openid", "profile", "email", "groups", "offline_access"},
Endpoint: oauth2.Endpoint{
AuthURL: fmt.Sprintf("%s://%s%s/auth", scheme, host, common.DexAPIEndpoint),
TokenURL: fmt.Sprintf("%s://%s%s/token", scheme, host, common.DexAPIEndpoint),
},
RedirectURL: fmt.Sprintf("http://localhost:%d/auth/callback", port),
}
srv := &http.Server{Addr: ":" + strconv.Itoa(port)}
var tokenString string
loginCompleted := make(chan struct{})
// Authorization redirect callback from OAuth2 auth flow.
// Handles both implicit and authorization code flow
callbackHandler := func(w http.ResponseWriter, r *http.Request) {
log.Debugf("Callback: %s", r.URL)
defer func() {
loginCompleted <- struct{}{}
}()
if formErr := r.FormValue("error"); formErr != "" {
handleErr(w, formErr+": "+r.FormValue("error_description"))
// Authorization redirect callback from OAuth2 auth flow.
if errMsg := r.FormValue("error"); errMsg != "" {
http.Error(w, errMsg+": "+r.FormValue("error_description"), http.StatusBadRequest)
log.Fatal(errMsg)
return
}
code := r.FormValue("code")
if code == "" {
errMsg := fmt.Sprintf("no code in request: %q", r.Form)
http.Error(w, errMsg, http.StatusBadRequest)
log.Fatal(errMsg)
return
}
tok, err := conf.Exchange(ctx, code)
errors.CheckError(err)
log.Info("Authentication successful")
var ok bool
tokenString, ok = tok.Extra("id_token").(string)
if !ok {
errMsg := "no id_token in token response"
http.Error(w, errMsg, http.StatusInternalServerError)
log.Fatal(errMsg)
return
}
handledRequests++
if handledRequests > 2 {
// Since implicit flow will redirect back to ourselves, this counter ensures we do not
// fallinto a redirect loop (e.g. user visits the page by hand)
handleErr(w, "Unable to complete login flow: too many redirects")
return
}
if len(r.Form) == 0 {
// If we get here, no form data was set. We presume we are performing an
// implicit login flow, where the id_token is contained in a URL fragment and
// therefore cannot be read from the request. This JavaScript redirects the
// browser to resend the fragment as query parameters, so our callback handler
// can read and return the token.
fmt.Fprintf(w, `<script>window.location.search = window.location.hash.substring(1)</script>`)
return
}
if state := r.FormValue("state"); state != stateNonce {
handleErr(w, "Unknown state nonce")
return
}
tokenString = r.FormValue("id_token")
if tokenString == "" {
code := r.FormValue("code")
if code == "" {
handleErr(w, fmt.Sprintf("no code in request: %q", r.Form))
return
}
tok, err := oauth2conf.Exchange(ctx, code)
if err != nil {
handleErr(w, err.Error())
return
}
var ok bool
tokenString, ok = tok.Extra("id_token").(string)
if !ok {
handleErr(w, "no id_token in token response")
return
}
refreshToken, _ = tok.Extra("refresh_token").(string)
}
log.Debugf("Token: %s", tokenString)
successPage := `
<div style="height:100px; width:100%!; display:flex; flex-direction: column; justify-content: center; align-items:center; background-color:#2ecc71; color:white; font-size:22"><div>Authentication successful!</div></div>
<p style="margin-top:20px; font-size:18; text-align:center">Authentication was successful, you can now return to CLI. This page will close automatically</p>
<script>window.onload=function(){setTimeout(this.close, 4000)}</script>
`
fmt.Fprint(w, successPage)
completionChan <- ""
fmt.Fprintf(w, successPage)
}
srv := &http.Server{Addr: "localhost:" + strconv.Itoa(port)}
http.HandleFunc("/auth/callback", callbackHandler)
// add transport for self-signed certificate to context
sslcli := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
ctx = context.WithValue(ctx, oauth2.HTTPClient, sslcli)
// Redirect user to login & consent page to ask for permission for the scopes specified above.
fmt.Printf("Opening browser for authentication\n")
var url string
grantType := oidcutil.InferGrantType(oidcConf)
opts := []oauth2.AuthCodeOption{oauth2.AccessTypeOffline}
if claimsRequested := oidcSettings.GetIDTokenClaims(); claimsRequested != nil {
opts = oidcutil.AppendClaimsAuthenticationRequestParameter(opts, claimsRequested)
}
switch grantType {
case oidcutil.GrantTypeAuthorizationCode:
url = oauth2conf.AuthCodeURL(stateNonce, opts...)
case oidcutil.GrantTypeImplicit:
url = oidcutil.ImplicitFlowURL(oauth2conf, stateNonce, opts...)
default:
log.Fatalf("Unsupported grant type: %v", grantType)
}
fmt.Printf("Performing %s flow login: %s\n", grantType, url)
log.Info("Opening browser for authentication")
url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
log.Infof("Authentication URL: %s", url)
time.Sleep(1 * time.Second)
err = open.Run(url)
errors.CheckError(err)
@@ -266,24 +246,16 @@ func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCCo
log.Fatalf("listen: %s\n", err)
}
}()
errMsg := <-completionChan
if errMsg != "" {
log.Fatal(errMsg)
}
fmt.Printf("Authentication successful\n")
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
<-loginCompleted
_ = srv.Shutdown(ctx)
log.Debugf("Token: %s", tokenString)
log.Debugf("Refresh Token: %s", refreshToken)
return tokenString, refreshToken
return tokenString
}
func passwordLogin(acdClient argocdclient.Client, username, password string) string {
username, password = cli.PromptCredentials(username, password)
sessConn, sessionIf := acdClient.NewSessionClientOrDie()
defer util.Close(sessConn)
sessionRequest := sessionpkg.SessionCreateRequest{
sessionRequest := session.SessionCreateRequest{
Username: username,
Password: password,
}
@@ -291,3 +263,14 @@ func passwordLogin(acdClient argocdclient.Client, username, password string) str
errors.CheckError(err)
return createdSession.Token
}
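// tokenLogin (below) exchanges an externally issued id_token for an Argo
// CD-signed session token via the session API -- see the long comment in the
// SSO branch above for why the dex-issued token has to be re-signed.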
func tokenLogin(acdClient argocdclient.Client, token string) string {
sessConn, sessionIf := acdClient.NewSessionClientOrDie()
defer util.Close(sessConn)
sessionRequest := session.SessionCreateRequest{
Token: token,
}
createdSession, err := sessionIf.Create(context.Background(), &sessionRequest)
errors.CheckError(err)
return createdSession.Token
}

View File

@@ -1,50 +0,0 @@
package commands
import (
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util/localconfig"
)
// NewLogoutCommand returns a new instance of `argocd logout` command
func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "logout CONTEXT",
Short: "Log out from Argo CD",
Long: "Log out from Argo CD",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
context := args[0]
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
errors.CheckError(err)
if localCfg == nil {
log.Fatalf("Nothing to logout from")
}
ok := localCfg.RemoveToken(context)
if !ok {
log.Fatalf("Context %s does not exist", context)
}
err = localconfig.ValidateLocalConfig(*localCfg)
if err != nil {
log.Fatalf("Error in logging out: %s", err)
}
err = localconfig.WriteLocalConfig(*localCfg, globalClientOpts.ConfigPath)
errors.CheckError(err)
fmt.Printf("Logged out from '%s'\n", context)
},
}
return command
}

View File

@@ -1,39 +0,0 @@
package commands
import (
"io/ioutil"
"os"
"testing"
"github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/stretchr/testify/assert"
"github.com/argoproj/argo-cd/util/localconfig"
)
func TestLogout(t *testing.T) {
// Write the test config file
err := ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
assert.NoError(t, err)
localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
command := NewLogoutCommand(&apiclient.ClientOptions{ConfigPath: testConfigFilePath})
command.Run(nil, []string{"localhost:8080"})
localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
assert.NoError(t, err)
assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
assert.NotContains(t, localConfig.Users, localconfig.User{AuthToken: "vErrYS3c3tReFRe$hToken", Name: "localhost:8080"})
assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd.example.com:443", Server: "argocd.example.com:443", User: "argocd.example.com:443"})
// Write the file again so that no conflicts are made in git
err = ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
assert.NoError(t, err)
}

View File

@@ -1,690 +0,0 @@
package commands
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"net/url"
"os"
"strings"
"text/tabwriter"
"time"
"github.com/dustin/go-humanize"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
projectpkg "github.com/argoproj/argo-cd/pkg/apiclient/project"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/config"
"github.com/argoproj/argo-cd/util/git"
)
type projectOpts struct {
description string
destinations []string
sources []string
orphanedResourcesEnabled bool
orphanedResourcesWarn bool
}
type policyOpts struct {
action string
permission string
object string
}
func (opts *projectOpts) GetDestinations() []v1alpha1.ApplicationDestination {
destinations := make([]v1alpha1.ApplicationDestination, 0)
for _, destStr := range opts.destinations {
parts := strings.Split(destStr, ",")
if len(parts) != 2 {
log.Fatalf("Expected destination of the form: server,namespace. Received: %s", destStr)
} else {
destinations = append(destinations, v1alpha1.ApplicationDestination{
Server: parts[0],
Namespace: parts[1],
})
}
}
return destinations
}
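// For example (hypothetical input), "--dest https://192.168.99.100:8443,default"
// yields ApplicationDestination{Server: "https://192.168.99.100:8443",
// Namespace: "default"}; any value that does not split into exactly two
// comma-separated parts is fatal.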
// NewProjectCommand returns a new instance of an `argocd proj` command
func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "proj",
Short: "Manage projects",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
command.AddCommand(NewProjectRoleCommand(clientOpts))
command.AddCommand(NewProjectCreateCommand(clientOpts))
command.AddCommand(NewProjectGetCommand(clientOpts))
command.AddCommand(NewProjectDeleteCommand(clientOpts))
command.AddCommand(NewProjectListCommand(clientOpts))
command.AddCommand(NewProjectSetCommand(clientOpts))
command.AddCommand(NewProjectEditCommand(clientOpts))
command.AddCommand(NewProjectAddDestinationCommand(clientOpts))
command.AddCommand(NewProjectRemoveDestinationCommand(clientOpts))
command.AddCommand(NewProjectAddSourceCommand(clientOpts))
command.AddCommand(NewProjectRemoveSourceCommand(clientOpts))
command.AddCommand(NewProjectAllowClusterResourceCommand(clientOpts))
command.AddCommand(NewProjectDenyClusterResourceCommand(clientOpts))
command.AddCommand(NewProjectAllowNamespaceResourceCommand(clientOpts))
command.AddCommand(NewProjectDenyNamespaceResourceCommand(clientOpts))
command.AddCommand(NewProjectWindowsCommand(clientOpts))
return command
}
func addProjFlags(command *cobra.Command, opts *projectOpts) {
command.Flags().StringVarP(&opts.description, "description", "", "", "Project description")
command.Flags().StringArrayVarP(&opts.destinations, "dest", "d", []string{},
"Permitted destination server and namespace (e.g. https://192.168.99.100:8443,default)")
command.Flags().StringArrayVarP(&opts.sources, "src", "s", []string{}, "Permitted source repository URL")
command.Flags().BoolVar(&opts.orphanedResourcesEnabled, "orphaned-resources", false, "Enables orphaned resources monitoring")
command.Flags().BoolVar(&opts.orphanedResourcesWarn, "orphaned-resources-warn", false, "Specifies whether applications should report a warning condition when orphaned resources are detected")
}
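// getOrphanedResourcesSettings (below) is deliberately tri-state: it returns
// nil when monitoring was not enabled and --orphaned-resources-warn was never
// passed, and it sets the Warn pointer only when that flag was explicitly
// changed, so the server can distinguish "unset" from an explicit "false".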
func getOrphanedResourcesSettings(c *cobra.Command, opts projectOpts) *v1alpha1.OrphanedResourcesMonitorSettings {
warnChanged := c.Flag("orphaned-resources-warn").Changed
if opts.orphanedResourcesEnabled || warnChanged {
settings := v1alpha1.OrphanedResourcesMonitorSettings{}
if warnChanged {
settings.Warn = pointer.BoolPtr(opts.orphanedResourcesWarn)
}
return &settings
}
return nil
}
func addPolicyFlags(command *cobra.Command, opts *policyOpts) {
command.Flags().StringVarP(&opts.action, "action", "a", "", "Action to grant/deny permission on (e.g. get, create, list, update, delete)")
command.Flags().StringVarP(&opts.permission, "permission", "p", "allow", "Whether to allow or deny access to the object with the given action. This can only be 'allow' or 'deny'")
command.Flags().StringVarP(&opts.object, "object", "o", "", "Object within the project to grant/deny access to. Use '*' for a wildcard. Access is granted/denied on '<project>/<object>'")
}
func humanizeTimestamp(epoch int64) string {
ts := time.Unix(epoch, 0)
return fmt.Sprintf("%s (%s)", ts.Format(time.RFC3339), humanize.Time(ts))
}
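// Illustrative output (the date shown is hypothetical and time-zone dependent):
//
//	humanizeTimestamp(time.Now().Unix() - 3600)
//	// => "2018-06-07T10:09:38-07:00 (1 hour ago)"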
// NewProjectCreateCommand returns a new instance of an `argocd proj create` command
func NewProjectCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts projectOpts
fileURL string
upsert bool
)
var command = &cobra.Command{
Use: "create PROJECT",
Short: "Create a project",
Run: func(c *cobra.Command, args []string) {
var proj v1alpha1.AppProject
if fileURL == "-" {
// read stdin
reader := bufio.NewReader(os.Stdin)
err := config.UnmarshalReader(reader, &proj)
if err != nil {
log.Fatalf("unable to read manifest from stdin: %v", err)
}
} else if fileURL != "" {
// read uri
parsedURL, err := url.ParseRequestURI(fileURL)
if err != nil || !(parsedURL.Scheme == "http" || parsedURL.Scheme == "https") {
err = config.UnmarshalLocalFile(fileURL, &proj)
} else {
err = config.UnmarshalRemoteFile(fileURL, &proj)
}
errors.CheckError(err)
if len(args) == 1 && args[0] != proj.Name {
log.Fatalf("project name '%s' does not match project spec metadata.name '%s'", args[0], proj.Name)
}
} else {
// read arguments
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
proj = v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{Name: projName},
Spec: v1alpha1.AppProjectSpec{
Description: opts.description,
Destinations: opts.GetDestinations(),
SourceRepos: opts.sources,
OrphanedResources: getOrphanedResourcesSettings(c, opts),
},
}
}
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
_, err := projIf.Create(context.Background(), &projectpkg.ProjectCreateRequest{Project: &proj, Upsert: upsert})
errors.CheckError(err)
},
}
command.Flags().BoolVar(&upsert, "upsert", false, "Allow overriding a project with the same name even if the supplied project spec differs from the existing spec")
command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the project")
err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"})
if err != nil {
log.Fatal(err)
}
addProjFlags(command, &opts)
return command
}
// NewProjectSetCommand returns a new instance of an `argocd proj set` command
func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts projectOpts
)
var command = &cobra.Command{
Use: "set PROJECT",
Short: "Set project parameters",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
visited := 0
c.Flags().Visit(func(f *pflag.Flag) {
visited++
switch f.Name {
case "description":
proj.Spec.Description = opts.description
case "dest":
proj.Spec.Destinations = opts.GetDestinations()
case "src":
proj.Spec.SourceRepos = opts.sources
case "orphaned-resources", "orphaned-resources-warn":
proj.Spec.OrphanedResources = getOrphanedResourcesSettings(c, opts)
}
})
if visited == 0 {
log.Error("Please set at least one option to update")
c.HelpFunc()(c, args)
os.Exit(1)
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
addProjFlags(command, &opts)
return command
}
// NewProjectAddDestinationCommand returns a new instance of an `argocd proj add-destination` command
func NewProjectAddDestinationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "add-destination PROJECT SERVER NAMESPACE",
Short: "Add project destination",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
server := args[1]
namespace := args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for _, dest := range proj.Spec.Destinations {
if dest.Namespace == namespace && dest.Server == server {
log.Fatal("Specified destination is already defined in project")
}
}
proj.Spec.Destinations = append(proj.Spec.Destinations, v1alpha1.ApplicationDestination{Server: server, Namespace: namespace})
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
// NewProjectRemoveDestinationCommand returns a new instance of an `argocd proj remove-destination` command
func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "remove-destination PROJECT SERVER NAMESPACE",
Short: "Remove project destination",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
server := args[1]
namespace := args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
index := -1
for i, dest := range proj.Spec.Destinations {
if dest.Namespace == namespace && dest.Server == server {
index = i
break
}
}
if index == -1 {
log.Fatal("Specified destination does not exist in project")
} else {
proj.Spec.Destinations = append(proj.Spec.Destinations[:index], proj.Spec.Destinations[index+1:]...)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
}
return command
}
// NewProjectAddSourceCommand returns a new instance of an `argocd proj add-src` command
func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "add-source PROJECT URL",
Short: "Add project source repository",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
url := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for _, item := range proj.Spec.SourceRepos {
if item == "*" && item == url {
fmt.Printf("Source repository '*' already allowed in project\n")
return
}
if git.SameURL(item, url) {
fmt.Printf("Source repository '%s' already allowed in project\n", item)
return
}
}
proj.Spec.SourceRepos = append(proj.Spec.SourceRepos, url)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
func modifyProjectResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, action func(proj *v1alpha1.AppProject, group string, kind string) bool) *cobra.Command {
return &cobra.Command{
Use: cmdUse,
Short: cmdDesc,
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, group, kind := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
if action(proj, group, kind) {
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
}
}
// NewProjectAllowNamespaceResourceCommand returns a new instance of an `argocd proj allow-namespace-resource` command
func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-namespace-resource PROJECT GROUP KIND"
desc := "Removes a namespaced API resource from the blacklist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
index := -1
for i, item := range proj.Spec.NamespaceResourceBlacklist {
if item.Group == group && item.Kind == kind {
index = i
break
}
}
if index == -1 {
fmt.Printf("Group '%s' and kind '%s' not in blacklisted namespaced resources\n", group, kind)
return false
}
proj.Spec.NamespaceResourceBlacklist = append(proj.Spec.NamespaceResourceBlacklist[:index], proj.Spec.NamespaceResourceBlacklist[index+1:]...)
return true
})
}
// NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command
func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-namespace-resource PROJECT GROUP KIND"
desc := "Adds a namespaced API resource to the blacklist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
for _, item := range proj.Spec.NamespaceResourceBlacklist {
if item.Group == group && item.Kind == kind {
fmt.Printf("Group '%s' and kind '%s' already present in blacklisted namespaced resources\n", group, kind)
return false
}
}
proj.Spec.NamespaceResourceBlacklist = append(proj.Spec.NamespaceResourceBlacklist, v1.GroupKind{Group: group, Kind: kind})
return true
})
}
// NewProjectDenyClusterResourceCommand returns a new instance of an `argocd proj deny-cluster-resource` command
func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-cluster-resource PROJECT GROUP KIND"
desc := "Removes a cluster-scoped API resource from the whitelist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
index := -1
for i, item := range proj.Spec.ClusterResourceWhitelist {
if item.Group == group && item.Kind == kind {
index = i
break
}
}
if index == -1 {
fmt.Printf("Group '%s' and kind '%s' not in whitelisted cluster resources\n", group, kind)
return false
}
proj.Spec.ClusterResourceWhitelist = append(proj.Spec.ClusterResourceWhitelist[:index], proj.Spec.ClusterResourceWhitelist[index+1:]...)
return true
})
}
// NewProjectAllowClusterResourceCommand returns a new instance of an `argocd proj allow-cluster-resource` command
func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-cluster-resource PROJECT GROUP KIND"
desc := "Adds a cluster-scoped API resource to the whitelist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
for _, item := range proj.Spec.ClusterResourceWhitelist {
if item.Group == group && item.Kind == kind {
fmt.Printf("Group '%s' and kind '%s' already present in whitelisted cluster resources\n", group, kind)
return false
}
}
proj.Spec.ClusterResourceWhitelist = append(proj.Spec.ClusterResourceWhitelist, v1.GroupKind{Group: group, Kind: kind})
return true
})
}
// NewProjectRemoveSourceCommand returns a new instance of an `argocd proj remove-source` command
func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "remove-source PROJECT URL",
Short: "Remove project source repository",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
url := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
index := -1
for i, item := range proj.Spec.SourceRepos {
if item == url {
index = i
break
}
}
if index == -1 {
fmt.Printf("Source repository '%s' does not exist in project\n", url)
} else {
proj.Spec.SourceRepos = append(proj.Spec.SourceRepos[:index], proj.Spec.SourceRepos[index+1:]...)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
}
return command
}
// NewProjectDeleteCommand returns a new instance of an `argocd proj delete` command
func NewProjectDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT",
Short: "Delete project",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
for _, name := range args {
_, err := projIf.Delete(context.Background(), &projectpkg.ProjectQuery{Name: name})
errors.CheckError(err)
}
},
}
return command
}
// Print list of project names
func printProjectNames(projects []v1alpha1.AppProject) {
for _, p := range projects {
fmt.Println(p.Name)
}
}
// Print table of project info
func printProjectTable(projects []v1alpha1.AppProject) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\tORPHANED-RESOURCES\n")
for _, p := range projects {
printProjectLine(w, &p)
}
_ = w.Flush()
}
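
The table printers in this file all follow the same text/tabwriter pattern: tab-separated cells, flushed once at the end. A minimal standalone version:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Cells are tab-separated; tabwriter pads each column to align it.
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintf(w, "NAME\tDESCRIPTION\n")
	fmt.Fprintf(w, "default\tbuilt-in project\n")
	_ = w.Flush()
}
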
// NewProjectListCommand returns a new instance of an `argocd proj list` command
func NewProjectListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List projects",
Run: func(c *cobra.Command, args []string) {
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
projects, err := projIf.List(context.Background(), &projectpkg.ProjectQuery{})
errors.CheckError(err)
if output == "name" {
printProjectNames(projects.Items)
} else {
printProjectTable(projects.Items)
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: wide|name")
return command
}
func formatOrphanedResources(p *v1alpha1.AppProject) string {
if p.Spec.OrphanedResources == nil {
return "disabled"
}
return fmt.Sprintf("enabled (warn=%v)", p.Spec.OrphanedResources.IsWarn())
}
func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
var destinations, sourceRepos, clusterWhitelist, namespaceBlacklist string
switch len(p.Spec.Destinations) {
case 0:
destinations = "<none>"
case 1:
destinations = fmt.Sprintf("%s,%s", p.Spec.Destinations[0].Server, p.Spec.Destinations[0].Namespace)
default:
destinations = fmt.Sprintf("%d destinations", len(p.Spec.Destinations))
}
switch len(p.Spec.SourceRepos) {
case 0:
sourceRepos = "<none>"
case 1:
sourceRepos = p.Spec.SourceRepos[0]
default:
sourceRepos = fmt.Sprintf("%d repos", len(p.Spec.SourceRepos))
}
switch len(p.Spec.ClusterResourceWhitelist) {
case 0:
clusterWhitelist = "<none>"
case 1:
clusterWhitelist = fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[0].Group, p.Spec.ClusterResourceWhitelist[0].Kind)
default:
clusterWhitelist = fmt.Sprintf("%d resources", len(p.Spec.ClusterResourceWhitelist))
}
switch len(p.Spec.NamespaceResourceBlacklist) {
case 0:
namespaceBlacklist = "<none>"
default:
namespaceBlacklist = fmt.Sprintf("%d resources", len(p.Spec.NamespaceResourceBlacklist))
}
fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, formatOrphanedResources(p))
}
// NewProjectGetCommand returns a new instance of an `argocd proj get` command
func NewProjectGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
const printProjFmtStr = "%-34s%s\n"
var command = &cobra.Command{
Use: "get PROJECT",
Short: "Get project details",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
p, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
fmt.Printf(printProjFmtStr, "Name:", p.Name)
fmt.Printf(printProjFmtStr, "Description:", p.Spec.Description)
// Print destinations
dest0 := "<none>"
if len(p.Spec.Destinations) > 0 {
dest0 = fmt.Sprintf("%s,%s", p.Spec.Destinations[0].Server, p.Spec.Destinations[0].Namespace)
}
fmt.Printf(printProjFmtStr, "Destinations:", dest0)
for i := 1; i < len(p.Spec.Destinations); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s,%s", p.Spec.Destinations[i].Server, p.Spec.Destinations[i].Namespace))
}
// Print sources
src0 := "<none>"
if len(p.Spec.SourceRepos) > 0 {
src0 = p.Spec.SourceRepos[0]
}
fmt.Printf(printProjFmtStr, "Repositories:", src0)
for i := 1; i < len(p.Spec.SourceRepos); i++ {
fmt.Printf(printProjFmtStr, "", p.Spec.SourceRepos[i])
}
// Print whitelisted cluster resources
cwl0 := "<none>"
if len(p.Spec.ClusterResourceWhitelist) > 0 {
cwl0 = fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[0].Group, p.Spec.ClusterResourceWhitelist[0].Kind)
}
fmt.Printf(printProjFmtStr, "Whitelisted Cluster Resources:", cwl0)
for i := 1; i < len(p.Spec.ClusterResourceWhitelist); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[i].Group, p.Spec.ClusterResourceWhitelist[i].Kind))
}
// Print blacklisted namespaced resources
rbl0 := "<none>"
if len(p.Spec.NamespaceResourceBlacklist) > 0 {
rbl0 = fmt.Sprintf("%s/%s", p.Spec.NamespaceResourceBlacklist[0].Group, p.Spec.NamespaceResourceBlacklist[0].Kind)
}
fmt.Printf(printProjFmtStr, "Blacklisted Namespaced Resources:", rbl0)
for i := 1; i < len(p.Spec.NamespaceResourceBlacklist); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s/%s", p.Spec.NamespaceResourceBlacklist[i].Group, p.Spec.NamespaceResourceBlacklist[i].Kind))
}
fmt.Printf(printProjFmtStr, "Orphaned Resources:", formatOrphanedResources(p))
},
}
return command
}
func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "edit PROJECT",
Short: "Edit project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
projData, err := json.Marshal(proj.Spec)
errors.CheckError(err)
projData, err = yaml.JSONToYAML(projData)
errors.CheckError(err)
cli.InteractiveEdit(fmt.Sprintf("%s-*-edit.yaml", projName), projData, func(input []byte) error {
input, err = yaml.YAMLToJSON(input)
if err != nil {
return err
}
updatedSpec := v1alpha1.AppProjectSpec{}
err = json.Unmarshal(input, &updatedSpec)
if err != nil {
return err
}
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
if err != nil {
return err
}
proj.Spec = updatedSpec
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
if err != nil {
return fmt.Errorf("Failed to update project:\n%v", err)
}
return err
})
},
}
return command
}
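
The edit flow round-trips the spec through JSON and YAML before applying the user's changes. A sketch of that round-trip with a simplified spec type, assuming the github.com/ghodss/yaml package (the file's import block is outside this hunk) for JSONToYAML/YAMLToJSON:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ghodss/yaml"
)

type spec struct {
	Description string   `json:"description,omitempty"`
	SourceRepos []string `json:"sourceRepos,omitempty"`
}

func main() {
	orig := spec{Description: "demo", SourceRepos: []string{"https://github.com/argoproj/argo-cd.git"}}

	// Marshal to JSON, then convert to YAML for the editor.
	jsonData, err := json.Marshal(orig)
	if err != nil {
		panic(err)
	}
	yamlData, err := yaml.JSONToYAML(jsonData)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", yamlData)

	// After editing, convert back and unmarshal into the typed spec.
	edited, err := yaml.YAMLToJSON(yamlData)
	if err != nil {
		panic(err)
	}
	var updated spec
	if err := json.Unmarshal(edited, &updated); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", updated)
}
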

View File

@@ -1,398 +0,0 @@
package commands
import (
"context"
"fmt"
"os"
"strconv"
"text/tabwriter"
timeutil "github.com/argoproj/pkg/time"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
projectpkg "github.com/argoproj/argo-cd/pkg/apiclient/project"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
)
const (
policyTemplate = "p, proj:%s:%s, applications, %s, %s/%s, %s"
)
// NewProjectRoleCommand returns a new instance of the `argocd proj role` command
func NewProjectRoleCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
roleCommand := &cobra.Command{
Use: "role",
Short: "Manage a project's roles",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
roleCommand.AddCommand(NewProjectRoleListCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleGetCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleCreateCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleDeleteCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleCreateTokenCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleDeleteTokenCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleAddPolicyCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleRemovePolicyCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleAddGroupCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleRemoveGroupCommand(clientOpts))
return roleCommand
}
// NewProjectRoleAddPolicyCommand returns a new instance of an `argocd proj role add-policy` command
func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts policyOpts
)
var command = &cobra.Command{
Use: "add-policy PROJECT ROLE-NAME",
Short: "Add a policy to a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
role, roleIndex, err := proj.GetRoleByName(roleName)
errors.CheckError(err)
policy := fmt.Sprintf(policyTemplate, proj.Name, role.Name, opts.action, proj.Name, opts.object, opts.permission)
proj.Spec.Roles[roleIndex].Policies = append(role.Policies, policy)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
addPolicyFlags(command, &opts)
return command
}
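
policyTemplate expands into a Casbin-style policy line. A quick sketch of the formatted output for illustrative values:

package main

import "fmt"

const policyTemplate = "p, proj:%s:%s, applications, %s, %s/%s, %s"

func main() {
	// proj=default, role=ci, action=sync, object=*, permission=allow
	policy := fmt.Sprintf(policyTemplate, "default", "ci", "sync", "default", "*", "allow")
	fmt.Println(policy)
	// Output: p, proj:default:ci, applications, sync, default/*, allow
}
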
// NewProjectRoleRemovePolicyCommand returns a new instance of an `argocd proj role remove-policy` command
func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts policyOpts
)
var command = &cobra.Command{
Use: "remove-policy PROJECT ROLE-NAME",
Short: "Remove a policy from a role within a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
role, roleIndex, err := proj.GetRoleByName(roleName)
errors.CheckError(err)
policyToRemove := fmt.Sprintf(policyTemplate, proj.Name, role.Name, opts.action, proj.Name, opts.object, opts.permission)
duplicateIndex := -1
for i, policy := range role.Policies {
if policy == policyToRemove {
duplicateIndex = i
break
}
}
if duplicateIndex < 0 {
return
}
role.Policies[duplicateIndex] = role.Policies[len(role.Policies)-1]
proj.Spec.Roles[roleIndex].Policies = role.Policies[:len(role.Policies)-1]
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
addPolicyFlags(command, &opts)
return command
}
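
Note that remove-policy deletes differently from the resource-list commands: it overwrites the match with the last element and truncates, which is O(1) but does not preserve order. A standalone sketch:

package main

import "fmt"

// swapDelete removes s[i] without preserving element order.
func swapDelete(s []string, i int) []string {
	s[i] = s[len(s)-1]
	return s[:len(s)-1]
}

func main() {
	policies := []string{"p1", "p2", "p3"}
	fmt.Println(swapDelete(policies, 0)) // [p3 p2]
}
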
// NewProjectRoleCreateCommand returns a new instance of an `argocd proj role create` command
func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
description string
)
var command = &cobra.Command{
Use: "create PROJECT ROLE-NAME",
Short: "Create a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
_, _, err = proj.GetRoleByName(roleName)
if err == nil {
fmt.Printf("Role '%s' already exists\n", roleName)
return
}
proj.Spec.Roles = append(proj.Spec.Roles, v1alpha1.ProjectRole{Name: roleName, Description: description})
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Role '%s' created\n", roleName)
},
}
command.Flags().StringVarP(&description, "description", "", "", "Project description")
return command
}
// NewProjectRoleDeleteCommand returns a new instance of an `argocd proj role delete` command
func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT ROLE-NAME",
Short: "Delete a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
_, index, err := proj.GetRoleByName(roleName)
if err != nil {
fmt.Printf("Role '%s' does not exist in project\n", roleName)
return
}
proj.Spec.Roles[index] = proj.Spec.Roles[len(proj.Spec.Roles)-1]
proj.Spec.Roles = proj.Spec.Roles[:len(proj.Spec.Roles)-1]
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Role '%s' deleted\n", roleName)
},
}
return command
}
// NewProjectRoleCreateTokenCommand returns a new instance of an `argocd proj role create-token` command
func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
expiresIn string
)
var command = &cobra.Command{
Use: "create-token PROJECT ROLE-NAME",
Short: "Create a project token",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
duration, err := timeutil.ParseDuration(expiresIn)
errors.CheckError(err)
token, err := projIf.CreateToken(context.Background(), &projectpkg.ProjectTokenCreateRequest{Project: projName, Role: roleName, ExpiresIn: int64(duration.Seconds())})
errors.CheckError(err)
fmt.Println(token.Token)
},
}
command.Flags().StringVarP(&expiresIn, "expires-in", "e", "0s", "Duration before the token will expire. (Default: No expiration)")
return command
}
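
create-token converts the --expires-in flag into seconds for the API request via timeutil.ParseDuration (from github.com/argoproj/pkg). As a rough stand-in, the standard library's time.ParseDuration covers the plain h/m/s cases:

package main

import (
	"fmt"
	"time"
)

func main() {
	duration, err := time.ParseDuration("12h")
	if err != nil {
		panic(err)
	}
	// A zero value means "no expiration", matching the flag's 0s default.
	fmt.Println(int64(duration.Seconds())) // 43200
}
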
// NewProjectRoleDeleteTokenCommand returns a new instance of an `argocd proj role delete-token` command
func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
Short: "Delete a project token",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
issuedAt, err := strconv.ParseInt(args[2], 10, 64)
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
_, err = projIf.DeleteToken(context.Background(), &projectpkg.ProjectTokenDeleteRequest{Project: projName, Role: roleName, Iat: issuedAt})
errors.CheckError(err)
},
}
return command
}
// Print list of project role names
func printProjectRoleListName(roles []v1alpha1.ProjectRole) {
for _, role := range roles {
fmt.Println(role.Name)
}
}
// Print table of project roles
func printProjectRoleListTable(roles []v1alpha1.ProjectRole) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ROLE-NAME\tDESCRIPTION\n")
for _, role := range roles {
fmt.Fprintf(w, "%s\t%s\n", role.Name, role.Description)
}
_ = w.Flush()
}
// NewProjectRoleListCommand returns a new instance of an `argocd proj role list` command
func NewProjectRoleListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List all the roles in a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
project, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
if output == "name" {
printProjectRoleListName(project.Spec.Roles)
} else {
printProjectRoleListTable(project.Spec.Roles)
}
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: wide|name")
return command
}
// NewProjectRoleGetCommand returns a new instance of an `argocd proj role get` command
func NewProjectRoleGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "get PROJECT ROLE-NAME",
Short: "Get the details of a specific role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
role, _, err := proj.GetRoleByName(roleName)
errors.CheckError(err)
printRoleFmtStr := "%-15s%s\n"
fmt.Printf(printRoleFmtStr, "Role Name:", roleName)
fmt.Printf(printRoleFmtStr, "Description:", role.Description)
fmt.Printf("Policies:\n")
fmt.Printf("%s\n", proj.ProjectPoliciesString())
fmt.Printf("JWT Tokens:\n")
// TODO(jessesuen): print groups
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ID\tISSUED-AT\tEXPIRES-AT\n")
for _, token := range role.JWTTokens {
expiresAt := "<none>"
if token.ExpiresAt > 0 {
expiresAt = humanizeTimestamp(token.ExpiresAt)
}
fmt.Fprintf(w, "%d\t%s\t%s\n", token.IssuedAt, humanizeTimestamp(token.IssuedAt), expiresAt)
}
_ = w.Flush()
},
}
return command
}
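
humanizeTimestamp is defined elsewhere in the package; a plausible minimal stand-in (an assumption, not the actual implementation) renders the Unix timestamp as local time:

package main

import (
	"fmt"
	"time"
)

// humanizeTimestamp renders a Unix timestamp in a readable form.
// Illustrative stand-in only, not the package's real implementation.
func humanizeTimestamp(ts int64) string {
	return time.Unix(ts, 0).Format(time.RFC1123)
}

func main() {
	fmt.Println(humanizeTimestamp(1528380359))
}
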
// NewProjectRoleAddGroupCommand returns a new instance of an `argocd proj role add-group` command
func NewProjectRoleAddGroupCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "add-group PROJECT ROLE-NAME GROUP-CLAIM",
Short: "Add a group claim to a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, roleName, groupName := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
updated, err := proj.AddGroupToRole(roleName, groupName)
errors.CheckError(err)
if !updated {
fmt.Printf("Group '%s' already present in role '%s'\n", groupName, roleName)
return
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Group '%s' added to role '%s'\n", groupName, roleName)
},
}
return command
}
// NewProjectRoleRemoveGroupCommand returns a new instance of an `argocd proj role remove-group` command
func NewProjectRoleRemoveGroupCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "remove-group PROJECT ROLE-NAME GROUP-CLAIM",
Short: "Remove a group claim from a role within a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, roleName, groupName := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
updated, err := proj.RemoveGroupFromRole(roleName, groupName)
errors.CheckError(err)
if !updated {
fmt.Printf("Group '%s' not present in role '%s'\n", groupName, roleName)
return
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Group '%s' removed from role '%s'\n", groupName, roleName)
},
}
return command
}

View File

@@ -1,311 +0,0 @@
package commands
import (
"context"
"os"
"github.com/spf13/cobra"
"fmt"
"strings"
"text/tabwriter"
"strconv"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
projectpkg "github.com/argoproj/argo-cd/pkg/apiclient/project"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
)
// NewProjectWindowsCommand returns a new instance of the `argocd proj windows` command
func NewProjectWindowsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
roleCommand := &cobra.Command{
Use: "windows",
Short: "Manage a project's sync windows",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
roleCommand.AddCommand(NewProjectWindowsDisableManualSyncCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsEnableManualSyncCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsAddWindowCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsDeleteCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsListCommand(clientOpts))
roleCommand.AddCommand(NewProjectWindowsUpdateCommand(clientOpts))
return roleCommand
}
// NewProjectWindowsDisableManualSyncCommand returns a new instance of an `argocd proj windows disable-manual-sync` command
func NewProjectWindowsDisableManualSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "disable-manual-sync PROJECT ID",
Short: "Disable manual sync for a sync window",
Long: "Disable manual sync for a sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
id, err := strconv.Atoi(args[1])
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for i, window := range proj.Spec.SyncWindows {
if id == i {
window.ManualSync = false
}
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
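
The loop above mutates window in place and then submits the whole project, which only persists if SyncWindows holds pointers, as the code implies. A minimal sketch of the value-versus-pointer distinction, with a hypothetical window type:

package main

import "fmt"

type window struct{ ManualSync bool }

func main() {
	byValue := []window{{ManualSync: false}}
	for _, w := range byValue {
		w.ManualSync = true // mutates a copy; the slice element is unchanged
	}

	byPointer := []*window{{ManualSync: false}}
	for _, w := range byPointer {
		w.ManualSync = true // mutates the shared element
	}

	fmt.Println(byValue[0].ManualSync, byPointer[0].ManualSync) // false true
}
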
// NewProjectWindowsEnableManualSyncCommand returns a new instance of an `argocd proj windows enable-manual-sync` command
func NewProjectWindowsEnableManualSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "enable-manual-sync PROJECT ID",
Short: "Enable manual sync for a sync window",
Long: "Enable manual sync for a sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
id, err := strconv.Atoi(args[1])
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for i, window := range proj.Spec.SyncWindows {
if id == i {
window.ManualSync = true
}
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
// NewProjectWindowsAddWindowCommand returns a new instance of an `argocd proj windows add` command
func NewProjectWindowsAddWindowCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
kind string
schedule string
duration string
applications []string
namespaces []string
clusters []string
manualSync bool
)
var command = &cobra.Command{
Use: "add PROJECT",
Short: "Add a sync window to a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
err = proj.Spec.AddWindow(kind, schedule, duration, applications, namespaces, clusters, manualSync)
errors.CheckError(err)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
command.Flags().StringVarP(&kind, "kind", "k", "", "Sync window kind, either allow or deny")
command.Flags().StringVar(&schedule, "schedule", "", "Sync window schedule in cron format. (e.g. --schedule \"0 22 * * *\")")
command.Flags().StringVar(&duration, "duration", "", "Sync window duration. (e.g. --duration 1h)")
command.Flags().StringSliceVar(&applications, "applications", []string{}, "Applications that the schedule will be applied to. Comma separated, wildcards supported (e.g. --applications prod-\\*,website)")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "Namespaces that the schedule will be applied to. Comma separated, wildcards supported (e.g. --namespaces default,\\*-prod)")
command.Flags().StringSliceVar(&clusters, "clusters", []string{}, "Clusters that the schedule will be applied to. Comma separated, wildcards supported (e.g. --clusters prod,staging)")
command.Flags().BoolVar(&manualSync, "manual-sync", false, "Allow manual syncs for both deny and allow windows")
return command
}
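
The --applications, --namespaces and --clusters flags accept shell-style wildcards. A sketch of that kind of matching with the standard library's path.Match (illustrative only; the real matching happens in the sync-window logic, not in this command):

package main

import (
	"fmt"
	"path"
)

func main() {
	for _, app := range []string{"prod-api", "staging-api", "website"} {
		matched, err := path.Match("prod-*", app)
		if err != nil {
			panic(err)
		}
		fmt.Println(app, matched) // only prod-api matches
	}
}
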
// NewProjectWindowsDeleteCommand returns a new instance of an `argocd proj windows delete` command
func NewProjectWindowsDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT ID",
Short: "Delete a sync window from a project. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
id, err := strconv.Atoi(args[1])
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
err = proj.Spec.DeleteWindow(id)
errors.CheckError(err)
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
return command
}
// NewProjectWindowsUpdateCommand returns a new instance of an `argocd proj windows update` command
func NewProjectWindowsUpdateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
schedule string
duration string
applications []string
namespaces []string
clusters []string
)
var command = &cobra.Command{
Use: "update PROJECT ID",
Short: "Update a project sync window",
Long: "Update a project sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
id, err := strconv.Atoi(args[1])
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for i, window := range proj.Spec.SyncWindows {
if id == i {
err := window.Update(schedule, duration, applications, namespaces, clusters)
errors.CheckError(err)
}
}
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
command.Flags().StringVar(&schedule, "schedule", "", "Sync window schedule in cron format. (e.g. --schedule \"0 22 * * *\")")
command.Flags().StringVar(&duration, "duration", "", "Sync window duration. (e.g. --duration 1h)")
command.Flags().StringSliceVar(&applications, "applications", []string{}, "Applications that the schedule will be applied to. Comma separated, wildcards supported (e.g. --applications prod-\\*,website)")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "Namespaces that the schedule will be applied to. Comma separated, wildcards supported (e.g. --namespaces default,\\*-prod)")
command.Flags().StringSliceVar(&clusters, "clusters", []string{}, "Clusters that the schedule will be applied to. Comma separated, wildcards supported (e.g. --clusters prod,staging)")
return command
}
// NewProjectWindowsListCommand returns a new instance of an `argocd proj windows list` command
func NewProjectWindowsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List project sync windows",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
printSyncWindows(proj)
},
}
return command
}
// Print table of sync window data
func printSyncWindows(proj *v1alpha1.AppProject) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
var fmtStr string
headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "APPLICATIONS", "NAMESPACES", "CLUSTERS", "MANUALSYNC"}
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
fmt.Fprintf(w, fmtStr, headers...)
if proj.Spec.SyncWindows.HasWindows() {
for i, window := range proj.Spec.SyncWindows {
vals := []interface{}{
strconv.Itoa(i),
formatBoolOutput(window.Active()),
window.Kind,
window.Schedule,
window.Duration,
formatListOutput(window.Applications),
formatListOutput(window.Namespaces),
formatListOutput(window.Clusters),
formatManualOutput(window.ManualSync),
}
fmt.Fprintf(w, fmtStr, vals...)
}
}
_ = w.Flush()
}
func formatListOutput(list []string) string {
var o string
if len(list) == 0 {
o = "-"
} else {
o = strings.Join(list, ",")
}
return o
}
func formatBoolOutput(active bool) string {
var o string
if active {
o = "Active"
} else {
o = "Inactive"
}
return o
}
func formatManualOutput(active bool) string {
var o string
if active {
o = "Enabled"
} else {
o = "Disabled"
}
return o
}

View File

@@ -1,86 +0,0 @@
package commands
import (
"context"
"fmt"
"os"
"github.com/coreos/go-oidc"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
settingspkg "github.com/argoproj/argo-cd/pkg/apiclient/settings"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/localconfig"
"github.com/argoproj/argo-cd/util/session"
)
// NewReloginCommand returns a new instance of `argocd relogin` command
func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
password string
ssoPort int
)
var command = &cobra.Command{
Use: "relogin",
Short: "Refresh an expired authenticate token",
Long: "Refresh an expired authenticate token",
Run: func(c *cobra.Command, args []string) {
if len(args) != 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
errors.CheckError(err)
if localCfg == nil {
log.Fatalf("No context found. Login using `argocd login`")
}
configCtx, err := localCfg.ResolveContext(localCfg.CurrentContext)
errors.CheckError(err)
var tokenString string
var refreshToken string
clientOpts := argocdclient.ClientOptions{
ConfigPath: "",
ServerAddr: configCtx.Server.Server,
Insecure: configCtx.Server.Insecure,
GRPCWeb: globalClientOpts.GRPCWeb,
PlainText: configCtx.Server.PlainText,
}
acdClient := argocdclient.NewClientOrDie(&clientOpts)
claims, err := configCtx.User.Claims()
errors.CheckError(err)
if claims.Issuer == session.SessionManagerClaimsIssuer {
fmt.Printf("Relogging in as '%s'\n", claims.Subject)
tokenString = passwordLogin(acdClient, claims.Subject, password)
} else {
fmt.Println("Reinitiating SSO login")
setConn, setIf := acdClient.NewSettingsClientOrDie()
defer util.Close(setConn)
ctx := context.Background()
httpClient, err := acdClient.HTTPClient()
errors.CheckError(err)
ctx = oidc.ClientContext(ctx, httpClient)
acdSet, err := setIf.Get(ctx, &settingspkg.SettingsQuery{})
errors.CheckError(err)
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
errors.CheckError(err)
tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider)
}
localCfg.UpsertUser(localconfig.User{
Name: localCfg.CurrentContext,
AuthToken: tokenString,
RefreshToken: refreshToken,
})
err = localconfig.WriteLocalConfig(*localCfg, globalClientOpts.ConfigPath)
errors.CheckError(err)
fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext)
},
}
command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "port to run local OAuth2 login application")
return command
}
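
relogin chooses between the password and SSO flows by inspecting the issuer claim of the stored token. A rough sketch of pulling the issuer out of a JWT payload without verification (illustrative only; the CLI uses its own claims helper):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// issuer decodes the middle (payload) segment of a JWT and returns its "iss" claim.
func issuer(token string) (string, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return "", fmt.Errorf("not a JWT")
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return "", err
	}
	var claims struct {
		Iss string `json:"iss"`
	}
	if err := json.Unmarshal(payload, &claims); err != nil {
		return "", err
	}
	return claims.Iss, nil
}

func main() {
	// The two segments below encode {"alg":"none"} and {"iss":"argocd"}.
	tok := "eyJhbGciOiJub25lIn0.eyJpc3MiOiJhcmdvY2QifQ."
	iss, err := issuer(tok)
	fmt.Println(iss, err) // argocd <nil>
}
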

View File

@@ -7,24 +7,22 @@ import (
"os"
"text/tabwriter"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
repositorypkg "github.com/argoproj/argo-cd/pkg/apiclient/repository"
appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/git"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewRepoCommand returns a new instance of an `argocd repo` command
func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "repo",
Short: "Manage repository connection parameters",
Short: "Manage git repository credentials",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
@@ -40,137 +38,46 @@ func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
// NewRepoAddCommand returns a new instance of an `argocd repo add` command
func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
repo appsv1.Repository
upsert bool
sshPrivateKeyPath string
insecureIgnoreHostKey bool
insecureSkipServerVerification bool
tlsClientCertPath string
tlsClientCertKeyPath string
enableLfs bool
repo appsv1.Repository
sshPrivateKeyPath string
)
// For better readability and easier formatting
var repoAddExamples = ` # Add a Git repository via SSH using a private key for authentication, ignoring the server's host key:
argocd repo add git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa
# Add a private Git repository via HTTPS using username/password and TLS client certificates:
argocd repo add https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key
# Add a private Git repository via HTTPS using username/password without verifying the server's TLS certificate
argocd repo add https://git.example.com/repos/repo --username git --password secret --insecure-skip-server-verification
# Add a public Helm repository named 'stable' via HTTPS
argocd repo add https://kubernetes-charts.storage.googleapis.com --type helm --name stable
# Add a private Helm repository named 'stable' via HTTPS
argocd repo add https://kubernetes-charts.storage.googleapis.com --type helm --name stable --username test --password test
`
var command = &cobra.Command{
Use: "add REPOURL",
Short: "Add git repository connection parameters",
Example: repoAddExamples,
Use: "add REPO",
Short: "Add git repository credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
// Repository URL
repo.Repo = args[0]
// Specifying ssh-private-key-path is only valid for SSH repositories
if sshPrivateKeyPath != "" {
if ok, _ := git.IsSSHURL(repo.Repo); ok {
keyData, err := ioutil.ReadFile(sshPrivateKeyPath)
if err != nil {
log.Fatal(err)
}
repo.SSHPrivateKey = string(keyData)
} else {
err := fmt.Errorf("--ssh-private-key-path is only supported for SSH repositories.")
errors.CheckError(err)
keyData, err := ioutil.ReadFile(sshPrivateKeyPath)
if err != nil {
log.Fatal(err)
}
repo.SSHPrivateKey = string(keyData)
}
// tls-client-cert-path and tls-client-cert-key-key-path must always be
// specified together
if (tlsClientCertPath != "" && tlsClientCertKeyPath == "") || (tlsClientCertPath == "" && tlsClientCertKeyPath != "") {
err := fmt.Errorf("--tls-client-cert-path and --tls-client-cert-key-path must be specified together")
errors.CheckError(err)
}
// Specifying tls-client-cert-path is only valid for HTTPS repositories
if tlsClientCertPath != "" {
if git.IsHTTPSURL(repo.Repo) {
tlsCertData, err := ioutil.ReadFile(tlsClientCertPath)
errors.CheckError(err)
tlsCertKey, err := ioutil.ReadFile(tlsClientCertKeyPath)
errors.CheckError(err)
repo.TLSClientCertData = string(tlsCertData)
repo.TLSClientCertKey = string(tlsCertKey)
} else {
err := fmt.Errorf("--tls-client-cert-path is only supported for HTTPS repositories")
errors.CheckError(err)
err := git.TestRepo(repo.Repo, repo.Username, repo.Password, repo.SSHPrivateKey)
if err != nil {
if repo.Username != "" && repo.Password != "" || git.IsSSHURL(repo.Repo) {
// if everything was supplied or repo URL is SSH url, one of the inputs was definitely bad
log.Fatal(err)
}
// If we can't test the repo, it's probably private. Prompt for credentials and try again.
repo.Username, repo.Password = cli.PromptCredentials(repo.Username, repo.Password)
err = git.TestRepo(repo.Repo, repo.Username, repo.Password, repo.SSHPrivateKey)
}
// InsecureIgnoreHostKey is deprecated and only here for backwards compat
repo.InsecureIgnoreHostKey = insecureIgnoreHostKey
repo.Insecure = insecureSkipServerVerification
repo.EnableLFS = enableLfs
if repo.Type == "helm" && repo.Name == "" {
errors.CheckError(fmt.Errorf("Must specify --name for repos of type 'helm'"))
}
errors.CheckError(err)
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
defer util.Close(conn)
// If the user set a username, but didn't supply password via --password,
// then we prompt for it
if repo.Username != "" && repo.Password == "" {
repo.Password = cli.PromptPassword(repo.Password)
}
// We let the server check access to the repository before adding it. If
// it is a private repo, but we cannot access it with the credentials
// that were supplied, we bail out.
repoAccessReq := repositorypkg.RepoAccessQuery{
Repo: repo.Repo,
Type: repo.Type,
Name: repo.Name,
Username: repo.Username,
Password: repo.Password,
SshPrivateKey: repo.SSHPrivateKey,
TlsClientCertData: repo.TLSClientCertData,
TlsClientCertKey: repo.TLSClientCertKey,
Insecure: repo.IsInsecure(),
}
_, err := repoIf.ValidateAccess(context.Background(), &repoAccessReq)
errors.CheckError(err)
repoCreateReq := repositorypkg.RepoCreateRequest{
Repo: &repo,
Upsert: upsert,
}
createdRepo, err := repoIf.Create(context.Background(), &repoCreateReq)
createdRepo, err := repoIf.Create(context.Background(), &repo)
errors.CheckError(err)
fmt.Printf("repository '%s' added\n", createdRepo.Repo)
},
}
command.Flags().StringVar(&repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"")
command.Flags().StringVar(&repo.Name, "name", "", "name of the repository, mandatory for repositories of type helm")
command.Flags().StringVar(&repo.Username, "username", "", "username to the repository")
command.Flags().StringVar(&repo.Password, "password", "", "password to the repository")
command.Flags().StringVar(&sshPrivateKeyPath, "ssh-private-key-path", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().StringVar(&tlsClientCertPath, "tls-client-cert-path", "", "path to the TLS client cert (must be PEM format)")
command.Flags().StringVar(&tlsClientCertKeyPath, "tls-client-cert-key-path", "", "path to the TLS client cert's key path (must be PEM format)")
command.Flags().BoolVar(&insecureIgnoreHostKey, "insecure-ignore-host-key", false, "disables SSH strict host key checking (deprecated, use --insecure-skip-server-validation instead)")
command.Flags().BoolVar(&insecureSkipServerVerification, "insecure-skip-server-verification", false, "disables server certificate and host key checks")
command.Flags().BoolVar(&enableLfs, "enable-lfs", false, "enable git-lfs (Large File Support) on this repository")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
command.Flags().StringVar(&sshPrivateKeyPath, "sshPrivateKeyPath", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
return command
}
@@ -178,7 +85,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rm REPO",
Short: "Remove repository credentials",
Short: "Remove git repository credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
@@ -187,7 +94,7 @@ func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
defer util.Close(conn)
for _, repoURL := range args {
_, err := repoIf.Delete(context.Background(), &repositorypkg.RepoQuery{Repo: repoURL})
_, err := repoIf.Delete(context.Background(), &repository.RepoQuery{Repo: repoURL})
errors.CheckError(err)
}
},
@@ -195,49 +102,23 @@ func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
return command
}
// Print table of repo info
func printRepoTable(repos appsv1.Repositories) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "TYPE\tNAME\tREPO\tINSECURE\tLFS\tUSER\tSTATUS\tMESSAGE\n")
for _, r := range repos {
var username string
if r.Username == "" {
username = "-"
} else {
username = r.Username
}
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%v\t%v\t%s\t%s\t%s\n", r.Type, r.Name, r.Repo, r.IsInsecure(), r.EnableLFS, username, r.ConnectionState.Status, r.ConnectionState.Message)
}
_ = w.Flush()
}
// Print list of repo urls
func printRepoUrls(repos appsv1.Repositories) {
for _, r := range repos {
fmt.Println(r.Repo)
}
}
// NewRepoListCommand returns a new instance of an `argocd repo list` command
func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List configured repositories",
Run: func(c *cobra.Command, args []string) {
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
defer util.Close(conn)
repos, err := repoIf.List(context.Background(), &repositorypkg.RepoQuery{})
repos, err := repoIf.List(context.Background(), &repository.RepoQuery{})
errors.CheckError(err)
if output == "url" {
printRepoUrls(repos.Items)
} else {
printRepoTable(repos.Items)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "REPO\tUSER\tMESSAGE\n")
for _, r := range repos.Items {
fmt.Fprintf(w, "%s\t%s\t%s\n", r.Repo, r.Username, r.Message)
}
_ = w.Flush()
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: wide|url")
return command
}

View File

@@ -1,26 +1,13 @@
package commands
import (
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/config"
"github.com/argoproj/argo-cd/util/localconfig"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
)
func init() {
cobra.OnInitialize(initConfig)
}
var logLevel string
func initConfig() {
cli.SetLogLevel(logLevel)
}
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
@@ -30,35 +17,29 @@ func NewCommand() *cobra.Command {
var command = &cobra.Command{
Use: cliName,
Short: "argocd controls a Argo CD server",
Short: "argocd controls a ArgoCD server",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
}
command.AddCommand(NewCompletionCommand())
command.AddCommand(NewVersionCmd(&clientOpts))
command.AddCommand(NewClusterCommand(&clientOpts, pathOpts))
command.AddCommand(NewApplicationCommand(&clientOpts))
command.AddCommand(NewLoginCommand(&clientOpts))
command.AddCommand(NewReloginCommand(&clientOpts))
command.AddCommand(NewRepoCommand(&clientOpts))
command.AddCommand(NewInstallCommand())
command.AddCommand(NewUninstallCommand())
command.AddCommand(NewContextCommand(&clientOpts))
command.AddCommand(NewProjectCommand(&clientOpts))
command.AddCommand(NewAccountCommand(&clientOpts))
command.AddCommand(NewLogoutCommand(&clientOpts))
command.AddCommand(NewCertCommand(&clientOpts))
defaultLocalConfigPath, err := localconfig.DefaultLocalConfigPath()
errors.CheckError(err)
command.PersistentFlags().StringVar(&clientOpts.ConfigPath, "config", config.GetFlag("config", defaultLocalConfigPath), "Path to Argo CD config")
command.PersistentFlags().StringVar(&clientOpts.ServerAddr, "server", config.GetFlag("server", ""), "Argo CD server address")
command.PersistentFlags().BoolVar(&clientOpts.PlainText, "plaintext", config.GetBoolFlag("plaintext"), "Disable TLS")
command.PersistentFlags().BoolVar(&clientOpts.Insecure, "insecure", config.GetBoolFlag("insecure"), "Skip server certificate and domain verification")
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", config.GetFlag("server-crt", ""), "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", ""), "Authentication token")
command.PersistentFlags().BoolVar(&clientOpts.GRPCWeb, "grpc-web", config.GetBoolFlag("grpc-web"), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2.")
command.PersistentFlags().StringVar(&logLevel, "loglevel", config.GetFlag("loglevel", "info"), "Set the logging level. One of: debug|info|warn|error")
command.PersistentFlags().StringSliceVarP(&clientOpts.Headers, "header", "H", []string{}, "Sets additional header to all requests made by Argo CD CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers)")
command.PersistentFlags().StringVar(&clientOpts.ConfigPath, "config", defaultLocalConfigPath, "Path to ArgoCD config")
command.PersistentFlags().StringVar(&clientOpts.ServerAddr, "server", "", "ArgoCD server address")
command.PersistentFlags().BoolVar(&clientOpts.PlainText, "plaintext", false, "Disable TLS")
command.PersistentFlags().BoolVar(&clientOpts.Insecure, "insecure", false, "Skip server certificate and domain verification")
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", "", "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", "", "Authentication token")
return command
}

View File

@@ -1,18 +0,0 @@
contexts:
- name: argocd.example.com:443
server: argocd.example.com:443
user: argocd.example.com:443
- name: localhost:8080
server: localhost:8080
user: localhost:8080
current-context: localhost:8080
servers:
- server: argocd.example.com:443
- plain-text: true
server: localhost:8080
users:
- auth-token: vErrYS3c3tReFRe$hToken
name: argocd.example.com:443
refresh-token: vErrYS3c3tReFRe$hToken
- auth-token: vErrYS3c3tReFRe$hToken
name: localhost:8080

View File

@@ -0,0 +1,40 @@
package commands
import (
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/install"
"github.com/argoproj/argo-cd/util/cli"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
)
// NewUninstallCommand returns a new instance of the `argocd uninstall` command
func NewUninstallCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
installOpts install.InstallOptions
deleteNamespace bool
deleteCRD bool
)
var command = &cobra.Command{
Use: "uninstall",
Short: "Uninstall Argo CD",
Long: "Uninstall Argo CD",
Run: func(c *cobra.Command, args []string) {
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, wasSpecified, err := clientConfig.Namespace()
errors.CheckError(err)
if wasSpecified {
installOpts.Namespace = namespace
}
installer, err := install.NewInstaller(conf, installOpts)
errors.CheckError(err)
installer.Uninstall(deleteNamespace, deleteCRD)
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().BoolVar(&deleteNamespace, "delete-namespace", false, "Also delete the namespace during uninstall")
command.Flags().BoolVar(&deleteCRD, "delete-crd", false, "Also delete the Application CRD during uninstall")
return command
}

View File

@@ -4,13 +4,12 @@ import (
"context"
"fmt"
"github.com/golang/protobuf/ptypes/empty"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/common"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util"
"github.com/golang/protobuf/ptypes/empty"
"github.com/spf13/cobra"
)
// NewVersionCmd returns a new `version` command to be used as a sub-command to root
@@ -22,7 +21,7 @@ func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command {
Use: "version",
Short: fmt.Sprintf("Print version information"),
Run: func(cmd *cobra.Command, args []string) {
version := common.GetVersion()
version := argocd.GetVersion()
fmt.Printf("%s: %s\n", cliName, version)
if !short {
fmt.Printf(" BuildDate: %s\n", version.BuildDate)
@@ -55,10 +54,6 @@ func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command {
fmt.Printf(" GoVersion: %s\n", serverVers.GoVersion)
fmt.Printf(" Compiler: %s\n", serverVers.Compiler)
fmt.Printf(" Platform: %s\n", serverVers.Platform)
fmt.Printf(" Ksonnet Version: %s\n", serverVers.KsonnetVersion)
fmt.Printf(" Kustomize Version: %s\n", serverVers.KustomizeVersion)
fmt.Printf(" Helm Version: %s\n", serverVers.HelmVersion)
fmt.Printf(" Kubectl Version: %s\n", serverVers.KubectlVersion)
}
},

View File

@@ -1,149 +1,82 @@
package common
// Default service addresses and URLs of Argo CD internal services
const (
// DefaultRepoServerAddr is the gRPC address of the Argo CD repo server
DefaultRepoServerAddr = "argocd-repo-server:8081"
// DefaultDexServerAddr is the HTTP address of the Dex OIDC server, which we run a reverse proxy against
DefaultDexServerAddr = "http://argocd-dex-server:5556"
// DefaultRedisAddr is the default redis address
DefaultRedisAddr = "argocd-redis:6379"
import (
"github.com/argoproj/argo-cd/pkg/apis/application"
rbacv1 "k8s.io/api/rbac/v1"
)
// Kubernetes ConfigMap and Secret resource names which hold Argo CD settings
const (
ArgoCDConfigMapName = "argocd-cm"
ArgoCDSecretName = "argocd-secret"
ArgoCDRBACConfigMapName = "argocd-rbac-cm"
// Contains SSH known hosts data for connecting repositories. Will get mounted as volume to pods
ArgoCDKnownHostsConfigMapName = "argocd-ssh-known-hosts-cm"
// Contains TLS certificate data for connecting repositories. Will get mounted as volume to pods
ArgoCDTLSCertsConfigMapName = "argocd-tls-certs-cm"
)
// MetadataPrefix is the prefix used for our labels and annotations
MetadataPrefix = "argocd.argoproj.io"
// Some default configurables
const (
DefaultSystemNamespace = "kube-system"
DefaultRepoType = "git"
)
// SecretTypeRepository indicates a secret type of repository
SecretTypeRepository = "repository"
// Default listener ports for ArgoCD components
const (
DefaultPortAPIServer = 8080
DefaultPortRepoServer = 8081
DefaultPortArgoCDMetrics = 8082
DefaultPortArgoCDAPIServerMetrics = 8083
DefaultPortRepoServerMetrics = 8084
)
// SecretTypeCluster indicates a secret type of cluster
SecretTypeCluster = "cluster"
// Default paths on the pod's file system
const (
// The default base path where application config is located
DefaultPathAppConfig = "/app/config"
// The default path where TLS certificates for repositories are located
DefaultPathTLSConfig = "/app/config/tls"
// The default path where SSH known hosts are stored
DefaultPathSSHConfig = "/app/config/ssh"
// Default name for the SSH known hosts file
DefaultSSHKnownHostsName = "ssh_known_hosts"
)
// Argo CD application related constants
const (
// KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster
KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc"
// DefaultAppProjectName contains name of 'default' app project, which is available in every Argo CD installation
DefaultAppProjectName = "default"
// ArgoCDAdminUsername is the username of the 'admin' user
ArgoCDAdminUsername = "admin"
// ArgoCDUserAgentName is the default user-agent name used by the gRPC API client library and grpc-gateway
ArgoCDUserAgentName = "argocd-client"
// AuthCookieName is the HTTP cookie name where we store our auth token
AuthCookieName = "argocd.token"
// RevisionHistoryLimit is the max number of successful sync to keep in history
RevisionHistoryLimit = 10
// K8sClientConfigQPS controls the QPS to be used in K8s REST client configs
K8sClientConfigQPS = 25
// K8sClientConfigBurst controls the burst to be used in K8s REST client configs
K8sClientConfigBurst = 50
// ResourcesFinalizerName is the name of the application CRD finalizer
ResourcesFinalizerName = "resources-finalizer." + MetadataPrefix
)
const (
ArgoCDAdminUsername = "admin"
ArgoCDSecretName = "argocd-secret"
ArgoCDConfigMapName = "argocd-cm"
)
// Dex related constants
const (
// DexAPIEndpoint is the endpoint where we serve the Dex API server
DexAPIEndpoint = "/api/dex"
// LoginEndpoint is Argo CD's shorthand login endpoint which redirects to dex's OAuth 2.0 provider's consent page
// LoginEndpoint is ArgoCD's shorthand login endpoint which redirects to dex's OAuth 2.0 provider's consent page
LoginEndpoint = "/auth/login"
// CallbackEndpoint is Argo CD's final callback endpoint we reach after OAuth 2.0 login flow has been completed
// CallbackEndpoint is ArgoCD's final callback endpoint we reach after OAuth 2.0 login flow has been completed
CallbackEndpoint = "/auth/callback"
// DexCallbackEndpoint is Argo CD's final callback endpoint when Dex is configured
DexCallbackEndpoint = "/api/dex/callback"
// ArgoCDClientAppName is name of the Oauth client app used when registering our web app to dex
ArgoCDClientAppName = "Argo CD"
ArgoCDClientAppName = "ArgoCD"
// ArgoCDClientAppID is the Oauth client ID we will use when registering our app to dex
ArgoCDClientAppID = "argo-cd"
// ArgoCDCLIClientAppName is name of the Oauth client app used when registering our CLI to dex
ArgoCDCLIClientAppName = "Argo CD CLI"
ArgoCDCLIClientAppName = "ArgoCD CLI"
// ArgoCDCLIClientAppID is the Oauth client ID we will use when registering our CLI to dex
ArgoCDCLIClientAppID = "argo-cd-cli"
)
// Resource metadata labels and annotations (keys and values) used by Argo CD components
const (
// LabelKeyAppInstance is the label key to use to uniquely identify the instance of an application
// The Argo CD application name is used as the instance name
LabelKeyAppInstance = "app.kubernetes.io/instance"
// LabelKeyLegacyApplicationName is the legacy label (v0.10 and below) and is superseded by 'app.kubernetes.io/instance'
LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name"
// LabelKeySecretType contains the type of argocd secret (currently: 'cluster')
LabelKeySecretType = "argocd.argoproj.io/secret-type"
// LabelValueSecretTypeCluster indicates a secret type of cluster
LabelValueSecretTypeCluster = "cluster"
// AnnotationCompareOptions is a comma-separated list of options for comparison
AnnotationCompareOptions = "argocd.argoproj.io/compare-options"
// AnnotationSyncOptions is a comma-separated list of options for syncing
AnnotationSyncOptions = "argocd.argoproj.io/sync-options"
// AnnotationSyncWave indicates which wave of the sync the resource or hook should be in
AnnotationSyncWave = "argocd.argoproj.io/sync-wave"
// AnnotationKeyHook contains the hook type of a resource
AnnotationKeyHook = "argocd.argoproj.io/hook"
// AnnotationKeyHookDeletePolicy is the policy of deleting a hook
AnnotationKeyHookDeletePolicy = "argocd.argoproj.io/hook-delete-policy"
// AnnotationKeyRefresh is the annotation key which indicates that the app needs to be refreshed. Removed by the application controller after the app is refreshed.
// Might take values 'normal'/'hard'. Value 'hard' means manifest cache and target cluster state cache should be invalidated before refresh.
AnnotationKeyRefresh = "argocd.argoproj.io/refresh"
// AnnotationKeyManagedBy is the annotation name which indicates that a k8s resource is managed by an application.
AnnotationKeyManagedBy = "managed-by"
// AnnotationValueManagedByArgoCD is a 'managed-by' annotation value for resources managed by Argo CD
AnnotationValueManagedByArgoCD = "argocd.argoproj.io"
// ResourcesFinalizerName the finalizer value which we inject to finalize deletion of an application
ResourcesFinalizerName = "resources-finalizer.argocd.argoproj.io"
)
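For illustration (a sketch, not the controller's actual parsing code), the sync-wave annotation carries a stringified integer that a consumer would read roughly as follows:

package main

import (
    "fmt"
    "strconv"
)

// syncWave parses the sync-wave annotation value, defaulting to wave 0
// when the annotation is absent or malformed.
func syncWave(annotations map[string]string) int {
    if v, ok := annotations["argocd.argoproj.io/sync-wave"]; ok { // AnnotationSyncWave
        if wave, err := strconv.Atoi(v); err == nil {
            return wave
        }
    }
    return 0
}

func main() {
    fmt.Println(syncWave(map[string]string{"argocd.argoproj.io/sync-wave": "5"})) // prints 5
}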
// Environment variables for tuning and debugging Argo CD
const (
// EnvVarSSODebug is an environment variable to enable additional OAuth debugging in the API server
EnvVarSSODebug = "ARGOCD_SSO_DEBUG"
// EnvVarRBACDebug is an environment variable to enable additional RBAC debugging in the API server
EnvVarRBACDebug = "ARGOCD_RBAC_DEBUG"
// EnvVarFakeInClusterConfig is an environment variable to fake an in-cluster RESTConfig using
// the current kubectl context (for development purposes)
EnvVarFakeInClusterConfig = "ARGOCD_FAKE_IN_CLUSTER"
// Overrides the location where SSH known hosts for repo access are stored
EnvVarSSHDataPath = "ARGOCD_SSH_DATA_PATH"
// Overrides the location where TLS certificates for repo access are stored
EnvVarTLSDataPath = "ARGOCD_TLS_DATA_PATH"
// Specifies the number of git remote operation attempts
EnvGitAttemptsCount = "ARGOCD_GIT_ATTEMPTS_COUNT"
)
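A minimal sketch of how a tuning variable like EnvGitAttemptsCount might be consumed; the fallback of a single attempt is an assumption for illustration:

package main

import (
    "fmt"
    "os"
    "strconv"
)

// gitAttemptsCount reads ARGOCD_GIT_ATTEMPTS_COUNT, falling back to a single
// attempt when the variable is unset or not a positive integer.
func gitAttemptsCount() int {
    if v := os.Getenv("ARGOCD_GIT_ATTEMPTS_COUNT"); v != "" { // EnvGitAttemptsCount
        if n, err := strconv.Atoi(v); err == nil && n > 0 {
            return n
        }
    }
    return 1
}

func main() {
    fmt.Println(gitAttemptsCount())
}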
const (
// MinClientVersion is the minimum client version that can interface with this API server.
// When introducing breaking changes to the API or datastructures, this number should be bumped.
// The value here may be lower than the current value in VERSION
MinClientVersion = "1.3.0"
// CacheVersion is the version of objects cached using util/cache/cache.go.
// Number should be bumped in case of a backward incompatible change to make sure the cache is invalidated after an upgrade.
CacheVersion = "1.0.0"
)
var (
// LabelKeyAppInstance refers to the application instance resource name
LabelKeyAppInstance = MetadataPrefix + "/app-instance"
// LabelKeySecretType contains the type of argocd secret (either 'cluster' or 'repo')
LabelKeySecretType = MetadataPrefix + "/secret-type"
// LabelKeyApplicationControllerInstanceID is the label which allows separating applications among multiple running application controllers.
LabelKeyApplicationControllerInstanceID = application.ApplicationFullName + "/controller-instanceid"
// LabelApplicationName is the label which indicates that a resource belongs to the application with the specified name
LabelApplicationName = application.ApplicationFullName + "/app-name"
// AnnotationKeyRefresh is the annotation key in the application which is updated with an
// arbitrary value (i.e. timestamp) on a git event, to force the controller to wake up and
// re-evaluate the application
AnnotationKeyRefresh = application.ApplicationFullName + "/refresh"
)
// ArgoCDManagerServiceAccount is the name of the service account for managing a cluster
const (
ArgoCDManagerServiceAccount = "argocd-manager"
ArgoCDManagerClusterRole = "argocd-manager-role"
ArgoCDManagerClusterRoleBinding = "argocd-manager-role-binding"
)
// ArgoCDManagerPolicyRules are the policies to give argocd-manager
var ArgoCDManagerPolicyRules = []rbacv1.PolicyRule{
{
APIGroups: []string{"*"},
Resources: []string{"*"},
Verbs: []string{"*"},
},
}
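Note that the wildcard rule above is effectively cluster-admin. Where that is too broad, a narrower rule set could be sketched as follows (the resource and verb lists are illustrative, not what Argo CD ships):

// readOnlyPolicyRules is an illustrative, narrower alternative to the
// wildcard rules above; it grants read-only access to a few core resources.
var readOnlyPolicyRules = []rbacv1.PolicyRule{
    {
        APIGroups: []string{""},
        Resources: []string{"pods", "services", "configmaps"},
        Verbs:     []string{"get", "list", "watch"},
    },
}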

188
common/installer.go Normal file
View File

@@ -0,0 +1,188 @@
package common
import (
"fmt"
"time"
"github.com/argoproj/argo-cd/errors"
log "github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// CreateServiceAccount creates a service account
func CreateServiceAccount(
clientset kubernetes.Interface,
serviceAccountName string,
namespace string,
) {
serviceAccount := apiv1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Namespace: namespace,
},
}
_, err := clientset.CoreV1().ServiceAccounts(namespace).Create(&serviceAccount)
if err != nil {
if !apierr.IsAlreadyExists(err) {
log.Fatalf("Failed to create service account '%s': %v\n", serviceAccountName, err)
}
fmt.Printf("ServiceAccount '%s' already exists\n", serviceAccountName)
return
}
fmt.Printf("ServiceAccount '%s' created\n", serviceAccountName)
}
// CreateClusterRole creates a cluster role
func CreateClusterRole(
clientset kubernetes.Interface,
clusterRoleName string,
rules []rbacv1.PolicyRule,
) {
clusterRole := rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleName,
},
Rules: rules,
}
crclient := clientset.RbacV1().ClusterRoles()
_, err := crclient.Create(&clusterRole)
if err != nil {
if !apierr.IsAlreadyExists(err) {
log.Fatalf("Failed to create ClusterRole '%s': %v\n", clusterRoleName, err)
}
_, err = crclient.Update(&clusterRole)
if err != nil {
log.Fatalf("Failed to update ClusterRole '%s': %v\n", clusterRoleName, err)
}
fmt.Printf("ClusterRole '%s' updated\n", clusterRoleName)
} else {
fmt.Printf("ClusterRole '%s' created\n", clusterRoleName)
}
}
// CreateClusterRoleBinding creates a ClusterRoleBinding
func CreateClusterRoleBinding(
clientset kubernetes.Interface,
clusterBindingRoleName,
serviceAccountName,
clusterRoleName string,
namespace string,
) {
roleBinding := rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterBindingRoleName,
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: clusterRoleName,
},
Subjects: []rbacv1.Subject{
{
Kind: rbacv1.ServiceAccountKind,
Name: serviceAccountName,
Namespace: namespace,
},
},
}
_, err := clientset.RbacV1().ClusterRoleBindings().Create(&roleBinding)
if err != nil {
if !apierr.IsAlreadyExists(err) {
log.Fatalf("Failed to create ClusterRoleBinding %s: %v\n", clusterBindingRoleName, err)
}
fmt.Printf("ClusterRoleBinding '%s' already exists\n", clusterBindingRoleName)
return
}
fmt.Printf("ClusterRoleBinding '%s' created, bound '%s' to '%s'\n", clusterBindingRoleName, serviceAccountName, clusterRoleName)
}
// InstallClusterManagerRBAC installs RBAC resources for a cluster manager to operate a cluster. Returns a token
func InstallClusterManagerRBAC(conf *rest.Config) string {
const ns = "kube-system"
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
CreateServiceAccount(clientset, ArgoCDManagerServiceAccount, ns)
CreateClusterRole(clientset, ArgoCDManagerClusterRole, ArgoCDManagerPolicyRules)
CreateClusterRoleBinding(clientset, ArgoCDManagerClusterRoleBinding, ArgoCDManagerServiceAccount, ArgoCDManagerClusterRole, ns)
var serviceAccount *apiv1.ServiceAccount
var secretName string
err = wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) {
serviceAccount, err = clientset.CoreV1().ServiceAccounts(ns).Get(ArgoCDManagerServiceAccount, metav1.GetOptions{})
if err != nil {
return false, err
}
if len(serviceAccount.Secrets) == 0 {
return false, nil
}
secretName = serviceAccount.Secrets[0].Name
return true, nil
})
if err != nil {
log.Fatalf("Failed to wait for service account secret: %v", err)
}
secret, err := clientset.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
if err != nil {
log.Fatalf("Failed to retrieve secret '%s': %v", secretName, err)
}
token, ok := secret.Data["token"]
if !ok {
log.Fatalf("Secret '%s' for service account '%s' did not have a token", secretName, serviceAccount)
}
return string(token)
}
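A hypothetical caller of InstallClusterManagerRBAC, assuming the current kubectl context should be used (the clientcmd loading boilerplate is an assumption of the sketch; only InstallClusterManagerRBAC comes from this file):

package main

import (
    "fmt"

    "k8s.io/client-go/tools/clientcmd"

    "github.com/argoproj/argo-cd/common"
)

func main() {
    // Resolve the current kubectl context into a rest.Config.
    loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
    config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        loadingRules, &clientcmd.ConfigOverrides{}).ClientConfig()
    if err != nil {
        panic(err)
    }
    // Creates the service account, role and binding, then waits for the token
    // secret; the returned bearer token can be stored in a cluster secret.
    token := common.InstallClusterManagerRBAC(config)
    fmt.Printf("got bearer token of length %d\n", len(token))
}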
// UninstallClusterManagerRBAC removes RBAC resources for a cluster manager to operate a cluster
func UninstallClusterManagerRBAC(conf *rest.Config) {
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
UninstallRBAC(clientset, "kube-system", ArgoCDManagerClusterRoleBinding, ArgoCDManagerClusterRole, ArgoCDManagerServiceAccount)
}
// UninstallRBAC uninstalls RBAC related resources for a binding, role, and service account
func UninstallRBAC(clientset kubernetes.Interface, namespace, bindingName, roleName, serviceAccount string) {
if err := clientset.RbacV1().ClusterRoleBindings().Delete(bindingName, &metav1.DeleteOptions{}); err != nil {
if !apierr.IsNotFound(err) {
log.Fatalf("Failed to delete ClusterRoleBinding: %v\n", err)
}
fmt.Printf("ClusterRoleBinding '%s' not found\n", bindingName)
} else {
fmt.Printf("ClusterRoleBinding '%s' deleted\n", bindingName)
}
if err := clientset.RbacV1().ClusterRoles().Delete(roleName, &metav1.DeleteOptions{}); err != nil {
if !apierr.IsNotFound(err) {
log.Fatalf("Failed to delete ClusterRole: %v\n", err)
}
fmt.Printf("ClusterRole '%s' not found\n", roleName)
} else {
fmt.Printf("ClusterRole '%s' deleted\n", roleName)
}
if err := clientset.CoreV1().ServiceAccounts(namespace).Delete(serviceAccount, &metav1.DeleteOptions{}); err != nil {
if !apierr.IsNotFound(err) {
log.Fatalf("Failed to delete ServiceAccount: %v\n", err)
}
fmt.Printf("ServiceAccount '%s' in namespace '%s' not found\n", serviceAccount, namespace)
} else {
fmt.Printf("ServiceAccount '%s' deleted\n", serviceAccount)
}
}

File diff suppressed because it is too large

View File

@@ -1,733 +0,0 @@
package controller
import (
"context"
"encoding/json"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
corev1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/fake"
kubetesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd/common"
mockstatecache "github.com/argoproj/argo-cd/controller/cache/mocks"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
"github.com/argoproj/argo-cd/reposerver/apiclient"
mockrepoclient "github.com/argoproj/argo-cd/reposerver/apiclient/mocks"
mockreposerver "github.com/argoproj/argo-cd/reposerver/mocks"
"github.com/argoproj/argo-cd/test"
utilcache "github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/kube/kubetest"
"github.com/argoproj/argo-cd/util/settings"
)
type namespacedResource struct {
argoappv1.ResourceNode
AppName string
}
type fakeData struct {
apps []runtime.Object
manifestResponse *apiclient.ManifestResponse
managedLiveObjs map[kube.ResourceKey]*unstructured.Unstructured
namespacedResources map[kube.ResourceKey]namespacedResource
configMapData map[string]string
}
func newFakeController(data *fakeData) *ApplicationController {
var clust corev1.Secret
err := yaml.Unmarshal([]byte(fakeCluster), &clust)
if err != nil {
panic(err)
}
// Mock out call to GenerateManifest
mockRepoClient := mockrepoclient.RepoServerServiceClient{}
mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, nil)
mockRepoClientset := mockreposerver.Clientset{}
mockRepoClientset.On("NewRepoServerClient").Return(&fakeCloser{}, &mockRepoClient, nil)
secret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-secret",
Namespace: test.FakeArgoCDNamespace,
},
Data: map[string][]byte{
"admin.password": []byte("test"),
"server.secretkey": []byte("test"),
},
}
cm := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-cm",
Namespace: test.FakeArgoCDNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: data.configMapData,
}
kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
kubectl := &kubetest.MockKubectlCmd{}
ctrl, err := NewApplicationController(
test.FakeArgoCDNamespace,
settingsMgr,
kubeClient,
appclientset.NewSimpleClientset(data.apps...),
&mockRepoClientset,
utilcache.NewCache(utilcache.NewInMemoryCache(1*time.Hour)),
kubectl,
time.Minute,
time.Minute,
common.DefaultPortArgoCDMetrics,
0,
)
if err != nil {
panic(err)
}
cancelProj := test.StartInformer(ctrl.projInformer)
defer cancelProj()
cancelApp := test.StartInformer(ctrl.appInformer)
defer cancelApp()
mockStateCache := mockstatecache.LiveStateCache{}
ctrl.appStateManager.(*appStateManager).liveStateCache = &mockStateCache
ctrl.stateCache = &mockStateCache
mockStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil)
mockStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(data.managedLiveObjs, nil)
mockStateCache.On("GetServerVersion", mock.Anything).Return("v1.2.3", nil)
response := make(map[kube.ResourceKey]argoappv1.ResourceNode)
for k, v := range data.namespacedResources {
response[k] = v.ResourceNode
}
mockStateCache.On("GetNamespaceTopLevelResources", mock.Anything, mock.Anything).Return(response, nil)
mockStateCache.On("IterateHierarchy", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
key := args[1].(kube.ResourceKey)
action := args[2].(func(child argoappv1.ResourceNode, appName string))
appName := ""
if res, ok := data.namespacedResources[key]; ok {
appName = res.AppName
}
action(argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Group: key.Group, Namespace: key.Namespace, Name: key.Name}}, appName)
}).Return(nil)
return ctrl
}
type fakeCloser struct{}
func (f *fakeCloser) Close() error { return nil }
var fakeCluster = `
apiVersion: v1
data:
# {"bearerToken":"fake","tlsClientConfig":{"insecure":true},"awsAuthConfig":null}
config: eyJiZWFyZXJUb2tlbiI6ImZha2UiLCJ0bHNDbGllbnRDb25maWciOnsiaW5zZWN1cmUiOnRydWV9LCJhd3NBdXRoQ29uZmlnIjpudWxsfQ==
# minikube
name: bWluaWt1YmU=
# https://localhost:6443
server: aHR0cHM6Ly9sb2NhbGhvc3Q6NjQ0Mw==
kind: Secret
metadata:
labels:
argocd.argoproj.io/secret-type: cluster
name: some-secret
namespace: ` + test.FakeArgoCDNamespace + `
type: Opaque
`
var fakeApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
uid: "123"
name: my-app
namespace: ` + test.FakeArgoCDNamespace + `
spec:
destination:
namespace: ` + test.FakeDestNamespace + `
server: https://localhost:6443
project: default
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
syncPolicy:
automated: {}
status:
operationState:
finishedAt: 2018-09-21T23:50:29Z
message: successfully synced
operation:
sync:
revision: HEAD
phase: Succeeded
startedAt: 2018-09-21T23:50:25Z
syncResult:
resources:
- kind: RoleBinding
message: |-
rolebinding.rbac.authorization.k8s.io/always-outofsync reconciled
rolebinding.rbac.authorization.k8s.io/always-outofsync configured
name: always-outofsync
namespace: default
status: Synced
revision: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
`
func newFakeApp() *argoappv1.Application {
var app argoappv1.Application
err := yaml.Unmarshal([]byte(fakeApp), &app)
if err != nil {
panic(err)
}
return &app
}
func TestAutoSync(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.NotNil(t, app.Operation)
assert.NotNil(t, app.Operation.Sync)
assert.False(t, app.Operation.Sync.Prune)
}
func TestSkipAutoSync(t *testing.T) {
// Verify we skip when we previously synced to it in our most recent history
// Set current and desired to the same revision and mark the app OutOfSync
{
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// Verify we skip when we are already Synced (even if revision is different)
{
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// Verify we skip when auto-sync is disabled
{
app := newFakeApp()
app.Spec.SyncPolicy = nil
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// Verify we skip when application is marked for deletion
{
app := newFakeApp()
now := metav1.Now()
app.DeletionTimestamp = &now
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// Verify we skip when previous sync attempt failed and return error condition
// Set current to 'aaaaa', desired to 'bbbbb' and add 'bbbbb' to failure history
{
app := newFakeApp()
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
Source: *app.Spec.Source.DeepCopy(),
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
}
// TestAutoSyncIndicateError verifies we skip auto-sync and return error condition if previous sync failed
func TestAutoSyncIndicateError(t *testing.T) {
app := newFakeApp()
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{
{
Name: "a",
Value: "1",
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{
Source: app.Spec.Source.DeepCopy(),
},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
Source: *app.Spec.Source.DeepCopy(),
},
}
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// TestAutoSyncParameterOverrides verifies we auto-sync if revision is same but parameter overrides are different
func TestAutoSyncParameterOverrides(t *testing.T) {
app := newFakeApp()
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{
{
Name: "a",
Value: "1",
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{
Source: &argoappv1.ApplicationSource{
Helm: &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{
{
Name: "a",
Value: "2", // this value changed
},
},
},
},
},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
}
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.NotNil(t, app.Operation)
}
// TestFinalizeAppDeletion verifies application deletion
func TestFinalizeAppDeletion(t *testing.T) {
app := newFakeApp()
app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
}})
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]
fakeAppCs.ReactionChain = nil
fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
return defaultReactor.React(action)
})
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
return true, nil, nil
})
err := ctrl.finalizeApplicationDeletion(app)
assert.NoError(t, err)
assert.True(t, patched)
}
// TestNormalizeApplication verifies we normalize an application during reconciliation
func TestNormalizeApplication(t *testing.T) {
defaultProj := argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: test.FakeArgoCDNamespace,
},
Spec: argoappv1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []argoappv1.ApplicationDestination{
{
Server: "*",
Namespace: "*",
},
},
},
}
app := newFakeApp()
app.Spec.Project = ""
app.Spec.Source.Kustomize = &argoappv1.ApplicationSourceKustomize{NamePrefix: "foo-"}
data := fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
{
// Verify we normalize the app because project is missing
ctrl := newFakeController(&data)
key, _ := cache.MetaNamespaceKeyFunc(app)
ctrl.appRefreshQueue.Add(key)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
normalized := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
if string(patchAction.GetPatch()) == `{"spec":{"project":"default"}}` {
normalized = true
}
}
return true, nil, nil
})
ctrl.processAppRefreshQueueItem()
assert.True(t, normalized)
}
{
// Verify we don't unnecessarily normalize app when project is set
app.Spec.Project = "default"
data.apps[0] = app
ctrl := newFakeController(&data)
key, _ := cache.MetaNamespaceKeyFunc(app)
ctrl.appRefreshQueue.Add(key)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
normalized := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
if string(patchAction.GetPatch()) == `{"spec":{"project":"default"}}` {
normalized = true
}
}
return true, nil, nil
})
ctrl.processAppRefreshQueueItem()
assert.False(t, normalized)
}
}
func TestHandleAppUpdated(t *testing.T) {
app := newFakeApp()
app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
app.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
ctrl.handleObjectUpdated(map[string]bool{app.Name: true}, kube.GetObjectRef(kube.MustToUnstructured(app)))
isRequested, level := ctrl.isRefreshRequested(app.Name)
assert.False(t, isRequested)
assert.Equal(t, ComparisonWithNothing, level)
ctrl.handleObjectUpdated(map[string]bool{app.Name: true}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: "default"})
isRequested, level = ctrl.isRefreshRequested(app.Name)
assert.True(t, isRequested)
assert.Equal(t, CompareWithRecent, level)
}
func TestHandleOrphanedResourceUpdated(t *testing.T) {
app1 := newFakeApp()
app1.Name = "app1"
app1.Spec.Destination.Namespace = test.FakeArgoCDNamespace
app1.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr
app2 := newFakeApp()
app2.Name = "app2"
app2.Spec.Destination.Namespace = test.FakeArgoCDNamespace
app2.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr
proj := defaultProj.DeepCopy()
proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app1, app2, proj}})
ctrl.handleObjectUpdated(map[string]bool{}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: test.FakeArgoCDNamespace})
isRequested, level := ctrl.isRefreshRequested(app1.Name)
assert.True(t, isRequested)
assert.Equal(t, ComparisonWithNothing, level)
isRequested, level = ctrl.isRefreshRequested(app2.Name)
assert.True(t, isRequested)
assert.Equal(t, ComparisonWithNothing, level)
}
func TestSetOperationStateOnDeletedApp(t *testing.T) {
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
patched := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
return true, nil, apierr.NewNotFound(schema.GroupResource{}, "my-app")
})
ctrl.setOperationState(newFakeApp(), &argoappv1.OperationState{Phase: argoappv1.OperationSucceeded})
assert.True(t, patched)
}
func TestNeedRefreshAppStatus(t *testing.T) {
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
app := newFakeApp()
now := metav1.Now()
app.Status.ReconciledAt = &now
app.Status.Sync = argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Source: app.Spec.Source,
Destination: app.Spec.Destination,
},
}
// no need to refresh a just reconciled application
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour)
assert.False(t, needRefresh)
// refresh app using the 'deepest' requested comparison level
ctrl.requestAppRefresh(app.Name, CompareWithRecent)
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing)
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithRecent, compareWith)
// refresh an application whose status is not reconciled, using the latest commit
app.Status.Sync = argoappv1.SyncStatus{Status: argoappv1.SyncStatusCodeUnknown}
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
{
// refresh app using the 'latest' level if comparison expired
app := app.DeepCopy()
ctrl.requestAppRefresh(app.Name, CompareWithRecent)
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Minute)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
}
{
app := app.DeepCopy()
// execute hard refresh if app has refresh annotation
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
app.Annotations = map[string]string{
common.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard),
}
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
}
{
app := app.DeepCopy()
// ensure that CompareWithLatest level is used if application source has changed
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing)
// sample app source change
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{{
Name: "foo",
Value: "bar",
}},
}
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
}
}
func TestRefreshAppConditions(t *testing.T) {
defaultProj := argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: test.FakeArgoCDNamespace,
},
Spec: argoappv1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []argoappv1.ApplicationDestination{
{
Server: "*",
Namespace: "*",
},
},
},
}
t.Run("NoErrorConditions", func(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
hasErrors := ctrl.refreshAppConditions(app)
assert.False(t, hasErrors)
assert.Len(t, app.Status.Conditions, 0)
})
t.Run("PreserveExistingWarningCondition", func(t *testing.T) {
app := newFakeApp()
app.Status.SetConditions([]argoappv1.ApplicationCondition{{Type: argoappv1.ApplicationConditionExcludedResourceWarning}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
hasErrors := ctrl.refreshAppConditions(app)
assert.False(t, hasErrors)
assert.Len(t, app.Status.Conditions, 1)
assert.Equal(t, argoappv1.ApplicationConditionExcludedResourceWarning, app.Status.Conditions[0].Type)
})
t.Run("ReplacesSpecErrorCondition", func(t *testing.T) {
app := newFakeApp()
app.Spec.Project = "wrong project"
app.Status.SetConditions([]argoappv1.ApplicationCondition{{Type: argoappv1.ApplicationConditionInvalidSpecError, Message: "old message"}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
hasErrors := ctrl.refreshAppConditions(app)
assert.True(t, hasErrors)
assert.Len(t, app.Status.Conditions, 1)
assert.Equal(t, argoappv1.ApplicationConditionInvalidSpecError, app.Status.Conditions[0].Type)
assert.Equal(t, "Application referencing project wrong project which does not exist", app.Status.Conditions[0].Message)
})
}
func TestUpdateReconciledAt(t *testing.T) {
app := newFakeApp()
reconciledAt := metav1.NewTime(time.Now().Add(-1 * time.Second))
app.Status = argoappv1.ApplicationStatus{ReconciledAt: &reconciledAt}
app.Status.Sync = argoappv1.SyncStatus{ComparedTo: argoappv1.ComparedTo{Source: app.Spec.Source, Destination: app.Spec.Destination}}
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
})
key, _ := cache.MetaNamespaceKeyFunc(app)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
receivedPatch := map[string]interface{}{}
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, nil, nil
})
t.Run("UpdatedOnFullReconciliation", func(t *testing.T) {
receivedPatch = map[string]interface{}{}
ctrl.requestAppRefresh(app.Name, CompareWithLatest)
ctrl.appRefreshQueue.Add(key)
ctrl.processAppRefreshQueueItem()
_, updated, err := unstructured.NestedString(receivedPatch, "status", "reconciledAt")
assert.NoError(t, err)
assert.True(t, updated)
_, updated, err = unstructured.NestedString(receivedPatch, "status", "observedAt")
assert.NoError(t, err)
assert.True(t, updated)
})
t.Run("NotUpdatedOnPartialReconciliation", func(t *testing.T) {
receivedPatch = map[string]interface{}{}
ctrl.appRefreshQueue.Add(key)
ctrl.requestAppRefresh(app.Name, CompareWithRecent)
ctrl.processAppRefreshQueueItem()
_, updated, err := unstructured.NestedString(receivedPatch, "status", "reconciledAt")
assert.NoError(t, err)
assert.False(t, updated)
_, updated, err = unstructured.NestedString(receivedPatch, "status", "observedAt")
assert.NoError(t, err)
assert.True(t, updated)
})
}

View File

@@ -1,285 +0,0 @@
package cache
import (
"context"
"reflect"
"sync"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd/controller/metrics"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
)
type cacheSettings struct {
ResourceOverrides map[string]appv1.ResourceOverride
AppInstanceLabelKey string
ResourcesFilter *settings.ResourcesFilter
}
type LiveStateCache interface {
// Returns k8s server version
GetServerVersion(serverURL string) (string, error)
// Returns true if the given group kind is a namespaced resource
IsNamespaced(server string, gk schema.GroupKind) (bool, error)
// Executes the given callback against the resource specified by the key and all its children
IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) error
// Returns the state of live nodes which correspond to target nodes of the specified application.
GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error)
// Returns all top level resources (resources without owner references) of a specified namespace
GetNamespaceTopLevelResources(server string, namespace string) (map[kube.ResourceKey]appv1.ResourceNode, error)
// Starts watching resources of each controlled cluster.
Run(ctx context.Context) error
// Invalidate invalidates the entire cluster state cache
Invalidate()
}
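A sketch of the intended consumption pattern for this interface (error handling elided; constructing the concrete cache via NewLiveStateCache is omitted for brevity):

// Assuming 'c' was built via NewLiveStateCache:
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
    // Run blocks, watching every registered cluster until ctx is cancelled.
    if err := c.Run(ctx); err != nil {
        log.Warnf("live state cache stopped: %v", err)
    }
}()
// Subsequent queries are served from the in-memory cache rather than the API server:
version, _ := c.GetServerVersion("https://kubernetes.default.svc")
log.Infof("cluster version: %s", version)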
type ObjectUpdatedHandler = func(managedByApp map[string]bool, ref v1.ObjectReference)
func GetTargetObjKey(a *appv1.Application, un *unstructured.Unstructured, isNamespaced bool) kube.ResourceKey {
key := kube.GetResourceKey(un)
if !isNamespaced {
key.Namespace = ""
} else if isNamespaced && key.Namespace == "" {
key.Namespace = a.Spec.Destination.Namespace
}
return key
}
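For illustration, given a namespaced target object whose manifest omits metadata.namespace, the key resolves to the application's destination namespace; for cluster-scoped kinds the namespace is cleared (app and un are hypothetical inputs):

// un is a hypothetical namespaced object with metadata.namespace unset.
key := GetTargetObjKey(app, un, true)
// key.Namespace == app.Spec.Destination.Namespace
key = GetTargetObjKey(app, un, false)
// key.Namespace == "" for cluster-scoped kinds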
func NewLiveStateCache(
db db.ArgoDB,
appInformer cache.SharedIndexInformer,
settingsMgr *settings.SettingsManager,
kubectl kube.Kubectl,
metricsServer *metrics.MetricsServer,
onObjectUpdated ObjectUpdatedHandler) LiveStateCache {
return &liveStateCache{
appInformer: appInformer,
db: db,
clusters: make(map[string]*clusterInfo),
lock: &sync.Mutex{},
onObjectUpdated: onObjectUpdated,
kubectl: kubectl,
settingsMgr: settingsMgr,
metricsServer: metricsServer,
cacheSettingsLock: &sync.Mutex{},
}
}
type liveStateCache struct {
db db.ArgoDB
clusters map[string]*clusterInfo
lock *sync.Mutex
appInformer cache.SharedIndexInformer
onObjectUpdated ObjectUpdatedHandler
kubectl kube.Kubectl
settingsMgr *settings.SettingsManager
metricsServer *metrics.MetricsServer
cacheSettingsLock *sync.Mutex
cacheSettings *cacheSettings
}
func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
appInstanceLabelKey, err := c.settingsMgr.GetAppInstanceLabelKey()
if err != nil {
return nil, err
}
resourcesFilter, err := c.settingsMgr.GetResourcesFilter()
if err != nil {
return nil, err
}
resourceOverrides, err := c.settingsMgr.GetResourceOverrides()
if err != nil {
return nil, err
}
return &cacheSettings{AppInstanceLabelKey: appInstanceLabelKey, ResourceOverrides: resourceOverrides, ResourcesFilter: resourcesFilter}, nil
}
func (c *liveStateCache) getCluster(server string) (*clusterInfo, error) {
c.lock.Lock()
defer c.lock.Unlock()
info, ok := c.clusters[server]
if !ok {
cluster, err := c.db.GetCluster(context.Background(), server)
if err != nil {
return nil, err
}
info = &clusterInfo{
apisMeta: make(map[schema.GroupKind]*apiMeta),
lock: &sync.Mutex{},
nodes: make(map[kube.ResourceKey]*node),
nsIndex: make(map[string]map[kube.ResourceKey]*node),
onObjectUpdated: c.onObjectUpdated,
kubectl: c.kubectl,
cluster: cluster,
syncTime: nil,
syncLock: &sync.Mutex{},
log: log.WithField("server", cluster.Server),
cacheSettingsSrc: c.getCacheSettings,
}
c.clusters[cluster.Server] = info
}
return info, nil
}
func (c *liveStateCache) getSyncedCluster(server string) (*clusterInfo, error) {
info, err := c.getCluster(server)
if err != nil {
return nil, err
}
err = info.ensureSynced()
if err != nil {
return nil, err
}
return info, nil
}
func (c *liveStateCache) Invalidate() {
log.Info("invalidating live state cache")
c.lock.Lock()
defer c.lock.Unlock()
for _, clust := range c.clusters {
clust.lock.Lock()
clust.invalidate()
clust.lock.Unlock()
}
log.Info("live state cache invalidated")
}
func (c *liveStateCache) IsNamespaced(server string, gk schema.GroupKind) (bool, error) {
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return false, err
}
return clusterInfo.isNamespaced(gk), nil
}
func (c *liveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) error {
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return err
}
clusterInfo.iterateHierarchy(key, action)
return nil
}
func (c *liveStateCache) GetNamespaceTopLevelResources(server string, namespace string) (map[kube.ResourceKey]appv1.ResourceNode, error) {
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return nil, err
}
return clusterInfo.getNamespaceTopLevelResources(namespace), nil
}
func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
clusterInfo, err := c.getSyncedCluster(a.Spec.Destination.Server)
if err != nil {
return nil, err
}
return clusterInfo.getManagedLiveObjs(a, targetObjs, c.metricsServer)
}
func (c *liveStateCache) GetServerVersion(serverURL string) (string, error) {
clusterInfo, err := c.getSyncedCluster(serverURL)
if err != nil {
return "", err
}
return clusterInfo.serverVersion, nil
}
func isClusterHasApps(apps []interface{}, cluster *appv1.Cluster) bool {
for _, obj := range apps {
if app, ok := obj.(*appv1.Application); ok && app.Spec.Destination.Server == cluster.Server {
return true
}
}
return false
}
func (c *liveStateCache) getCacheSettings() *cacheSettings {
c.cacheSettingsLock.Lock()
defer c.cacheSettingsLock.Unlock()
return c.cacheSettings
}
func (c *liveStateCache) watchSettings(ctx context.Context) {
updateCh := make(chan *settings.ArgoCDSettings, 1)
c.settingsMgr.Subscribe(updateCh)
done := false
for !done {
select {
case <-updateCh:
nextCacheSettings, err := c.loadCacheSettings()
if err != nil {
log.Warnf("Failed to read updated settings: %v", err)
continue
}
c.cacheSettingsLock.Lock()
needInvalidate := false
if !reflect.DeepEqual(c.cacheSettings, nextCacheSettings) {
c.cacheSettings = nextCacheSettings
needInvalidate = true
}
c.cacheSettingsLock.Unlock()
if needInvalidate {
c.Invalidate()
}
case <-ctx.Done():
done = true
}
}
log.Info("shutting down settings watch")
c.settingsMgr.Unsubscribe(updateCh)
close(updateCh)
}
// Run watches for resource changes annotated with the application label on all registered clusters and schedules the corresponding app refreshes.
func (c *liveStateCache) Run(ctx context.Context) error {
cacheSettings, err := c.loadCacheSettings()
if err != nil {
return err
}
c.cacheSettings = cacheSettings
go c.watchSettings(ctx)
util.RetryUntilSucceed(func() error {
clusterEventCallback := func(event *db.ClusterEvent) {
c.lock.Lock()
defer c.lock.Unlock()
if cluster, ok := c.clusters[event.Cluster.Server]; ok {
if event.Type == watch.Deleted {
cluster.invalidate()
delete(c.clusters, event.Cluster.Server)
} else if event.Type == watch.Modified {
cluster.cluster = event.Cluster
cluster.invalidate()
}
} else if event.Type == watch.Added && isClusterHasApps(c.appInformer.GetStore().List(), event.Cluster) {
go func() {
// warm up cache for cluster with apps
_, _ = c.getSyncedCluster(event.Cluster.Server)
}()
}
}
return c.db.WatchClusters(ctx, clusterEventCallback)
}, "watch clusters", ctx, clusterRetryTimeout)
<-ctx.Done()
return nil
}

View File

@@ -1,27 +0,0 @@
package cache
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestGetServerVersion(t *testing.T) {
now := time.Now()
cache := &liveStateCache{
lock: &sync.Mutex{},
clusters: map[string]*clusterInfo{
"http://localhost": {
syncTime: &now,
syncLock: &sync.Mutex{},
lock: &sync.Mutex{},
serverVersion: "123",
},
}}
version, err := cache.GetServerVersion("http://localhost")
assert.NoError(t, err)
assert.Equal(t, "123", version)
}

View File

@@ -1,551 +0,0 @@
package cache
import (
"context"
"fmt"
"runtime/debug"
"sort"
"strings"
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
"github.com/argoproj/argo-cd/controller/metrics"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/health"
"github.com/argoproj/argo-cd/util/kube"
)
const (
clusterSyncTimeout = 24 * time.Hour
clusterRetryTimeout = 10 * time.Second
watchResourcesRetryTimeout = 1 * time.Second
)
type apiMeta struct {
namespaced bool
resourceVersion string
watchCancel context.CancelFunc
}
type clusterInfo struct {
syncLock *sync.Mutex
syncTime *time.Time
syncError error
apisMeta map[schema.GroupKind]*apiMeta
serverVersion string
lock *sync.Mutex
nodes map[kube.ResourceKey]*node
nsIndex map[string]map[kube.ResourceKey]*node
onObjectUpdated ObjectUpdatedHandler
kubectl kube.Kubectl
cluster *appv1.Cluster
log *log.Entry
cacheSettingsSrc func() *cacheSettings
}
func (c *clusterInfo) replaceResourceCache(gk schema.GroupKind, resourceVersion string, objs []unstructured.Unstructured) {
c.lock.Lock()
defer c.lock.Unlock()
info, ok := c.apisMeta[gk]
if ok {
objByKey := make(map[kube.ResourceKey]*unstructured.Unstructured)
for i := range objs {
objByKey[kube.GetResourceKey(&objs[i])] = &objs[i]
}
for i := range objs {
obj := &objs[i]
key := kube.GetResourceKey(&objs[i])
existingNode, exists := c.nodes[key]
c.onNodeUpdated(exists, existingNode, obj, key)
}
for key, existingNode := range c.nodes {
if key.Kind != gk.Kind || key.Group != gk.Group {
continue
}
if _, ok := objByKey[key]; !ok {
c.onNodeRemoved(key, existingNode)
}
}
info.resourceVersion = resourceVersion
}
}
func isServiceAccountTokenSecret(un *unstructured.Unstructured) (bool, metav1.OwnerReference) {
ref := metav1.OwnerReference{
APIVersion: "v1",
Kind: kube.ServiceAccountKind,
}
if un.GetKind() != kube.SecretKind || un.GroupVersionKind().Group != "" {
return false, ref
}
if typeVal, ok, err := unstructured.NestedString(un.Object, "type"); !ok || err != nil || typeVal != "kubernetes.io/service-account-token" {
return false, ref
}
annotations := un.GetAnnotations()
if annotations == nil {
return false, ref
}
id, okId := annotations["kubernetes.io/service-account.uid"]
name, okName := annotations["kubernetes.io/service-account.name"]
if okId && okName {
ref.Name = name
ref.UID = types.UID(id)
}
return ref.Name != "" && ref.UID != "", ref
}
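For reference, a secret that the function above would attribute to a service account looks roughly like this sketch (all values are illustrative):

tokenSecret := &unstructured.Unstructured{Object: map[string]interface{}{
    "apiVersion": "v1",
    "kind":       "Secret",
    "type":       "kubernetes.io/service-account-token",
    "metadata": map[string]interface{}{
        "name":      "default-token-abcde",
        "namespace": "default",
        "annotations": map[string]interface{}{
            "kubernetes.io/service-account.name": "default",
            "kubernetes.io/service-account.uid":  "1234",
        },
    },
}}
// isServiceAccountTokenSecret(tokenSecret) returns true plus an owner
// reference pointing at the 'default' ServiceAccount.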
func (c *clusterInfo) createObjInfo(un *unstructured.Unstructured, appInstanceLabel string) *node {
ownerRefs := un.GetOwnerReferences()
// Special case for endpoint. Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed
if un.GroupVersionKind().Group == "" && un.GetKind() == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0 {
ownerRefs = append(ownerRefs, metav1.OwnerReference{
Name: un.GetName(),
Kind: kube.ServiceKind,
APIVersion: "v1",
})
}
// edge case. Consider auto-created service account tokens as a child of service account objects
if yes, ref := isServiceAccountTokenSecret(un); yes {
ownerRefs = append(ownerRefs, ref)
}
nodeInfo := &node{
resourceVersion: un.GetResourceVersion(),
ref: kube.GetObjectRef(un),
ownerRefs: ownerRefs,
}
populateNodeInfo(un, nodeInfo)
appName := kube.GetAppInstanceLabel(un, appInstanceLabel)
if len(ownerRefs) == 0 && appName != "" {
nodeInfo.appName = appName
nodeInfo.resource = un
}
nodeInfo.health, _ = health.GetResourceHealth(un, c.cacheSettingsSrc().ResourceOverrides)
return nodeInfo
}
func (c *clusterInfo) setNode(n *node) {
key := n.resourceKey()
c.nodes[key] = n
ns, ok := c.nsIndex[key.Namespace]
if !ok {
ns = make(map[kube.ResourceKey]*node)
c.nsIndex[key.Namespace] = ns
}
ns[key] = n
}
func (c *clusterInfo) removeNode(key kube.ResourceKey) {
delete(c.nodes, key)
if ns, ok := c.nsIndex[key.Namespace]; ok {
delete(ns, key)
if len(ns) == 0 {
delete(c.nsIndex, key.Namespace)
}
}
}
func (c *clusterInfo) invalidate() {
c.syncLock.Lock()
defer c.syncLock.Unlock()
c.syncTime = nil
for i := range c.apisMeta {
c.apisMeta[i].watchCancel()
}
c.apisMeta = nil
}
func (c *clusterInfo) synced() bool {
if c.syncTime == nil {
return false
}
if c.syncError != nil {
return time.Now().Before(c.syncTime.Add(clusterRetryTimeout))
}
return time.Now().Before(c.syncTime.Add(clusterSyncTimeout))
}
func (c *clusterInfo) stopWatching(gk schema.GroupKind) {
c.syncLock.Lock()
defer c.syncLock.Unlock()
if info, ok := c.apisMeta[gk]; ok {
info.watchCancel()
delete(c.apisMeta, gk)
c.replaceResourceCache(gk, "", []unstructured.Unstructured{})
log.Warnf("Stop watching %s not found on %s.", gk, c.cluster.Server)
}
}
// startMissingWatches lists supported cluster resources and starts watching for changes unless a watch is already running
func (c *clusterInfo) startMissingWatches() error {
apis, err := c.kubectl.GetAPIResources(c.cluster.RESTConfig(), c.cacheSettingsSrc().ResourcesFilter)
if err != nil {
return err
}
for i := range apis {
api := apis[i]
if _, ok := c.apisMeta[api.GroupKind]; !ok {
ctx, cancel := context.WithCancel(context.Background())
info := &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel}
c.apisMeta[api.GroupKind] = info
go c.watchEvents(ctx, api, info)
}
}
return nil
}
func runSynced(lock *sync.Mutex, action func() error) error {
lock.Lock()
defer lock.Unlock()
return action()
}
func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo, info *apiMeta) {
util.RetryUntilSucceed(func() (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
}
}()
err = runSynced(c.syncLock, func() error {
if info.resourceVersion == "" {
list, err := api.Interface.List(metav1.ListOptions{})
if err != nil {
return err
}
c.replaceResourceCache(api.GroupKind, list.GetResourceVersion(), list.Items)
}
return nil
})
if err != nil {
return err
}
w, err := api.Interface.Watch(metav1.ListOptions{ResourceVersion: info.resourceVersion})
if errors.IsNotFound(err) {
c.stopWatching(api.GroupKind)
return nil
}
err = runSynced(c.syncLock, func() error {
if errors.IsGone(err) {
info.resourceVersion = ""
log.Warnf("Resource version of %s on %s is too old.", api.GroupKind, c.cluster.Server)
}
return err
})
if err != nil {
return err
}
defer w.Stop()
for {
select {
case <-ctx.Done():
return nil
case event, ok := <-w.ResultChan():
if ok {
obj := event.Object.(*unstructured.Unstructured)
info.resourceVersion = obj.GetResourceVersion()
c.processEvent(event.Type, obj)
if kube.IsCRD(obj) {
if event.Type == watch.Deleted {
group, groupOk, groupErr := unstructured.NestedString(obj.Object, "spec", "group")
kind, kindOk, kindErr := unstructured.NestedString(obj.Object, "spec", "names", "kind")
if groupOk && groupErr == nil && kindOk && kindErr == nil {
gk := schema.GroupKind{Group: group, Kind: kind}
c.stopWatching(gk)
}
} else {
err = runSynced(c.syncLock, func() error {
return c.startMissingWatches()
})
}
}
if err != nil {
log.Warnf("Failed to start missing watch: %v", err)
}
} else {
return fmt.Errorf("Watch %s on %s has closed", api.GroupKind, c.cluster.Server)
}
}
}
}, fmt.Sprintf("watch %s on %s", api.GroupKind, c.cluster.Server), ctx, watchResourcesRetryTimeout)
}
func (c *clusterInfo) sync() (err error) {
c.log.Info("Start syncing cluster")
for i := range c.apisMeta {
c.apisMeta[i].watchCancel()
}
c.apisMeta = make(map[schema.GroupKind]*apiMeta)
c.nodes = make(map[kube.ResourceKey]*node)
config := c.cluster.RESTConfig()
version, err := c.kubectl.GetServerVersion(config)
if err != nil {
return err
}
c.serverVersion = version
apis, err := c.kubectl.GetAPIResources(config, c.cacheSettingsSrc().ResourcesFilter)
if err != nil {
return err
}
lock := sync.Mutex{}
err = util.RunAllAsync(len(apis), func(i int) error {
api := apis[i]
list, err := api.Interface.List(metav1.ListOptions{})
if err != nil {
return err
}
lock.Lock()
for i := range list.Items {
c.setNode(c.createObjInfo(&list.Items[i], c.cacheSettingsSrc().AppInstanceLabelKey))
}
lock.Unlock()
return nil
})
if err == nil {
err = c.startMissingWatches()
}
if err != nil {
log.Errorf("Failed to sync cluster %s: %v", c.cluster.Server, err)
return err
}
c.log.Info("Cluster successfully synced")
return nil
}
func (c *clusterInfo) ensureSynced() error {
c.syncLock.Lock()
defer c.syncLock.Unlock()
if c.synced() {
return c.syncError
}
err := c.sync()
syncTime := time.Now()
c.syncTime = &syncTime
c.syncError = err
return c.syncError
}
func (c *clusterInfo) getNamespaceTopLevelResources(namespace string) map[kube.ResourceKey]appv1.ResourceNode {
c.lock.Lock()
defer c.lock.Unlock()
nodes := make(map[kube.ResourceKey]appv1.ResourceNode)
for _, node := range c.nsIndex[namespace] {
if len(node.ownerRefs) == 0 {
nodes[node.resourceKey()] = node.asResourceNode()
}
}
return nodes
}
func (c *clusterInfo) iterateHierarchy(key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) {
c.lock.Lock()
defer c.lock.Unlock()
if objInfo, ok := c.nodes[key]; ok {
nsNodes := c.nsIndex[key.Namespace]
action(objInfo.asResourceNode(), objInfo.getApp(nsNodes))
childrenByUID := make(map[types.UID][]*node)
for _, child := range nsNodes {
if objInfo.isParentOf(child) {
childrenByUID[child.ref.UID] = append(childrenByUID[child.ref.UID], child)
}
}
// make sure children have no duplicates
for _, children := range childrenByUID {
if len(children) > 0 {
// The object might have multiple children with the same UID (e.g. replicaset from apps and extensions group). It is ok to pick any object but we need to make sure
// we pick the same child after every refresh.
sort.Slice(children, func(i, j int) bool {
key1 := children[i].resourceKey()
key2 := children[j].resourceKey()
return strings.Compare(key1.String(), key2.String()) < 0
})
child := children[0]
action(child.asResourceNode(), child.getApp(nsNodes))
child.iterateChildren(nsNodes, map[kube.ResourceKey]bool{objInfo.resourceKey(): true}, action)
}
}
}
}
func (c *clusterInfo) isNamespaced(gk schema.GroupKind) bool {
if api, ok := c.apisMeta[gk]; ok && !api.namespaced {
return false
}
return true
}
func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured, metricsServer *metrics.MetricsServer) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
c.lock.Lock()
defer c.lock.Unlock()
managedObjs := make(map[kube.ResourceKey]*unstructured.Unstructured)
// iterate all objects in live state cache to find ones associated with app
for key, o := range c.nodes {
if o.appName == a.Name && o.resource != nil && len(o.ownerRefs) == 0 {
managedObjs[key] = o.resource
}
}
config := metrics.AddMetricsTransportWrapper(metricsServer, a, c.cluster.RESTConfig())
// iterate target objects and identify ones that already exist in the cluster,
// but are simply missing our label
lock := &sync.Mutex{}
err := util.RunAllAsync(len(targetObjs), func(i int) error {
targetObj := targetObjs[i]
key := GetTargetObjKey(a, targetObj, c.isNamespaced(targetObj.GroupVersionKind().GroupKind()))
lock.Lock()
managedObj := managedObjs[key]
lock.Unlock()
if managedObj == nil {
if existingObj, exists := c.nodes[key]; exists {
if existingObj.resource != nil {
managedObj = existingObj.resource
} else {
var err error
managedObj, err = c.kubectl.GetResource(config, targetObj.GroupVersionKind(), existingObj.ref.Name, existingObj.ref.Namespace)
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
}
} else if _, watched := c.apisMeta[key.GroupKind()]; !watched {
var err error
managedObj, err = c.kubectl.GetResource(config, targetObj.GroupVersionKind(), targetObj.GetName(), targetObj.GetNamespace())
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
}
}
if managedObj != nil {
converted, err := c.kubectl.ConvertToVersion(managedObj, targetObj.GroupVersionKind().Group, targetObj.GroupVersionKind().Version)
if err != nil {
// fallback to loading resource from kubernetes if conversion fails
log.Warnf("Failed to convert resource: %v", err)
managedObj, err = c.kubectl.GetResource(config, targetObj.GroupVersionKind(), managedObj.GetName(), managedObj.GetNamespace())
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
} else {
managedObj = converted
}
lock.Lock()
managedObjs[key] = managedObj
lock.Unlock()
}
return nil
})
if err != nil {
return nil, err
}
return managedObjs, nil
}
func (c *clusterInfo) processEvent(event watch.EventType, un *unstructured.Unstructured) {
c.lock.Lock()
defer c.lock.Unlock()
key := kube.GetResourceKey(un)
existingNode, exists := c.nodes[key]
if event == watch.Deleted {
if exists {
c.onNodeRemoved(key, existingNode)
}
} else {
c.onNodeUpdated(exists, existingNode, un, key)
}
}
func (c *clusterInfo) onNodeUpdated(exists bool, existingNode *node, un *unstructured.Unstructured, key kube.ResourceKey) {
nodes := make([]*node, 0)
if exists {
nodes = append(nodes, existingNode)
}
newObj := c.createObjInfo(un, c.cacheSettingsSrc().AppInstanceLabelKey)
c.setNode(newObj)
nodes = append(nodes, newObj)
toNotify := make(map[string]bool)
for i := range nodes {
n := nodes[i]
if ns, ok := c.nsIndex[n.ref.Namespace]; ok {
app := n.getApp(ns)
if app == "" || skipAppRequeing(key) {
continue
}
toNotify[app] = n.isRootAppNode() || toNotify[app]
}
}
c.onObjectUpdated(toNotify, newObj.ref)
}
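// onNodeRemoved evicts the node from the cache and notifies the owning application, if any, so that it gets refreshed.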
func (c *clusterInfo) onNodeRemoved(key kube.ResourceKey, n *node) {
appName := n.appName
if ns, ok := c.nsIndex[key.Namespace]; ok {
appName = n.getApp(ns)
}
c.removeNode(key)
managedByApp := make(map[string]bool)
if appName != "" {
managedByApp[appName] = n.isRootAppNode()
}
c.onObjectUpdated(managedByApp, n.ref)
}
var (
ignoredRefreshResources = map[string]bool{
"/" + kube.EndpointsKind: true,
}
)
// skipAppRequeing checks if the object is an API type which we want to skip requeuing against.
// We ignore API types which have a high churn rate, and/or whose updates are irrelevant to the app.
func skipAppRequeing(key kube.ResourceKey) bool {
return ignoredRefreshResources[key.Group+"/"+key.Kind]
}


@@ -1,488 +0,0 @@
package cache
import (
"fmt"
"sort"
"strings"
"sync"
"testing"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic/fake"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/kube/kubetest"
)
func strToUnstructured(jsonStr string) *unstructured.Unstructured {
obj := make(map[string]interface{})
err := yaml.Unmarshal([]byte(jsonStr), &obj)
errors.CheckError(err)
return &unstructured.Unstructured{Object: obj}
}
func mustToUnstructured(obj interface{}) *unstructured.Unstructured {
un, err := kube.ToUnstructured(obj)
errors.CheckError(err)
return un
}
var (
testPod = strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
uid: "1"
name: helm-guestbook-pod
namespace: default
ownerReferences:
- apiVersion: apps/v1
kind: ReplicaSet
name: helm-guestbook-rs
uid: "2"
resourceVersion: "123"`)
testRS = strToUnstructured(`
apiVersion: apps/v1
kind: ReplicaSet
metadata:
uid: "2"
name: helm-guestbook-rs
namespace: default
annotations:
deployment.kubernetes.io/revision: "2"
ownerReferences:
- apiVersion: apps/v1beta1
kind: Deployment
name: helm-guestbook
uid: "3"
resourceVersion: "123"`)
testDeploy = strToUnstructured(`
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: helm-guestbook
uid: "3"
name: helm-guestbook
namespace: default
resourceVersion: "123"`)
testService = strToUnstructured(`
apiVersion: v1
kind: Service
metadata:
name: helm-guestbook
namespace: default
resourceVersion: "123"
uid: "4"
spec:
selector:
app: guestbook
type: LoadBalancer
status:
loadBalancer:
ingress:
- hostname: localhost`)
testIngress = strToUnstructured(`
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: helm-guestbook
namespace: default
uid: "4"
spec:
backend:
serviceName: not-found-service
servicePort: 443
rules:
- host: helm-guestbook.com
http:
paths:
- backend:
serviceName: helm-guestbook
servicePort: 443
path: /
- backend:
serviceName: helm-guestbook
servicePort: https
path: /
status:
loadBalancer:
ingress:
- ip: 107.178.210.11`)
)
func newCluster(objs ...*unstructured.Unstructured) *clusterInfo {
runtimeObjs := make([]runtime.Object, len(objs))
for i := range objs {
runtimeObjs[i] = objs[i]
}
scheme := runtime.NewScheme()
client := fake.NewSimpleDynamicClient(scheme, runtimeObjs...)
apiResources := []kube.APIResourceInfo{{
GroupKind: schema.GroupKind{Group: "", Kind: "Pod"},
Interface: client.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}),
Meta: metav1.APIResource{Namespaced: true},
}, {
GroupKind: schema.GroupKind{Group: "apps", Kind: "ReplicaSet"},
Interface: client.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}),
Meta: metav1.APIResource{Namespaced: true},
}, {
GroupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"},
Interface: client.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}),
Meta: metav1.APIResource{Namespaced: true},
}}
return newClusterExt(&kubetest.MockKubectlCmd{APIResources: apiResources})
}
func newClusterExt(kubectl kube.Kubectl) *clusterInfo {
return &clusterInfo{
lock: &sync.Mutex{},
nodes: make(map[kube.ResourceKey]*node),
onObjectUpdated: func(managedByApp map[string]bool, reference corev1.ObjectReference) {},
kubectl: kubectl,
nsIndex: make(map[string]map[kube.ResourceKey]*node),
cluster: &appv1.Cluster{},
syncTime: nil,
syncLock: &sync.Mutex{},
apisMeta: make(map[schema.GroupKind]*apiMeta),
log: log.WithField("cluster", "test"),
cacheSettingsSrc: func() *cacheSettings {
return &cacheSettings{AppInstanceLabelKey: common.LabelKeyAppInstance}
},
}
}
func getChildren(cluster *clusterInfo, un *unstructured.Unstructured) []appv1.ResourceNode {
hierarchy := make([]appv1.ResourceNode, 0)
cluster.iterateHierarchy(kube.GetResourceKey(un), func(child appv1.ResourceNode, app string) {
hierarchy = append(hierarchy, child)
})
return hierarchy[1:]
}
func TestGetNamespaceResources(t *testing.T) {
defaultNamespaceTopLevel1 := strToUnstructured(`
apiVersion: apps/v1
kind: Deployment
metadata: {"name": "helm-guestbook1", "namespace": "default"}
`)
defaultNamespaceTopLevel2 := strToUnstructured(`
apiVersion: apps/v1
kind: Deployment
metadata: {"name": "helm-guestbook2", "namespace": "default"}
`)
kubesystemNamespaceTopLevel2 := strToUnstructured(`
apiVersion: apps/v1
kind: Deployment
metadata: {"name": "helm-guestbook3", "namespace": "kube-system"}
`)
cluster := newCluster(defaultNamespaceTopLevel1, defaultNamespaceTopLevel2, kubesystemNamespaceTopLevel2)
err := cluster.ensureSynced()
assert.Nil(t, err)
resources := cluster.getNamespaceTopLevelResources("default")
assert.Len(t, resources, 2)
assert.Equal(t, resources[kube.GetResourceKey(defaultNamespaceTopLevel1)].Name, "helm-guestbook1")
assert.Equal(t, resources[kube.GetResourceKey(defaultNamespaceTopLevel2)].Name, "helm-guestbook2")
resources = cluster.getNamespaceTopLevelResources("kube-system")
assert.Len(t, resources, 1)
assert.Equal(t, resources[kube.GetResourceKey(kubesystemNamespaceTopLevel2)].Name, "helm-guestbook3")
}
func TestGetChildren(t *testing.T) {
cluster := newCluster(testPod, testRS, testDeploy)
err := cluster.ensureSynced()
assert.Nil(t, err)
rsChildren := getChildren(cluster, testRS)
assert.Equal(t, []appv1.ResourceNode{{
ResourceRef: appv1.ResourceRef{
Kind: "Pod",
Namespace: "default",
Name: "helm-guestbook-pod",
Group: "",
Version: "v1",
UID: "1",
},
ParentRefs: []appv1.ResourceRef{{
Group: "apps",
Version: "",
Kind: "ReplicaSet",
Namespace: "default",
Name: "helm-guestbook-rs",
UID: "2",
}},
Health: &appv1.HealthStatus{Status: appv1.HealthStatusUnknown},
NetworkingInfo: &appv1.ResourceNetworkingInfo{Labels: testPod.GetLabels()},
ResourceVersion: "123",
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
}}, rsChildren)
deployChildren := getChildren(cluster, testDeploy)
assert.Equal(t, append([]appv1.ResourceNode{{
ResourceRef: appv1.ResourceRef{
Kind: "ReplicaSet",
Namespace: "default",
Name: "helm-guestbook-rs",
Group: "apps",
Version: "v1",
UID: "2",
},
ResourceVersion: "123",
Health: &appv1.HealthStatus{Status: appv1.HealthStatusHealthy},
Info: []appv1.InfoItem{{Name: "Revision", Value: "Rev:2"}},
ParentRefs: []appv1.ResourceRef{{Group: "apps", Version: "", Kind: "Deployment", Namespace: "default", Name: "helm-guestbook", UID: "3"}},
}}, rsChildren...), deployChildren)
}
func TestGetManagedLiveObjs(t *testing.T) {
cluster := newCluster(testPod, testRS, testDeploy)
err := cluster.ensureSynced()
assert.Nil(t, err)
targetDeploy := strToUnstructured(`
apiVersion: apps/v1
kind: Deployment
metadata:
name: helm-guestbook
labels:
app: helm-guestbook`)
managedObjs, err := cluster.getManagedLiveObjs(&appv1.Application{
ObjectMeta: metav1.ObjectMeta{Name: "helm-guestbook"},
Spec: appv1.ApplicationSpec{
Destination: appv1.ApplicationDestination{
Namespace: "default",
},
},
}, []*unstructured.Unstructured{targetDeploy}, nil)
assert.Nil(t, err)
assert.Equal(t, managedObjs, map[kube.ResourceKey]*unstructured.Unstructured{
kube.NewResourceKey("apps", "Deployment", "default", "helm-guestbook"): testDeploy,
})
}
func TestChildDeletedEvent(t *testing.T) {
cluster := newCluster(testPod, testRS, testDeploy)
err := cluster.ensureSynced()
assert.Nil(t, err)
cluster.processEvent(watch.Deleted, testPod)
rsChildren := getChildren(cluster, testRS)
assert.Equal(t, []appv1.ResourceNode{}, rsChildren)
}
func TestProcessNewChildEvent(t *testing.T) {
cluster := newCluster(testPod, testRS, testDeploy)
err := cluster.ensureSynced()
assert.Nil(t, err)
newPod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
uid: "4"
name: helm-guestbook-pod2
namespace: default
ownerReferences:
- apiVersion: apps/v1
kind: ReplicaSet
name: helm-guestbook-rs
uid: "2"
resourceVersion: "123"`)
cluster.processEvent(watch.Added, newPod)
rsChildren := getChildren(cluster, testRS)
sort.Slice(rsChildren, func(i, j int) bool {
return strings.Compare(rsChildren[i].Name, rsChildren[j].Name) < 0
})
assert.Equal(t, []appv1.ResourceNode{{
ResourceRef: appv1.ResourceRef{
Kind: "Pod",
Namespace: "default",
Name: "helm-guestbook-pod",
Group: "",
Version: "v1",
UID: "1",
},
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
Health: &appv1.HealthStatus{Status: appv1.HealthStatusUnknown},
NetworkingInfo: &appv1.ResourceNetworkingInfo{Labels: testPod.GetLabels()},
ParentRefs: []appv1.ResourceRef{{
Group: "apps",
Version: "",
Kind: "ReplicaSet",
Namespace: "default",
Name: "helm-guestbook-rs",
UID: "2",
}},
ResourceVersion: "123",
}, {
ResourceRef: appv1.ResourceRef{
Kind: "Pod",
Namespace: "default",
Name: "helm-guestbook-pod2",
Group: "",
Version: "v1",
UID: "4",
},
NetworkingInfo: &appv1.ResourceNetworkingInfo{Labels: testPod.GetLabels()},
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
Health: &appv1.HealthStatus{Status: appv1.HealthStatusUnknown},
ParentRefs: []appv1.ResourceRef{{
Group: "apps",
Version: "",
Kind: "ReplicaSet",
Namespace: "default",
Name: "helm-guestbook-rs",
UID: "2",
}},
ResourceVersion: "123",
}}, rsChildren)
}
func TestUpdateResourceTags(t *testing.T) {
pod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{Name: "testPod", Namespace: "default"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "test",
Image: "test",
}},
},
}
cluster := newCluster(mustToUnstructured(pod))
err := cluster.ensureSynced()
assert.Nil(t, err)
podNode := cluster.nodes[kube.GetResourceKey(mustToUnstructured(pod))]
assert.NotNil(t, podNode)
assert.Equal(t, []appv1.InfoItem{{Name: "Containers", Value: "0/1"}}, podNode.info)
pod.Status = corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: -1,
},
},
}},
}
cluster.processEvent(watch.Modified, mustToUnstructured(pod))
podNode = cluster.nodes[kube.GetResourceKey(mustToUnstructured(pod))]
assert.NotNil(t, podNode)
assert.Equal(t, []appv1.InfoItem{{Name: "Status Reason", Value: "ExitCode:-1"}, {Name: "Containers", Value: "0/1"}}, podNode.info)
}
func TestUpdateAppResource(t *testing.T) {
updatesReceived := make([]string, 0)
cluster := newCluster(testPod, testRS, testDeploy)
cluster.onObjectUpdated = func(managedByApp map[string]bool, _ corev1.ObjectReference) {
for appName, fullRefresh := range managedByApp {
updatesReceived = append(updatesReceived, fmt.Sprintf("%s: %v", appName, fullRefresh))
}
}
err := cluster.ensureSynced()
assert.Nil(t, err)
cluster.processEvent(watch.Modified, mustToUnstructured(testPod))
assert.Contains(t, updatesReceived, "helm-guestbook: false")
}
func TestCircularReference(t *testing.T) {
dep := testDeploy.DeepCopy()
dep.SetOwnerReferences([]metav1.OwnerReference{{
Name: testPod.GetName(),
Kind: testPod.GetKind(),
APIVersion: testPod.GetAPIVersion(),
}})
cluster := newCluster(testPod, testRS, dep)
err := cluster.ensureSynced()
assert.Nil(t, err)
children := getChildren(cluster, dep)
assert.Len(t, children, 2)
node := cluster.nodes[kube.GetResourceKey(dep)]
assert.NotNil(t, node)
app := node.getApp(cluster.nodes)
assert.Equal(t, "", app)
}
func TestWatchCacheUpdated(t *testing.T) {
removed := testPod.DeepCopy()
removed.SetName(testPod.GetName() + "-removed-pod")
updated := testPod.DeepCopy()
updated.SetName(testPod.GetName() + "-updated-pod")
updated.SetResourceVersion("updated-pod-version")
cluster := newCluster(removed, updated)
err := cluster.ensureSynced()
assert.Nil(t, err)
added := testPod.DeepCopy()
added.SetName(testPod.GetName() + "-new-pod")
podGroupKind := testPod.GroupVersionKind().GroupKind()
cluster.replaceResourceCache(podGroupKind, "updated-list-version", []unstructured.Unstructured{*updated, *added})
_, ok := cluster.nodes[kube.GetResourceKey(removed)]
assert.False(t, ok)
updatedNode, ok := cluster.nodes[kube.GetResourceKey(updated)]
assert.True(t, ok)
assert.Equal(t, updatedNode.resourceVersion, "updated-pod-version")
_, ok = cluster.nodes[kube.GetResourceKey(added)]
assert.True(t, ok)
}
func TestGetDuplicatedChildren(t *testing.T) {
extensionsRS := testRS.DeepCopy()
extensionsRS.SetGroupVersionKind(schema.GroupVersionKind{Group: "extensions", Kind: kube.ReplicaSetKind, Version: "v1beta1"})
cluster := newCluster(testDeploy, testRS, extensionsRS)
err := cluster.ensureSynced()
assert.Nil(t, err)
// Get children multiple times to make sure the right child is picked up every time.
for i := 0; i < 5; i++ {
children := getChildren(cluster, testDeploy)
assert.Len(t, children, 1)
assert.Equal(t, "apps", children[0].Group)
assert.Equal(t, kube.ReplicaSetKind, children[0].Kind)
assert.Equal(t, testRS.GetName(), children[0].Name)
}
}


@@ -1,257 +0,0 @@
package cache
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
k8snode "k8s.io/kubernetes/pkg/util/node"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/resource"
)
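// populateNodeInfo attaches kind-specific info items and networking details to the cached node.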
func populateNodeInfo(un *unstructured.Unstructured, node *node) {
gvk := un.GroupVersionKind()
revision := resource.GetRevision(un)
if revision > 0 {
node.info = append(node.info, v1alpha1.InfoItem{Name: "Revision", Value: fmt.Sprintf("Rev:%v", revision)})
}
switch gvk.Group {
case "":
switch gvk.Kind {
case kube.PodKind:
populatePodInfo(un, node)
return
case kube.ServiceKind:
populateServiceInfo(un, node)
return
}
case "extensions", "networking.k8s.io":
switch gvk.Kind {
case kube.IngressKind:
populateIngressInfo(un, node)
return
}
}
}
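// getIngress extracts the load balancer ingress entries (hostname or IP) from the object's status.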
func getIngress(un *unstructured.Unstructured) []v1.LoadBalancerIngress {
ingress, ok, err := unstructured.NestedSlice(un.Object, "status", "loadBalancer", "ingress")
if !ok || err != nil {
return nil
}
res := make([]v1.LoadBalancerIngress, 0)
for _, item := range ingress {
if lbIngress, ok := item.(map[string]interface{}); ok {
if hostname := lbIngress["hostname"]; hostname != nil {
res = append(res, v1.LoadBalancerIngress{Hostname: fmt.Sprintf("%s", hostname)})
} else if ip := lbIngress["ip"]; ip != nil {
res = append(res, v1.LoadBalancerIngress{IP: fmt.Sprintf("%s", ip)})
}
}
}
return res
}
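// populateServiceInfo records the service's target selector labels and, for LoadBalancer services, its ingress endpoints.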
func populateServiceInfo(un *unstructured.Unstructured, node *node) {
targetLabels, _, _ := unstructured.NestedStringMap(un.Object, "spec", "selector")
ingress := make([]v1.LoadBalancerIngress, 0)
if serviceType, ok, err := unstructured.NestedString(un.Object, "spec", "type"); ok && err == nil && serviceType == string(v1.ServiceTypeLoadBalancer) {
ingress = getIngress(un)
}
node.networkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetLabels: targetLabels, Ingress: ingress}
}
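// populateIngressInfo collects the ingress's backend service references and builds the external URLs exposed by its rules.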
func populateIngressInfo(un *unstructured.Unstructured, node *node) {
ingress := getIngress(un)
targetsMap := make(map[v1alpha1.ResourceRef]bool)
if backend, ok, err := unstructured.NestedMap(un.Object, "spec", "backend"); ok && err == nil {
targetsMap[v1alpha1.ResourceRef{
Group: "",
Kind: kube.ServiceKind,
Namespace: un.GetNamespace(),
Name: fmt.Sprintf("%s", backend["serviceName"]),
}] = true
}
urlsSet := make(map[string]bool)
if rules, ok, err := unstructured.NestedSlice(un.Object, "spec", "rules"); ok && err == nil {
for i := range rules {
rule, ok := rules[i].(map[string]interface{})
if !ok {
continue
}
host := rule["host"]
if host == nil || host == "" {
for i := range ingress {
host = util.FirstNonEmpty(ingress[i].Hostname, ingress[i].IP)
if host != "" {
break
}
}
}
paths, ok, err := unstructured.NestedSlice(rule, "http", "paths")
if !ok || err != nil {
continue
}
for i := range paths {
path, ok := paths[i].(map[string]interface{})
if !ok {
continue
}
if serviceName, ok, err := unstructured.NestedString(path, "backend", "serviceName"); ok && err == nil {
targetsMap[v1alpha1.ResourceRef{
Group: "",
Kind: kube.ServiceKind,
Namespace: un.GetNamespace(),
Name: serviceName,
}] = true
}
if port, ok, err := unstructured.NestedFieldNoCopy(path, "backend", "servicePort"); ok && err == nil && host != "" && host != nil {
stringPort := ""
switch typedPort := port.(type) {
case int64:
stringPort = fmt.Sprintf("%d", typedPort)
case float64:
stringPort = fmt.Sprintf("%d", int64(typedPort))
case string:
stringPort = typedPort
default:
stringPort = fmt.Sprintf("%v", port)
}
var externalURL string
switch stringPort {
case "80", "http":
externalURL = fmt.Sprintf("http://%s", host)
case "443", "https":
externalURL = fmt.Sprintf("https://%s", host)
default:
externalURL = fmt.Sprintf("http://%s:%s", host, stringPort)
}
subPath := ""
if nestedPath, ok, err := unstructured.NestedString(path, "path"); ok && err == nil {
subPath = nestedPath
}
externalURL += subPath
urlsSet[externalURL] = true
}
}
}
}
targets := make([]v1alpha1.ResourceRef, 0)
for target := range targetsMap {
targets = append(targets, target)
}
urls := make([]string, 0)
for url := range urlsSet {
urls = append(urls, url)
}
node.networkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, Ingress: ingress, ExternalURLs: urls}
}
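// populatePodInfo derives a human-readable status reason and ready-container count from the init and regular container statuses, and records the pod's images and labels.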
func populatePodInfo(un *unstructured.Unstructured, node *node) {
pod := v1.Pod{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
if err != nil {
return
}
restarts := 0
totalContainers := len(pod.Spec.Containers)
readyContainers := 0
reason := string(pod.Status.Phase)
if pod.Status.Reason != "" {
reason = pod.Status.Reason
}
imagesSet := make(map[string]bool)
for _, container := range pod.Spec.InitContainers {
imagesSet[container.Image] = true
}
for _, container := range pod.Spec.Containers {
imagesSet[container.Image] = true
}
node.images = nil
for image := range imagesSet {
node.images = append(node.images, image)
}
initializing := false
for i := range pod.Status.InitContainerStatuses {
container := pod.Status.InitContainerStatuses[i]
restarts += int(container.RestartCount)
switch {
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
continue
case container.State.Terminated != nil:
// initialization failed
if len(container.State.Terminated.Reason) == 0 {
if container.State.Terminated.Signal != 0 {
reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
} else {
reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
}
} else {
reason = "Init:" + container.State.Terminated.Reason
}
initializing = true
case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
reason = "Init:" + container.State.Waiting.Reason
initializing = true
default:
reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
initializing = true
}
break
}
if !initializing {
restarts = 0
hasRunning := false
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
container := pod.Status.ContainerStatuses[i]
restarts += int(container.RestartCount)
if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
reason = container.State.Waiting.Reason
} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
reason = container.State.Terminated.Reason
} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
if container.State.Terminated.Signal != 0 {
reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
} else {
reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
}
} else if container.Ready && container.State.Running != nil {
hasRunning = true
readyContainers++
}
}
// change pod status back to "Running" if there is at least one container still reporting as "Running" status
if reason == "Completed" && hasRunning {
reason = "Running"
}
}
if pod.DeletionTimestamp != nil && pod.Status.Reason == k8snode.NodeUnreachablePodReason {
reason = "Unknown"
} else if pod.DeletionTimestamp != nil {
reason = "Terminating"
}
if reason != "" {
node.info = append(node.info, v1alpha1.InfoItem{Name: "Status Reason", Value: reason})
}
node.info = append(node.info, v1alpha1.InfoItem{Name: "Containers", Value: fmt.Sprintf("%d/%d", readyContainers, totalContainers)})
node.networkingInfo = &v1alpha1.ResourceNetworkingInfo{Labels: un.GetLabels()}
}


@@ -1,222 +0,0 @@
package cache
import (
"sort"
"strings"
"testing"
v1 "k8s.io/api/core/v1"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
"github.com/stretchr/testify/assert"
)
func TestGetPodInfo(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: helm-guestbook-pod
namespace: default
ownerReferences:
- apiVersion: extensions/v1beta1
kind: ReplicaSet
name: helm-guestbook-rs
resourceVersion: "123"
labels:
app: guestbook
spec:
containers:
- image: bar`)
node := &node{}
populateNodeInfo(pod, node)
assert.Equal(t, []v1alpha1.InfoItem{{Name: "Containers", Value: "0/1"}}, node.info)
assert.Equal(t, []string{"bar"}, node.images)
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, node.networkingInfo)
}
func TestGetServiceInfo(t *testing.T) {
node := &node{}
populateNodeInfo(testService, node)
assert.Equal(t, 0, len(node.info))
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
TargetLabels: map[string]string{"app": "guestbook"},
Ingress: []v1.LoadBalancerIngress{{Hostname: "localhost"}},
}, node.networkingInfo)
}
func TestGetIngressInfo(t *testing.T) {
node := &node{}
populateNodeInfo(testIngress, node)
assert.Equal(t, 0, len(node.info))
sort.Slice(node.networkingInfo.TargetRefs, func(i, j int) bool {
return strings.Compare(node.networkingInfo.TargetRefs[j].Name, node.networkingInfo.TargetRefs[i].Name) < 0
})
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
Ingress: []v1.LoadBalancerIngress{{IP: "107.178.210.11"}},
TargetRefs: []v1alpha1.ResourceRef{{
Namespace: "default",
Group: "",
Kind: kube.ServiceKind,
Name: "not-found-service",
}, {
Namespace: "default",
Group: "",
Kind: kube.ServiceKind,
Name: "helm-guestbook",
}},
ExternalURLs: []string{"https://helm-guestbook.com/"},
}, node.networkingInfo)
}
func TestGetIngressInfoNoHost(t *testing.T) {
ingress := strToUnstructured(`
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: helm-guestbook
namespace: default
spec:
rules:
- http:
paths:
- backend:
serviceName: helm-guestbook
servicePort: 443
path: /
status:
loadBalancer:
ingress:
- ip: 107.178.210.11`)
node := &node{}
populateNodeInfo(ingress, node)
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
Ingress: []v1.LoadBalancerIngress{{IP: "107.178.210.11"}},
TargetRefs: []v1alpha1.ResourceRef{{
Namespace: "default",
Group: "",
Kind: kube.ServiceKind,
Name: "helm-guestbook",
}},
ExternalURLs: []string{"https://107.178.210.11/"},
}, node.networkingInfo)
}
func TestExternalUrlWithSubPath(t *testing.T) {
ingress := strToUnstructured(`
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: helm-guestbook
namespace: default
spec:
rules:
- http:
paths:
- backend:
serviceName: helm-guestbook
servicePort: 443
path: /my/sub/path/
status:
loadBalancer:
ingress:
- ip: 107.178.210.11`)
node := &node{}
populateNodeInfo(ingress, node)
expectedExternalUrls := []string{"https://107.178.210.11/my/sub/path/"}
assert.Equal(t, expectedExternalUrls, node.networkingInfo.ExternalURLs)
}
func TestExternalUrlWithMultipleSubPaths(t *testing.T) {
ingress := strToUnstructured(`
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: helm-guestbook
namespace: default
spec:
rules:
- host: helm-guestbook.com
http:
paths:
- backend:
serviceName: helm-guestbook
servicePort: 443
path: /my/sub/path/
- backend:
serviceName: helm-guestbook-2
servicePort: 443
path: /my/sub/path/2
- backend:
serviceName: helm-guestbook-3
servicePort: 443
status:
loadBalancer:
ingress:
- ip: 107.178.210.11`)
node := &node{}
populateNodeInfo(ingress, node)
expectedExternalUrls := []string{"https://helm-guestbook.com/my/sub/path/", "https://helm-guestbook.com/my/sub/path/2", "https://helm-guestbook.com"}
actualURLs := node.networkingInfo.ExternalURLs
sort.Strings(expectedExternalUrls)
sort.Strings(actualURLs)
assert.Equal(t, expectedExternalUrls, actualURLs)
}
func TestExternalUrlWithNoSubPath(t *testing.T) {
ingress := strToUnstructured(`
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: helm-guestbook
namespace: default
spec:
rules:
- http:
paths:
- backend:
serviceName: helm-guestbook
servicePort: 443
status:
loadBalancer:
ingress:
- ip: 107.178.210.11`)
node := &node{}
populateNodeInfo(ingress, node)
expectedExternalUrls := []string{"https://107.178.210.11"}
assert.Equal(t, expectedExternalUrls, node.networkingInfo.ExternalURLs)
}
func TestExternalUrlWithNetworkingApi(t *testing.T) {
ingress := strToUnstructured(`
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: helm-guestbook
namespace: default
spec:
rules:
- http:
paths:
- backend:
serviceName: helm-guestbook
servicePort: 443
status:
loadBalancer:
ingress:
- ip: 107.178.210.11`)
node := &node{}
populateNodeInfo(ingress, node)
expectedExternalUrls := []string{"https://107.178.210.11"}
assert.Equal(t, expectedExternalUrls, node.networkingInfo.ExternalURLs)
}


@@ -1,142 +0,0 @@
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import (
context "context"
kube "github.com/argoproj/argo-cd/util/kube"
mock "github.com/stretchr/testify/mock"
schema "k8s.io/apimachinery/pkg/runtime/schema"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)
// LiveStateCache is an autogenerated mock type for the LiveStateCache type
type LiveStateCache struct {
mock.Mock
}
// GetManagedLiveObjs provides a mock function with given fields: a, targetObjs
func (_m *LiveStateCache) GetManagedLiveObjs(a *v1alpha1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
ret := _m.Called(a, targetObjs)
var r0 map[kube.ResourceKey]*unstructured.Unstructured
if rf, ok := ret.Get(0).(func(*v1alpha1.Application, []*unstructured.Unstructured) map[kube.ResourceKey]*unstructured.Unstructured); ok {
r0 = rf(a, targetObjs)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[kube.ResourceKey]*unstructured.Unstructured)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*v1alpha1.Application, []*unstructured.Unstructured) error); ok {
r1 = rf(a, targetObjs)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetNamespaceTopLevelResources provides a mock function with given fields: server, namespace
func (_m *LiveStateCache) GetNamespaceTopLevelResources(server string, namespace string) (map[kube.ResourceKey]v1alpha1.ResourceNode, error) {
ret := _m.Called(server, namespace)
var r0 map[kube.ResourceKey]v1alpha1.ResourceNode
if rf, ok := ret.Get(0).(func(string, string) map[kube.ResourceKey]v1alpha1.ResourceNode); ok {
r0 = rf(server, namespace)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[kube.ResourceKey]v1alpha1.ResourceNode)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, string) error); ok {
r1 = rf(server, namespace)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetServerVersion provides a mock function with given fields: server
func (_m *LiveStateCache) GetServerVersion(server string) (string, error) {
ret := _m.Called(server)
var r0 string
if rf, ok := ret.Get(0).(func(string) string); ok {
r0 = rf(server)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(server)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Invalidate provides a mock function with given fields:
func (_m *LiveStateCache) Invalidate() {
_m.Called()
}
// IsNamespaced provides a mock function with given fields: server, gk
func (_m *LiveStateCache) IsNamespaced(server string, gk schema.GroupKind) (bool, error) {
ret := _m.Called(server, gk)
var r0 bool
if rf, ok := ret.Get(0).(func(string, schema.GroupKind) bool); ok {
r0 = rf(server, gk)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, schema.GroupKind) error); ok {
r1 = rf(server, gk)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IterateHierarchy provides a mock function with given fields: server, key, action
func (_m *LiveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(v1alpha1.ResourceNode, string)) error {
ret := _m.Called(server, key, action)
var r0 error
if rf, ok := ret.Get(0).(func(string, kube.ResourceKey, func(v1alpha1.ResourceNode, string)) error); ok {
r0 = rf(server, key, action)
} else {
r0 = ret.Error(0)
}
return r0
}
// Run provides a mock function with given fields: ctx
func (_m *LiveStateCache) Run(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}


@@ -1,142 +0,0 @@
package cache
import (
log "github.com/sirupsen/logrus"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type node struct {
resourceVersion string
ref v1.ObjectReference
ownerRefs []metav1.OwnerReference
info []appv1.InfoItem
appName string
// available only for root application nodes
resource *unstructured.Unstructured
// networkingInfo is available only for known types involved in networking: Ingress, Service, Pod
networkingInfo *appv1.ResourceNetworkingInfo
images []string
health *appv1.HealthStatus
}
func (n *node) isRootAppNode() bool {
return n.appName != "" && len(n.ownerRefs) == 0
}
func (n *node) resourceKey() kube.ResourceKey {
return kube.NewResourceKey(n.ref.GroupVersionKind().Group, n.ref.Kind, n.ref.Namespace, n.ref.Name)
}
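// isParentOf reports whether n owns child via its owner references, backfilling the missing UID of references that were inferred by name.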
func (n *node) isParentOf(child *node) bool {
for i, ownerRef := range child.ownerRefs {
// backfill UID of inferred owner child references
if ownerRef.UID == "" && n.ref.Kind == ownerRef.Kind && n.ref.APIVersion == ownerRef.APIVersion && n.ref.Name == ownerRef.Name {
ownerRef.UID = n.ref.UID
child.ownerRefs[i] = ownerRef
return true
}
if n.ref.UID == ownerRef.UID {
return true
}
}
return false
}
func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
gv, err := schema.ParseGroupVersion(ownerRef.APIVersion)
if err != nil {
gv = schema.GroupVersion{}
}
return gv
}
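// getApp walks the owner reference chain within the namespace to find the application that owns this node, guarding against circular references.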
func (n *node) getApp(ns map[kube.ResourceKey]*node) string {
return n.getAppRecursive(ns, map[kube.ResourceKey]bool{})
}
func (n *node) getAppRecursive(ns map[kube.ResourceKey]*node, visited map[kube.ResourceKey]bool) string {
if !visited[n.resourceKey()] {
visited[n.resourceKey()] = true
} else {
log.Warnf("Circular dependency detected: %v.", visited)
return n.appName
}
if n.appName != "" {
return n.appName
}
for _, ownerRef := range n.ownerRefs {
gv := ownerRefGV(ownerRef)
if parent, ok := ns[kube.NewResourceKey(gv.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name)]; ok {
app := parent.getAppRecursive(ns, visited)
if app != "" {
return app
}
}
}
return ""
}
func newResourceKeySet(set map[kube.ResourceKey]bool, keys ...kube.ResourceKey) map[kube.ResourceKey]bool {
newSet := make(map[kube.ResourceKey]bool)
for k, v := range set {
newSet[k] = v
}
for i := range keys {
newSet[keys[i]] = true
}
return newSet
}
func (n *node) asResourceNode() appv1.ResourceNode {
gv, err := schema.ParseGroupVersion(n.ref.APIVersion)
if err != nil {
gv = schema.GroupVersion{}
}
parentRefs := make([]appv1.ResourceRef, len(n.ownerRefs))
for i, ownerRef := range n.ownerRefs {
ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind)
ownerKey := kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name)
parentRefs[i] = appv1.ResourceRef{Name: ownerRef.Name, Kind: ownerKey.Kind, Namespace: n.ref.Namespace, Group: ownerKey.Group, UID: string(ownerRef.UID)}
}
return appv1.ResourceNode{
ResourceRef: appv1.ResourceRef{
UID: string(n.ref.UID),
Name: n.ref.Name,
Group: gv.Group,
Version: gv.Version,
Kind: n.ref.Kind,
Namespace: n.ref.Namespace,
},
ParentRefs: parentRefs,
Info: n.info,
ResourceVersion: n.resourceVersion,
NetworkingInfo: n.networkingInfo,
Images: n.images,
Health: n.health,
}
}
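// iterateChildren invokes action for every descendant of this node within the namespace, tracking visited parents to break circular ownership chains.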
func (n *node) iterateChildren(ns map[kube.ResourceKey]*node, parents map[kube.ResourceKey]bool, action func(child appv1.ResourceNode, appName string)) {
for childKey, child := range ns {
if n.isParentOf(ns[childKey]) {
if parents[childKey] {
key := n.resourceKey()
log.Warnf("Circular dependency detected. %s is child and parent of %s", childKey.String(), key.String())
} else {
action(child.asResourceNode(), child.getApp(ns))
child.iterateChildren(ns, newResourceKeySet(parents, n.resourceKey()), action)
}
}
}
}


@@ -1,83 +0,0 @@
package cache
import (
"testing"
"github.com/argoproj/argo-cd/common"
"github.com/stretchr/testify/assert"
)
var c = &clusterInfo{cacheSettingsSrc: func() *cacheSettings {
return &cacheSettings{AppInstanceLabelKey: common.LabelKeyAppInstance}
}}
func TestIsParentOf(t *testing.T) {
child := c.createObjInfo(testPod, "")
parent := c.createObjInfo(testRS, "")
grandParent := c.createObjInfo(testDeploy, "")
assert.True(t, parent.isParentOf(child))
assert.False(t, grandParent.isParentOf(child))
}
func TestIsParentOfSameKindDifferentGroupAndUID(t *testing.T) {
rs := testRS.DeepCopy()
rs.SetAPIVersion("somecrd.io/v1")
rs.SetUID("123")
child := c.createObjInfo(testPod, "")
invalidParent := c.createObjInfo(rs, "")
assert.False(t, invalidParent.isParentOf(child))
}
func TestIsServiceParentOfEndPointWithTheSameName(t *testing.T) {
nonMatchingNameEndPoint := c.createObjInfo(strToUnstructured(`
apiVersion: v1
kind: Endpoints
metadata:
name: not-matching-name
namespace: default
`), "")
matchingNameEndPoint := c.createObjInfo(strToUnstructured(`
apiVersion: v1
kind: Endpoints
metadata:
name: helm-guestbook
namespace: default
`), "")
parent := c.createObjInfo(testService, "")
assert.True(t, parent.isParentOf(matchingNameEndPoint))
assert.Equal(t, parent.ref.UID, matchingNameEndPoint.ownerRefs[0].UID)
assert.False(t, parent.isParentOf(nonMatchingNameEndPoint))
}
func TestIsServiceAccountParentOfSecret(t *testing.T) {
serviceAccount := c.createObjInfo(strToUnstructured(`
apiVersion: v1
kind: ServiceAccount
metadata:
name: default
namespace: default
uid: '123'
secrets:
- name: default-token-123
`), "")
tokenSecret := c.createObjInfo(strToUnstructured(`
apiVersion: v1
kind: Secret
metadata:
annotations:
kubernetes.io/service-account.name: default
kubernetes.io/service-account.uid: '123'
name: default-token-123
namespace: default
uid: '345'
type: kubernetes.io/service-account-token
`), "")
assert.True(t, serviceAccount.isParentOf(tokenSecret))
}

controller/controller.go Normal file

@@ -0,0 +1,560 @@
package controller
import (
"context"
"encoding/json"
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/argoproj/argo-cd/common"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
appinformers "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
watchResourcesRetryTimeout = 10 * time.Second
updateOperationStateTimeout = 1 * time.Second
)
// ApplicationController is the controller for application resources.
type ApplicationController struct {
namespace string
kubeClientset kubernetes.Interface
applicationClientset appclientset.Interface
appRefreshQueue workqueue.RateLimitingInterface
appOperationQueue workqueue.RateLimitingInterface
appInformer cache.SharedIndexInformer
appStateManager AppStateManager
appHealthManager AppHealthManager
statusRefreshTimeout time.Duration
db db.ArgoDB
forceRefreshApps map[string]bool
forceRefreshAppsMutex *sync.Mutex
}
type ApplicationControllerConfig struct {
InstanceID string
Namespace string
}
// NewApplicationController creates a new instance of ApplicationController.
func NewApplicationController(
namespace string,
kubeClientset kubernetes.Interface,
applicationClientset appclientset.Interface,
db db.ArgoDB,
appStateManager AppStateManager,
appHealthManager AppHealthManager,
appResyncPeriod time.Duration,
config *ApplicationControllerConfig,
) *ApplicationController {
appRefreshQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
appOperationQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
return &ApplicationController{
namespace: namespace,
kubeClientset: kubeClientset,
applicationClientset: applicationClientset,
appRefreshQueue: appRefreshQueue,
appOperationQueue: appOperationQueue,
appStateManager: appStateManager,
appHealthManager: appHealthManager,
appInformer: newApplicationInformer(applicationClientset, appRefreshQueue, appOperationQueue, appResyncPeriod, config),
db: db,
statusRefreshTimeout: appResyncPeriod,
forceRefreshApps: make(map[string]bool),
forceRefreshAppsMutex: &sync.Mutex{},
}
}
// Run starts the Application CRD controller.
func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int, operationProcessors int) {
defer runtime.HandleCrash()
defer ctrl.appRefreshQueue.ShutDown()
go ctrl.appInformer.Run(ctx.Done())
go ctrl.watchAppsResources()
if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced) {
log.Error("Timed out waiting for caches to sync")
return
}
for i := 0; i < statusProcessors; i++ {
go wait.Until(func() {
for ctrl.processAppRefreshQueueItem() {
}
}, time.Second, ctx.Done())
}
for i := 0; i < operationProcessors; i++ {
go wait.Until(func() {
for ctrl.processAppOperationQueueItem() {
}
}, time.Second, ctx.Done())
}
<-ctx.Done()
}
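// forceAppRefresh marks the application so that its next refresh bypasses the status staleness check.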
func (ctrl *ApplicationController) forceAppRefresh(appName string) {
ctrl.forceRefreshAppsMutex.Lock()
defer ctrl.forceRefreshAppsMutex.Unlock()
ctrl.forceRefreshApps[appName] = true
}
func (ctrl *ApplicationController) isRefreshForced(appName string) bool {
ctrl.forceRefreshAppsMutex.Lock()
defer ctrl.forceRefreshAppsMutex.Unlock()
_, ok := ctrl.forceRefreshApps[appName]
if ok {
delete(ctrl.forceRefreshApps, appName)
}
return ok
}
// watchClusterResources watches for changes to resources labeled with the application name on the specified cluster and schedules a corresponding app refresh.
func (ctrl *ApplicationController) watchClusterResources(ctx context.Context, item appv1.Cluster) {
config := item.RESTConfig()
retryUntilSucceed(func() error {
ch, err := kube.WatchResourcesWithLabel(ctx, config, "", common.LabelApplicationName)
if err != nil {
return err
}
for event := range ch {
eventObj := event.Object.(*unstructured.Unstructured)
objLabels := eventObj.GetLabels()
if objLabels == nil {
objLabels = make(map[string]string)
}
if appName, ok := objLabels[common.LabelApplicationName]; ok {
ctrl.forceAppRefresh(appName)
ctrl.appRefreshQueue.Add(ctrl.namespace + "/" + appName)
}
}
return fmt.Errorf("resource updates channel has closed")
}, fmt.Sprintf("watch app resources on %s", config.Host), ctx, watchResourcesRetryTimeout)
}
// watchAppsResources watches for resource changes labeled with the application name on all registered clusters and schedules corresponding app refreshes.
func (ctrl *ApplicationController) watchAppsResources() {
watchingClusters := make(map[string]context.CancelFunc)
retryUntilSucceed(func() error {
return ctrl.db.WatchClusters(context.Background(), func(event *db.ClusterEvent) {
cancel, ok := watchingClusters[event.Cluster.Server]
if event.Type == watch.Deleted && ok {
cancel()
delete(watchingClusters, event.Cluster.Server)
} else if event.Type != watch.Deleted && !ok {
ctx, cancel := context.WithCancel(context.Background())
watchingClusters[event.Cluster.Server] = cancel
go ctrl.watchClusterResources(ctx, *event.Cluster)
}
})
}, "watch clusters", context.Background(), watchResourcesRetryTimeout)
<-context.Background().Done()
}
// retryUntilSucceed keeps retrying the given action with the specified timeout until the action succeeds or the specified context is done.
func retryUntilSucceed(action func() error, desc string, ctx context.Context, timeout time.Duration) {
ctxCompleted := false
go func() {
select {
case <-ctx.Done():
ctxCompleted = true
}
}()
for {
err := action()
if err == nil {
return
}
if ctxCompleted {
log.Infof("Stop retrying %s", desc)
return
}
log.Warnf("Failed to %s: %v, retrying in %v", desc, err, timeout)
time.Sleep(timeout)
}
}
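// processAppOperationQueueItem pops one application off the operation queue and either runs its requested operation or finalizes its deletion.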
func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext bool) {
appKey, shutdown := ctrl.appOperationQueue.Get()
if shutdown {
processNext = false
return
} else {
processNext = true
}
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
}
ctrl.appOperationQueue.Done(appKey)
}()
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
if err != nil {
log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
return
}
if !exists {
// This happens after the app was deleted, but the work queue still had an entry for it.
return
}
app, ok := obj.(*appv1.Application)
if !ok {
log.Warnf("Key '%s' in index is not an application", appKey)
return
}
if app.Operation != nil {
ctrl.processRequestedAppOperation(app)
} else if app.DeletionTimestamp != nil && app.CascadedDeletion() {
ctrl.finalizeApplicationDeletion(app)
}
return
}
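// finalizeApplicationDeletion deletes the application's labeled resources from the destination cluster and, on success, removes the cascade finalizer from the application.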
func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application) {
log.Infof("Deleting resources for application %s", app.Name)
// Get refreshed application info, since informer app copy might be stale
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
log.Errorf("Unable to get refreshed application info prior deleting resources: %v", err)
}
return
}
clst, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
if err == nil {
config := clst.RESTConfig()
err = kube.DeleteResourceWithLabel(config, app.Spec.Destination.Namespace, common.LabelApplicationName, app.Name)
if err == nil {
app.SetCascadedDeletion(false)
var patch []byte
patch, err = json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": app.Finalizers,
},
})
if err == nil {
_, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(app.Name, types.MergePatchType, patch)
}
}
}
if err != nil {
log.Errorf("Unable to delete application resources: %v", err)
ctrl.setAppCondition(app, appv1.ApplicationCondition{
Type: appv1.ApplicationConditionDeletionError,
Message: err.Error(),
})
} else {
log.Infof("Successfully deleted resources for application %s", app.Name)
}
}
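// setAppCondition upserts the given condition into the application status, replacing an existing condition of the same type.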
func (ctrl *ApplicationController) setAppCondition(app *appv1.Application, condition appv1.ApplicationCondition) {
index := -1
for i, existing := range app.Status.Conditions {
if existing.Type == condition.Type {
index = i
break
}
}
if index > -1 {
app.Status.Conditions[index] = condition
} else {
app.Status.Conditions = append(app.Status.Conditions, condition)
}
var patch []byte
patch, err := json.Marshal(map[string]interface{}{
"status": map[string]interface{}{
"conditions": app.Status.Conditions,
},
})
if err == nil {
_, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(app.Name, types.MergePatchType, patch)
}
if err != nil {
log.Errorf("Unable to set application condition: %v", err)
}
}
func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Application) {
state := appv1.OperationState{Phase: appv1.OperationRunning, Operation: *app.Operation, StartedAt: metav1.Now()}
// Recover from any unexpected panics and automatically set the status to be failed
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
// TODO: consider adding Error OperationStatus in addition to Failed
state.Phase = appv1.OperationError
if rerr, ok := r.(error); ok {
state.Message = rerr.Error()
} else {
state.Message = fmt.Sprintf("%v", r)
}
ctrl.setOperationState(app.Name, state, app.Operation)
}
}()
if app.Status.OperationState != nil && !app.Status.OperationState.Phase.Completed() {
// If we get here, we are about to process an operation, but we notice it is already Running.
// We need to detect if the controller crashed before completing the operation, or if the
// app object we pulled off the informer is simply stale and doesn't reflect the fact
// that the operation is completed. We don't want to perform the operation again. To detect
// this, always retrieve the latest version to ensure it is not stale.
freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
log.Errorf("Failed to retrieve latest application state: %v", err)
return
}
if freshApp.Status.OperationState == nil || freshApp.Status.OperationState.Phase.Completed() {
log.Infof("Skipping operation on stale application state (%s)", app.ObjectMeta.Name)
return
}
log.Warnf("Found interrupted application operation %s %v", app.ObjectMeta.Name, app.Status.OperationState)
} else {
ctrl.setOperationState(app.Name, state, app.Operation)
}
if app.Operation.Sync != nil {
opRes := ctrl.appStateManager.SyncAppState(app, app.Operation.Sync.Revision, nil, app.Operation.Sync.DryRun, app.Operation.Sync.Prune)
state.Phase = opRes.Phase
state.Message = opRes.Message
state.SyncResult = opRes.SyncResult
} else if app.Operation.Rollback != nil {
var deploymentInfo *appv1.DeploymentInfo
for _, info := range app.Status.History {
if info.ID == app.Operation.Rollback.ID {
deploymentInfo = &info
break
}
}
if deploymentInfo == nil {
state.Phase = appv1.OperationFailed
state.Message = fmt.Sprintf("application %s does not have deployment with id %v", app.Name, app.Operation.Rollback.ID)
} else {
opRes := ctrl.appStateManager.SyncAppState(app, deploymentInfo.Revision, &deploymentInfo.ComponentParameterOverrides, app.Operation.Rollback.DryRun, app.Operation.Rollback.Prune)
state.Phase = opRes.Phase
state.Message = opRes.Message
state.RollbackResult = opRes.SyncResult
}
} else {
state.Phase = appv1.OperationFailed
state.Message = "Invalid operation request"
}
ctrl.setOperationState(app.Name, state, app.Operation)
}
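// setOperationState persists the operation state, keeping app.operation populated while the operation runs and clearing it (and stamping FinishedAt) once it completes. The patch is retried until it succeeds.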
func (ctrl *ApplicationController) setOperationState(appName string, state appv1.OperationState, operation *appv1.Operation) {
retryUntilSucceed(func() error {
var inProgressOpValue *appv1.Operation
if state.Phase == "" {
// expose any bugs where we neglect to set phase
panic("no phase was set")
}
if !state.Phase.Completed() {
// If operation is still running, we populate the app.operation field, which prevents
// any other operation from running at the same time. Otherwise, it is cleared by setting
// it to nil which indicates no operation is in progress.
inProgressOpValue = operation
} else {
nowTime := metav1.Now()
state.FinishedAt = &nowTime
}
patch, err := json.Marshal(map[string]interface{}{
"status": map[string]interface{}{
"operationState": state,
},
"operation": inProgressOpValue,
})
if err != nil {
return err
}
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace)
_, err = appClient.Patch(appName, types.MergePatchType, patch)
if err != nil {
return err
}
log.Infof("updated '%s' operation (phase: %s)", appName, state.Phase)
return nil
}, "Update application operation state", context.Background(), updateOperationStateTimeout)
}
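// processAppRefreshQueueItem pops one application off the refresh queue and recomputes its comparison result, parameters, and health when a refresh is due or was forced.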
func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {
appKey, shutdown := ctrl.appRefreshQueue.Get()
if shutdown {
processNext = false
return
} else {
processNext = true
}
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
}
ctrl.appRefreshQueue.Done(appKey)
}()
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
if err != nil {
log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
return
}
if !exists {
// This happens after the app was deleted, but the work queue still had an entry for it.
return
}
app, ok := obj.(*appv1.Application)
if !ok {
log.Warnf("Key '%s' in index is not an application", appKey)
return
}
isForceRefreshed := ctrl.isRefreshForced(app.Name)
if isForceRefreshed || app.NeedRefreshAppStatus(ctrl.statusRefreshTimeout) {
log.Infof("Refreshing application '%s' status (force refreshed: %v)", app.Name, isForceRefreshed)
comparisonResult, parameters, healthState, err := ctrl.tryRefreshAppStatus(app.DeepCopy())
if err != nil {
comparisonResult = &appv1.ComparisonResult{
Status: appv1.ComparisonStatusError,
Error: fmt.Sprintf("Failed to get application status for application '%s': %v", app.Name, err),
ComparedTo: app.Spec.Source,
ComparedAt: metav1.Time{Time: time.Now().UTC()},
}
parameters = nil
healthState = &appv1.HealthStatus{Status: appv1.HealthStatusUnknown}
}
ctrl.updateAppStatus(app.Name, app.Namespace, comparisonResult, parameters, *healthState)
}
return
}
func (ctrl *ApplicationController) tryRefreshAppStatus(app *appv1.Application) (*appv1.ComparisonResult, *[]appv1.ComponentParameter, *appv1.HealthStatus, error) {
comparisonResult, manifestInfo, err := ctrl.appStateManager.CompareAppState(app)
if err != nil {
return nil, nil, nil, err
}
log.Infof("App %s comparison result: prev: %s. current: %s", app.Name, app.Status.ComparisonResult.Status, comparisonResult.Status)
parameters := make([]appv1.ComponentParameter, len(manifestInfo.Params))
for i := range manifestInfo.Params {
parameters[i] = *manifestInfo.Params[i]
}
healthState, err := ctrl.appHealthManager.GetAppHealth(app.Spec.Destination.Server, app.Spec.Destination.Namespace, comparisonResult)
if err != nil {
return nil, nil, nil, err
}
return comparisonResult, &parameters, healthState, nil
}
func (ctrl *ApplicationController) updateAppStatus(
appName string, namespace string, comparisonResult *appv1.ComparisonResult, parameters *[]appv1.ComponentParameter, healthState appv1.HealthStatus) {
statusPatch := make(map[string]interface{})
statusPatch["comparisonResult"] = comparisonResult
statusPatch["parameters"] = parameters
statusPatch["health"] = healthState
patch, err := json.Marshal(map[string]interface{}{
"status": statusPatch,
})
if err == nil {
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(namespace)
_, err = appClient.Patch(appName, types.MergePatchType, patch)
}
if err != nil {
log.Warnf("Error updating application: %v", err)
} else {
log.Info("Application update successful")
}
}
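// newApplicationInformer builds a shared informer over Applications filtered by controller instance ID, feeding both the refresh and operation work queues.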
func newApplicationInformer(
appClientset appclientset.Interface,
appQueue workqueue.RateLimitingInterface,
appOperationQueue workqueue.RateLimitingInterface,
appResyncPeriod time.Duration,
config *ApplicationControllerConfig) cache.SharedIndexInformer {
appInformerFactory := appinformers.NewFilteredSharedInformerFactory(
appClientset,
appResyncPeriod,
config.Namespace,
func(options *metav1.ListOptions) {
var instanceIDReq *labels.Requirement
var err error
if config.InstanceID != "" {
instanceIDReq, err = labels.NewRequirement(common.LabelKeyApplicationControllerInstanceID, selection.Equals, []string{config.InstanceID})
} else {
instanceIDReq, err = labels.NewRequirement(common.LabelKeyApplicationControllerInstanceID, selection.DoesNotExist, nil)
}
if err != nil {
panic(err)
}
options.FieldSelector = fields.Everything().String()
labelSelector := labels.NewSelector().Add(*instanceIDReq)
options.LabelSelector = labelSelector.String()
},
)
informer := appInformerFactory.Argoproj().V1alpha1().Applications().Informer()
informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
appQueue.Add(key)
appOperationQueue.Add(key)
}
},
UpdateFunc: func(old, new interface{}) {
key, err := cache.MetaNamespaceKeyFunc(new)
if err == nil {
appQueue.Add(key)
appOperationQueue.Add(key)
}
},
DeleteFunc: func(obj interface{}) {
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function.
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
appQueue.Add(key)
}
},
},
)
return informer
}

controller/health.go Normal file

@@ -0,0 +1,161 @@
package controller
import (
"context"
"encoding/json"
"fmt"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/kube"
"k8s.io/api/apps/v1"
coreV1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
maxHistoryCnt = 5
)
type AppHealthManager interface {
GetAppHealth(server string, namespace string, comparisonResult *appv1.ComparisonResult) (*appv1.HealthStatus, error)
}
type kubeAppHealthManager struct {
db db.ArgoDB
namespace string
}
func NewAppHealthManager(db db.ArgoDB, namespace string) AppHealthManager {
return &kubeAppHealthManager{db: db, namespace: namespace}
}
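// getServiceHealth reports a LoadBalancer service as Progressing until an ingress hostname or IP is assigned; all other services are immediately Healthy.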
func (ctrl *kubeAppHealthManager) getServiceHealth(config *rest.Config, namespace string, name string) (*appv1.HealthStatus, error) {
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
service, err := clientSet.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
health := appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
if service.Spec.Type == coreV1.ServiceTypeLoadBalancer {
health.Status = appv1.HealthStatusProgressing
for _, ingress := range service.Status.LoadBalancer.Ingress {
if ingress.Hostname != "" || ingress.IP != "" {
health.Status = appv1.HealthStatusHealthy
break
}
}
}
return &health, nil
}
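// getDeploymentHealth follows the checks from kubectl's rollout_status.go: Degraded when the progress deadline is exceeded, Progressing while replicas converge, otherwise Healthy.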
func (ctrl *kubeAppHealthManager) getDeploymentHealth(config *rest.Config, namespace string, name string) (*appv1.HealthStatus, error) {
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
deployment, err := clientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if deployment.Generation <= deployment.Status.ObservedGeneration {
cond := getDeploymentCondition(deployment.Status, v1.DeploymentProgressing)
if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
return &appv1.HealthStatus{
Status: appv1.HealthStatusDegraded,
StatusDetails: fmt.Sprintf("Deployment %q exceeded its progress deadline", name),
}, nil
} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
return &appv1.HealthStatus{
Status: appv1.HealthStatusProgressing,
StatusDetails: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
}, nil
} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
return &appv1.HealthStatus{
Status: appv1.HealthStatusProgressing,
StatusDetails: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...\n", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
}, nil
} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
return &appv1.HealthStatus{
Status: appv1.HealthStatusProgressing,
StatusDetails: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...\n", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),
}, nil
}
} else {
return &appv1.HealthStatus{
Status: appv1.HealthStatusProgressing,
StatusDetails: "Waiting for rollout to finish: observed deployment generation less then desired generation",
}, nil
}
return &appv1.HealthStatus{
Status: appv1.HealthStatusHealthy,
}, nil
}
func getDeploymentCondition(status v1.DeploymentStatus, condType v1.DeploymentConditionType) *v1.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
func (ctrl *kubeAppHealthManager) GetAppHealth(server string, namespace string, comparisonResult *appv1.ComparisonResult) (*appv1.HealthStatus, error) {
clst, err := ctrl.db.GetCluster(context.Background(), server)
if err != nil {
return nil, err
}
restConfig := clst.RESTConfig()
appHealth := appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
for i := range comparisonResult.Resources {
resource := comparisonResult.Resources[i]
if resource.LiveState == "null" {
resource.Health = appv1.HealthStatus{Status: appv1.HealthStatusUnknown}
} else {
var obj unstructured.Unstructured
err := json.Unmarshal([]byte(resource.LiveState), &obj)
if err != nil {
return nil, err
}
switch obj.GetKind() {
case kube.DeploymentKind:
state, err := ctrl.getDeploymentHealth(restConfig, namespace, obj.GetName())
if err != nil {
return nil, err
}
resource.Health = *state
case kube.ServiceKind:
state, err := ctrl.getServiceHealth(restConfig, namespace, obj.GetName())
if err != nil {
return nil, err
}
resource.Health = *state
default:
resource.Health = appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
}
if resource.Health.Status == appv1.HealthStatusProgressing {
if appHealth.Status == appv1.HealthStatusHealthy {
appHealth.Status = appv1.HealthStatusProgressing
}
} else if resource.Health.Status == appv1.HealthStatusDegraded {
if appHealth.Status == appv1.HealthStatusHealthy || appHealth.Status == appv1.HealthStatusProgressing {
appHealth.Status = appv1.HealthStatusDegraded
}
}
}
comparisonResult.Resources[i] = resource
}
return &appHealth, nil
}
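The loop above encodes a severity order: Degraded overrides Progressing, which overrides Healthy, while Unknown resources leave the aggregate unchanged. A standalone sketch of the same precedence (not part of the original file; the statuses are the string constants used above):

// worstStatus mirrors GetAppHealth's aggregation: the overall status is the
// most severe resource status under Healthy < Progressing < Degraded.
func worstStatus(current, incoming string) string {
	rank := map[string]int{
		appv1.HealthStatusHealthy:     0,
		appv1.HealthStatusProgressing: 1,
		appv1.HealthStatusDegraded:    2,
	}
	if rank[incoming] > rank[current] {
		return incoming
	}
	return current
}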

@@ -1,226 +0,0 @@
package metrics
import (
"net/http"
"strconv"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/labels"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/healthz"
)
type MetricsServer struct {
*http.Server
syncCounter *prometheus.CounterVec
k8sRequestCounter *prometheus.CounterVec
kubectlExecCounter *prometheus.CounterVec
kubectlExecPendingGauge *prometheus.GaugeVec
reconcileHistogram *prometheus.HistogramVec
}
const (
// MetricsPath is the endpoint to collect application metrics
MetricsPath = "/metrics"
)
// Follow Prometheus naming practices
// https://prometheus.io/docs/practices/naming/
var (
descAppDefaultLabels = []string{"namespace", "name", "project"}
descAppInfo = prometheus.NewDesc(
"argocd_app_info",
"Information about application.",
append(descAppDefaultLabels, "repo", "dest_server", "dest_namespace"),
nil,
)
descAppCreated = prometheus.NewDesc(
"argocd_app_created_time",
"Creation time in unix timestamp for an application.",
descAppDefaultLabels,
nil,
)
descAppSyncStatusCode = prometheus.NewDesc(
"argocd_app_sync_status",
"The application current sync status.",
append(descAppDefaultLabels, "sync_status"),
nil,
)
descAppHealthStatus = prometheus.NewDesc(
"argocd_app_health_status",
"The application current health status.",
append(descAppDefaultLabels, "health_status"),
nil,
)
)
// NewMetricsServer returns a new prometheus server which collects application metrics
func NewMetricsServer(addr string, appLister applister.ApplicationLister, healthCheck func() error) *MetricsServer {
mux := http.NewServeMux()
appRegistry := NewAppRegistry(appLister)
appRegistry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
appRegistry.MustRegister(prometheus.NewGoCollector())
mux.Handle(MetricsPath, promhttp.HandlerFor(appRegistry, promhttp.HandlerOpts{}))
healthz.ServeHealthCheck(mux, healthCheck)
syncCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "argocd_app_sync_total",
Help: "Number of application syncs.",
},
append(descAppDefaultLabels, "phase"),
)
appRegistry.MustRegister(syncCounter)
kubectlExecCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "argocd_kubectl_exec_total",
Help: "Number of kubectl executions",
}, []string{"command"})
appRegistry.MustRegister(kubectlExecCounter)
kubectlExecPendingGauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "argocd_kubectl_exec_pending",
Help: "Number of pending kubectl executions",
}, []string{"command"})
appRegistry.MustRegister(kubectlExecPendingGauge)
k8sRequestCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "argocd_app_k8s_request_total",
Help: "Number of kubernetes requests executed during application reconciliation.",
},
append(descAppDefaultLabels, "response_code"),
)
appRegistry.MustRegister(k8sRequestCounter)
reconcileHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "argocd_app_reconcile",
Help: "Application reconciliation performance.",
// Buckets chosen after observing a ~2100ms mean reconcile time
Buckets: []float64{0.25, .5, 1, 2, 4, 8, 16},
},
descAppDefaultLabels,
)
appRegistry.MustRegister(reconcileHistogram)
return &MetricsServer{
Server: &http.Server{
Addr: addr,
Handler: mux,
},
syncCounter: syncCounter,
k8sRequestCounter: k8sRequestCounter,
reconcileHistogram: reconcileHistogram,
kubectlExecCounter: kubectlExecCounter,
kubectlExecPendingGauge: kubectlExecPendingGauge,
}
}
// IncSync increments the sync counter for an application
func (m *MetricsServer) IncSync(app *argoappv1.Application, state *argoappv1.OperationState) {
if !state.Phase.Completed() {
return
}
m.syncCounter.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), string(state.Phase)).Inc()
}
// IncKubernetesRequest increments the kubernetes requests counter for an application
func (m *MetricsServer) IncKubernetesRequest(app *argoappv1.Application, statusCode int) {
m.k8sRequestCounter.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), strconv.Itoa(statusCode)).Inc()
}
// IncReconcile increments the reconcile counter for an application
func (m *MetricsServer) IncReconcile(app *argoappv1.Application, duration time.Duration) {
m.reconcileHistogram.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject()).Observe(duration.Seconds())
}
func (m *MetricsServer) IncKubectlExec(command string) {
m.kubectlExecCounter.WithLabelValues(command).Inc()
}
func (m *MetricsServer) IncKubectlExecPending(command string) {
m.kubectlExecPendingGauge.WithLabelValues(command).Inc()
}
func (m *MetricsServer) DecKubectlExecPending(command string) {
m.kubectlExecPendingGauge.WithLabelValues(command).Dec()
}
type appCollector struct {
store applister.ApplicationLister
}
// NewAppCollector returns a prometheus collector for application metrics
func NewAppCollector(appLister applister.ApplicationLister) prometheus.Collector {
return &appCollector{
store: appLister,
}
}
// NewAppRegistry creates a new prometheus registry that collects applications
func NewAppRegistry(appLister applister.ApplicationLister) *prometheus.Registry {
registry := prometheus.NewRegistry()
registry.MustRegister(NewAppCollector(appLister))
return registry
}
// Describe implements the prometheus.Collector interface
func (c *appCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- descAppInfo
ch <- descAppCreated
ch <- descAppSyncStatusCode
ch <- descAppHealthStatus
}
// Collect implements the prometheus.Collector interface
func (c *appCollector) Collect(ch chan<- prometheus.Metric) {
apps, err := c.store.List(labels.NewSelector())
if err != nil {
log.Warnf("Failed to collect applications: %v", err)
return
}
for _, app := range apps {
collectApps(ch, app)
}
}
func boolFloat64(b bool) float64 {
if b {
return 1
}
return 0
}
func collectApps(ch chan<- prometheus.Metric, app *argoappv1.Application) {
addConstMetric := func(desc *prometheus.Desc, t prometheus.ValueType, v float64, lv ...string) {
project := app.Spec.GetProject()
lv = append([]string{app.Namespace, app.Name, project}, lv...)
ch <- prometheus.MustNewConstMetric(desc, t, v, lv...)
}
addGauge := func(desc *prometheus.Desc, v float64, lv ...string) {
addConstMetric(desc, prometheus.GaugeValue, v, lv...)
}
addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.Source.RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace)
addGauge(descAppCreated, float64(app.CreationTimestamp.Unix()))
syncStatus := app.Status.Sync.Status
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeSynced), string(argoappv1.SyncStatusCodeSynced))
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeOutOfSync), string(argoappv1.SyncStatusCodeOutOfSync))
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeUnknown || syncStatus == ""), string(argoappv1.SyncStatusCodeUnknown))
healthStatus := app.Status.Health.Status
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusUnknown || healthStatus == ""), argoappv1.HealthStatusUnknown)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusProgressing), argoappv1.HealthStatusProgressing)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusSuspended), argoappv1.HealthStatusSuspended)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusHealthy), argoappv1.HealthStatusHealthy)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusDegraded), argoappv1.HealthStatusDegraded)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusMissing), argoappv1.HealthStatusMissing)
}
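Since MetricsServer embeds *http.Server with Addr and Handler already set, serving it is plain net/http. A minimal startup sketch (address and lister are placeholders):

metricsServ := NewMetricsServer("0.0.0.0:8082", appLister, func() error { return nil })
go func() {
	// ListenAndServe comes from the embedded *http.Server and exposes the
	// /metrics endpoint plus the health check registered in NewMetricsServer.
	if err := metricsServ.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		log.Fatalf("metrics server exited: %v", err)
	}
}()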

@@ -1,237 +0,0 @@
package metrics
import (
"context"
"log"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
appinformer "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
)
const fakeApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: my-app
namespace: argocd
spec:
destination:
namespace: dummy-namespace
server: https://localhost:6443
project: important-project
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
status:
sync:
status: Synced
health:
status: Healthy
`
const expectedResponse = `# HELP argocd_app_created_time Creation time in unix timestamp for an application.
# TYPE argocd_app_created_time gauge
argocd_app_created_time{name="my-app",namespace="argocd",project="important-project"} -6.21355968e+10
# HELP argocd_app_health_status The application current health status.
# TYPE argocd_app_health_status gauge
argocd_app_health_status{health_status="Degraded",name="my-app",namespace="argocd",project="important-project"} 0
argocd_app_health_status{health_status="Healthy",name="my-app",namespace="argocd",project="important-project"} 1
argocd_app_health_status{health_status="Missing",name="my-app",namespace="argocd",project="important-project"} 0
argocd_app_health_status{health_status="Progressing",name="my-app",namespace="argocd",project="important-project"} 0
argocd_app_health_status{health_status="Suspended",name="my-app",namespace="argocd",project="important-project"} 0
argocd_app_health_status{health_status="Unknown",name="my-app",namespace="argocd",project="important-project"} 0
# HELP argocd_app_info Information about application.
# TYPE argocd_app_info gauge
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",repo="https://github.com/argoproj/argocd-example-apps"} 1
# HELP argocd_app_sync_status The application current sync status.
# TYPE argocd_app_sync_status gauge
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="OutOfSync"} 0
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="Synced"} 1
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="Unknown"} 0
`
const fakeDefaultApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: my-app
namespace: argocd
spec:
destination:
namespace: dummy-namespace
server: https://localhost:6443
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
status:
sync:
status: Synced
health:
status: Healthy
`
const expectedDefaultResponse = `# HELP argocd_app_created_time Creation time in unix timestamp for an application.
# TYPE argocd_app_created_time gauge
argocd_app_created_time{name="my-app",namespace="argocd",project="default"} -6.21355968e+10
# HELP argocd_app_health_status The application current health status.
# TYPE argocd_app_health_status gauge
argocd_app_health_status{health_status="Degraded",name="my-app",namespace="argocd",project="default"} 0
argocd_app_health_status{health_status="Healthy",name="my-app",namespace="argocd",project="default"} 1
argocd_app_health_status{health_status="Missing",name="my-app",namespace="argocd",project="default"} 0
argocd_app_health_status{health_status="Progressing",name="my-app",namespace="argocd",project="default"} 0
argocd_app_health_status{health_status="Suspended",name="my-app",namespace="argocd",project="default"} 0
argocd_app_health_status{health_status="Unknown",name="my-app",namespace="argocd",project="default"} 0
# HELP argocd_app_info Information about application.
# TYPE argocd_app_info gauge
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="default",repo="https://github.com/argoproj/argocd-example-apps"} 1
# HELP argocd_app_sync_status The application current sync status.
# TYPE argocd_app_sync_status gauge
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="OutOfSync"} 0
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="Synced"} 1
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="Unknown"} 0
`
var noOpHealthCheck = func() error {
return nil
}
func newFakeApp(fakeApp string) *argoappv1.Application {
var app argoappv1.Application
err := yaml.Unmarshal([]byte(fakeApp), &app)
if err != nil {
panic(err)
}
return &app
}
func newFakeLister(fakeApp ...string) (context.CancelFunc, applister.ApplicationLister) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var fakeApps []runtime.Object
for _, name := range fakeApp {
fakeApps = append(fakeApps, newFakeApp(name))
}
appClientset := appclientset.NewSimpleClientset(fakeApps...)
factory := appinformer.NewFilteredSharedInformerFactory(appClientset, 0, "argocd", func(options *metav1.ListOptions) {})
appInformer := factory.Argoproj().V1alpha1().Applications().Informer()
go appInformer.Run(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), appInformer.HasSynced) {
log.Fatal("Timed out waiting for caches to sync")
}
return cancel, factory.Argoproj().V1alpha1().Applications().Lister()
}
func testApp(t *testing.T, fakeApp string, expectedResponse string) {
cancel, appLister := newFakeLister(fakeApp)
defer cancel()
metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)
req, err := http.NewRequest("GET", "/metrics", nil)
assert.NoError(t, err)
rr := httptest.NewRecorder()
metricsServ.Handler.ServeHTTP(rr, req)
assert.Equal(t, rr.Code, http.StatusOK)
body := rr.Body.String()
log.Println(body)
assertMetricsPrinted(t, expectedResponse, body)
}
type testCombination struct {
application string
expectedResponse string
}
func TestMetrics(t *testing.T) {
combinations := []testCombination{
{
application: fakeApp,
expectedResponse: expectedResponse,
},
{
application: fakeDefaultApp,
expectedResponse: expectedDefaultResponse,
},
}
for _, combination := range combinations {
testApp(t, combination.application, combination.expectedResponse)
}
}
const appSyncTotal = `# HELP argocd_app_sync_total Number of application syncs.
# TYPE argocd_app_sync_total counter
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Error",project="important-project"} 1
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Failed",project="important-project"} 1
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Succeeded",project="important-project"} 2
`
func TestMetricsSyncCounter(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)
fakeApp := newFakeApp(fakeApp)
metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: argoappv1.OperationRunning})
metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: argoappv1.OperationFailed})
metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: argoappv1.OperationError})
metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: argoappv1.OperationSucceeded})
metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: argoappv1.OperationSucceeded})
req, err := http.NewRequest("GET", "/metrics", nil)
assert.NoError(t, err)
rr := httptest.NewRecorder()
metricsServ.Handler.ServeHTTP(rr, req)
assert.Equal(t, rr.Code, http.StatusOK)
body := rr.Body.String()
log.Println(body)
assertMetricsPrinted(t, appSyncTotal, body)
}
// assertMetricsPrinted asserts every line in the expected lines appears in the body
func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
for _, line := range strings.Split(expectedLines, "\n") {
assert.Contains(t, body, line)
}
}
const appReconcileMetrics = `argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="0.25"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="0.5"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="1"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="2"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="4"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="8"} 1
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="16"} 1
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="+Inf"} 1
argocd_app_reconcile_sum{name="my-app",namespace="argocd",project="important-project"} 5
argocd_app_reconcile_count{name="my-app",namespace="argocd",project="important-project"} 1
`
func TestReconcileMetrics(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)
fakeApp := newFakeApp(fakeApp)
metricsServ.IncReconcile(fakeApp, 5*time.Second)
req, err := http.NewRequest("GET", "/metrics", nil)
assert.NoError(t, err)
rr := httptest.NewRecorder()
metricsServ.Handler.ServeHTTP(rr, req)
assert.Equal(t, rr.Code, http.StatusOK)
body := rr.Body.String()
log.Println(body)
assertMetricsPrinted(t, appReconcileMetrics, body)
}
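The expected series above follow from Prometheus histogram semantics: a single 5-second observation increments every cumulative bucket whose upper bound is >= 5 (here le="8", le="16", and +Inf), the sum becomes 5, and the count becomes 1. A tiny standalone check of that bucketing (a sketch, not part of the test):

package main

import "fmt"

func main() {
	buckets := []float64{0.25, .5, 1, 2, 4, 8, 16}
	obs := 5.0 // seconds, as in IncReconcile(fakeApp, 5*time.Second)
	for _, le := range buckets {
		hit := 0
		if obs <= le {
			hit = 1 // the observation falls into this cumulative bucket
		}
		fmt.Printf("le=%g -> %d\n", le, hit)
	}
}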

@@ -1,37 +0,0 @@
package metrics
import (
"net/http"
"k8s.io/client-go/rest"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)
type metricsRoundTripper struct {
roundTripper http.RoundTripper
app *v1alpha1.Application
metricsServer *MetricsServer
}
func (mrt *metricsRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
resp, err := mrt.roundTripper.RoundTrip(r)
statusCode := 0
if resp != nil {
statusCode = resp.StatusCode
}
mrt.metricsServer.IncKubernetesRequest(mrt.app, statusCode)
return resp, err
}
// AddMetricsTransportWrapper adds a transport wrapper which increments 'argocd_app_k8s_request_total' counter on each kubernetes request
func AddMetricsTransportWrapper(server *MetricsServer, app *v1alpha1.Application, config *rest.Config) *rest.Config {
wrap := config.WrapTransport
config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
if wrap != nil {
rt = wrap(rt)
}
return &metricsRoundTripper{roundTripper: rt, metricsServer: server, app: app}
}
return config
}
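A hedged usage sketch: wrap the cluster REST config before building clients, so every request made through those clients is counted against the given application (clst, metricsServer, and app are placeholders):

config := clst.RESTConfig()
config = AddMetricsTransportWrapper(metricsServer, app, config)
// Clients built from the wrapped config report each response code via
// IncKubernetesRequest on every round trip.
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
	return err
}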

@@ -6,505 +6,342 @@ import (
"fmt"
"time"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/diff"
"github.com/argoproj/argo-cd/util/kube"
kubeutil "github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd/common"
statecache "github.com/argoproj/argo-cd/controller/cache"
"github.com/argoproj/argo-cd/controller/metrics"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver/apiclient"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/argo"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/diff"
"github.com/argoproj/argo-cd/util/health"
hookutil "github.com/argoproj/argo-cd/util/hook"
kubeutil "github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/resource"
"github.com/argoproj/argo-cd/util/resource/ignore"
"github.com/argoproj/argo-cd/util/settings"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
)
type managedResource struct {
Target *unstructured.Unstructured
Live *unstructured.Unstructured
Diff diff.DiffResult
Group string
Version string
Kind string
Namespace string
Name string
Hook bool
}
func GetLiveObjs(res []managedResource) []*unstructured.Unstructured {
objs := make([]*unstructured.Unstructured, len(res))
for i := range res {
objs[i] = res[i].Live
}
return objs
}
type ResourceInfoProvider interface {
IsNamespaced(server string, gk schema.GroupKind) (bool, error)
}
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool, localObjects []string) *comparisonResult
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
CompareAppState(app *v1alpha1.Application) (*v1alpha1.ComparisonResult, *repository.ManifestResponse, error)
SyncAppState(app *v1alpha1.Application, revision string, overrides *[]v1alpha1.ComponentParameter, dryRun bool, prune bool) *v1alpha1.OperationState
}
type comparisonResult struct {
syncStatus *v1alpha1.SyncStatus
healthStatus *v1alpha1.HealthStatus
resources []v1alpha1.ResourceStatus
managedResources []managedResource
hooks []*unstructured.Unstructured
diffNormalizer diff.Normalizer
appSourceType v1alpha1.ApplicationSourceType
// KsonnetAppStateManager allows to compare application using KSonnet CLI
type KsonnetAppStateManager struct {
db db.ArgoDB
appclientset appclientset.Interface
repoClientset reposerver.Clientset
namespace string
}
func (cr *comparisonResult) targetObjs() []*unstructured.Unstructured {
objs := cr.hooks
for _, r := range cr.managedResources {
if r.Target != nil {
objs = append(objs, r.Target)
// groupLiveObjects deduplicates the list of kubernetes resources and chooses the correct version of each resource: if a resource has a corresponding expected application resource, the method picks the
// kubernetes resource with the matching version; otherwise it chooses a single kubernetes resource of any version
func (ks *KsonnetAppStateManager) groupLiveObjects(liveObjs []*unstructured.Unstructured, targetObjs []*unstructured.Unstructured) map[string]*unstructured.Unstructured {
targetByFullName := make(map[string]*unstructured.Unstructured)
for _, obj := range targetObjs {
targetByFullName[getResourceFullName(obj)] = obj
}
liveListByFullName := make(map[string][]*unstructured.Unstructured)
for _, obj := range liveObjs {
list := liveListByFullName[getResourceFullName(obj)]
if list == nil {
list = make([]*unstructured.Unstructured, 0)
}
list = append(list, obj)
liveListByFullName[getResourceFullName(obj)] = list
}
liveByFullName := make(map[string]*unstructured.Unstructured)
for fullName, list := range liveListByFullName {
targetObj := targetByFullName[fullName]
var liveObj *unstructured.Unstructured
if targetObj != nil {
for i := range list {
if list[i].GetAPIVersion() == targetObj.GetAPIVersion() {
liveObj = list[i]
break
}
}
} else {
liveObj = list[0]
}
if liveObj != nil {
liveByFullName[getResourceFullName(liveObj)] = liveObj
}
}
return objs
return liveByFullName
}
// appStateManager allows to compare applications to git
type appStateManager struct {
metricsServer *metrics.MetricsServer
db db.ArgoDB
settingsMgr *settings.SettingsManager
appclientset appclientset.Interface
projInformer cache.SharedIndexInformer
kubectl kubeutil.Kubectl
repoClientset apiclient.Clientset
liveStateCache statecache.LiveStateCache
namespace string
}
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1.ApplicationSource, appLabelKey, revision string, noCache bool) ([]*unstructured.Unstructured, []*unstructured.Unstructured, *apiclient.ManifestResponse, error) {
helmRepos, err := m.db.ListHelmRepositories(context.Background())
// CompareAppState compares application spec and real app state using KSonnet
func (ks *KsonnetAppStateManager) CompareAppState(app *v1alpha1.Application) (*v1alpha1.ComparisonResult, *repository.ManifestResponse, error) {
repo := ks.getRepo(app.Spec.Source.RepoURL)
conn, repoClient, err := ks.repoClientset.NewRepositoryClient()
if err != nil {
return nil, nil, nil, err
}
repo, err := m.db.GetRepository(context.Background(), source.RepoURL)
if err != nil {
return nil, nil, nil, err
}
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
defer util.Close(conn)
if revision == "" {
revision = source.TargetRevision
overrides := make([]*v1alpha1.ComponentParameter, len(app.Spec.Source.ComponentParameterOverrides))
if app.Spec.Source.ComponentParameterOverrides != nil {
for i := range app.Spec.Source.ComponentParameterOverrides {
item := app.Spec.Source.ComponentParameterOverrides[i]
overrides[i] = &item
}
}
plugins, err := m.settingsMgr.GetConfigManagementPlugins()
if err != nil {
return nil, nil, nil, err
}
tools := make([]*appv1.ConfigManagementPlugin, len(plugins))
for i := range plugins {
tools[i] = &plugins[i]
}
buildOptions, err := m.settingsMgr.GetKustomizeBuildOptions()
if err != nil {
return nil, nil, nil, err
}
serverVersion, err := m.liveStateCache.GetServerVersion(app.Spec.Destination.Server)
if err != nil {
return nil, nil, nil, err
}
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
Repo: repo,
Repos: helmRepos,
Revision: revision,
NoCache: noCache,
AppLabelKey: appLabelKey,
AppLabelValue: app.Name,
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &source,
Plugins: tools,
KustomizeOptions: &appv1.KustomizeOptions{
BuildOptions: buildOptions,
},
KubeVersion: serverVersion,
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &repository.ManifestRequest{
Repo: repo,
Environment: app.Spec.Source.Environment,
Path: app.Spec.Source.Path,
Revision: app.Spec.Source.TargetRevision,
ComponentParameterOverrides: overrides,
AppLabel: app.Name,
})
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
targetObjs, hooks, err := unmarshalManifests(manifestInfo.Manifests)
if err != nil {
return nil, nil, nil, err
}
return targetObjs, hooks, manifestInfo, nil
}
func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, []*unstructured.Unstructured, error) {
targetObjs := make([]*unstructured.Unstructured, 0)
hooks := make([]*unstructured.Unstructured, 0)
for _, manifest := range manifests {
targetObjs := make([]*unstructured.Unstructured, len(manifestInfo.Manifests))
for i, manifest := range manifestInfo.Manifests {
obj, err := v1alpha1.UnmarshalToUnstructured(manifest)
if err != nil {
return nil, nil, err
}
if ignore.Ignore(obj) {
continue
}
if hookutil.IsHook(obj) {
hooks = append(hooks, obj)
} else {
targetObjs = append(targetObjs, obj)
}
}
return targetObjs, hooks, nil
}
func DeduplicateTargetObjects(
server string,
namespace string,
objs []*unstructured.Unstructured,
infoProvider ResourceInfoProvider,
) ([]*unstructured.Unstructured, []v1alpha1.ApplicationCondition, error) {
targetByKey := make(map[kubeutil.ResourceKey][]*unstructured.Unstructured)
for i := range objs {
obj := objs[i]
isNamespaced, err := infoProvider.IsNamespaced(server, obj.GroupVersionKind().GroupKind())
if err != nil {
return objs, nil, err
}
if !isNamespaced {
obj.SetNamespace("")
} else if obj.GetNamespace() == "" {
obj.SetNamespace(namespace)
}
key := kubeutil.GetResourceKey(obj)
targetByKey[key] = append(targetByKey[key], obj)
}
conditions := make([]v1alpha1.ApplicationCondition, 0)
result := make([]*unstructured.Unstructured, 0)
for key, targets := range targetByKey {
if len(targets) > 1 {
conditions = append(conditions, appv1.ApplicationCondition{
Type: appv1.ApplicationConditionRepeatedResourceWarning,
Message: fmt.Sprintf("Resource %s appeared %d times among application resources.", key.String(), len(targets)),
})
}
result = append(result, targets[len(targets)-1])
targetObjs[i] = obj
}
return result, conditions, nil
}
server, namespace := app.Spec.Destination.Server, app.Spec.Destination.Namespace
// dedupLiveResources removes live resource duplicates that share the same UID. Duplicates are created in separate resource groups:
// e.g. apps/Deployment produces a duplicate in extensions/Deployment, authorization.openshift.io/ClusterRole produces a duplicate in rbac.authorization.k8s.io/ClusterRole, etc.
// The method removes such duplicates unless the duplicate was defined in git (i.e. exists in the target resources list). At least one duplicate stays;
// if none of the duplicates are in git, a random one stays.
func dedupLiveResources(targetObjs []*unstructured.Unstructured, liveObjsByKey map[kubeutil.ResourceKey]*unstructured.Unstructured) {
targetObjByKey := make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
for i := range targetObjs {
targetObjByKey[kubeutil.GetResourceKey(targetObjs[i])] = targetObjs[i]
}
liveObjsById := make(map[types.UID][]*unstructured.Unstructured)
for k := range liveObjsByKey {
obj := liveObjsByKey[k]
if obj != nil {
liveObjsById[obj.GetUID()] = append(liveObjsById[obj.GetUID()], obj)
}
}
for id := range liveObjsById {
objs := liveObjsById[id]
if len(objs) > 1 {
duplicatesLeft := len(objs)
for i := range objs {
obj := objs[i]
resourceKey := kubeutil.GetResourceKey(obj)
if _, ok := targetObjByKey[resourceKey]; !ok {
delete(liveObjsByKey, resourceKey)
duplicatesLeft--
if duplicatesLeft == 1 {
break
}
}
}
}
}
}
func (m *appStateManager) getComparisonSettings(app *appv1.Application) (string, map[string]v1alpha1.ResourceOverride, diff.Normalizer, error) {
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
log.Infof("Comparing app %s state in cluster %s (namespace: %s)", app.ObjectMeta.Name, server, namespace)
// Get the REST config for the cluster corresponding to the environment
clst, err := ks.db.GetCluster(context.Background(), server)
if err != nil {
return "", nil, nil, err
return nil, nil, err
}
appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()
restConfig := clst.RESTConfig()
// Retrieve the live versions of the objects
liveObjs, err := kubeutil.GetResourcesWithLabel(restConfig, namespace, common.LabelApplicationName, app.Name)
if err != nil {
return "", nil, nil, err
return nil, nil, err
}
diffNormalizer, err := argo.NewDiffNormalizer(app.Spec.IgnoreDifferences, resourceOverrides)
liveObjByFullName := ks.groupLiveObjects(liveObjs, targetObjs)
controlledLiveObj := make([]*unstructured.Unstructured, len(targetObjs))
// Move live resources which have a corresponding target object to controlledLiveObj
dynClientPool := dynamic.NewDynamicClientPool(restConfig)
disco, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
return "", nil, nil, err
return nil, nil, err
}
return appLabelKey, resourceOverrides, diffNormalizer, nil
}
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied source. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool, localManifests []string) *comparisonResult {
appLabelKey, resourceOverrides, diffNormalizer, err := m.getComparisonSettings(app)
// return unknown comparison result if basic comparison settings cannot be loaded
if err != nil {
return &comparisonResult{
syncStatus: &v1alpha1.SyncStatus{
ComparedTo: appv1.ComparedTo{Source: source, Destination: app.Spec.Destination},
Status: appv1.SyncStatusCodeUnknown,
},
healthStatus: &appv1.HealthStatus{Status: appv1.HealthStatusUnknown},
}
}
// do best effort loading live and target state to present as much information about app state as possible
failedToLoadObjs := false
conditions := make([]v1alpha1.ApplicationCondition, 0)
logCtx := log.WithField("application", app.Name)
logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)
var targetObjs []*unstructured.Unstructured
var hooks []*unstructured.Unstructured
var manifestInfo *apiclient.ManifestResponse
if len(localManifests) == 0 {
targetObjs, hooks, manifestInfo, err = m.getRepoObjs(app, source, appLabelKey, revision, noCache)
if err != nil {
targetObjs = make([]*unstructured.Unstructured, 0)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
failedToLoadObjs = true
}
} else {
targetObjs, hooks, err = unmarshalManifests(localManifests)
if err != nil {
targetObjs = make([]*unstructured.Unstructured, 0)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
failedToLoadObjs = true
}
manifestInfo = nil
}
targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Server, app.Spec.Destination.Namespace, targetObjs, m.liveStateCache)
if err != nil {
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
}
conditions = append(conditions, dedupConditions...)
resFilter, err := m.settingsMgr.GetResourcesFilter()
if err != nil {
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
} else {
for i := len(targetObjs) - 1; i >= 0; i-- {
targetObj := targetObjs[i]
for i, targetObj := range targetObjs {
fullName := getResourceFullName(targetObj)
liveObj := liveObjByFullName[fullName]
if liveObj == nil {
// If we get here, it indicates we did not find the live resource when querying using
// our app label. However, it is possible that the resource was created/modified outside
// of ArgoCD. In order to determine that it is truly missing, we fall back to perform a
// direct lookup of the resource by name. See issue #141
gvk := targetObj.GroupVersionKind()
if resFilter.IsExcludedResource(gvk.Group, gvk.Kind, app.Spec.Destination.Server) {
targetObjs = append(targetObjs[:i], targetObjs[i+1:]...)
conditions = append(conditions, v1alpha1.ApplicationCondition{
Type: v1alpha1.ApplicationConditionExcludedResourceWarning,
Message: fmt.Sprintf("Resource %s/%s %s is excluded in the settings", gvk.Group, gvk.Kind, targetObj.GetName()),
})
dclient, err := dynClientPool.ClientForGroupVersionKind(gvk)
if err != nil {
return nil, nil, err
}
apiResource, err := kubeutil.ServerResourceForGroupVersionKind(disco, gvk)
if err != nil {
return nil, nil, err
}
liveObj, err = kubeutil.GetLiveResource(dclient, targetObj, apiResource, namespace)
if err != nil {
return nil, nil, err
}
}
controlledLiveObj[i] = liveObj
delete(liveObjByFullName, fullName)
}
logCtx.Debugf("Generated config manifests")
liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs)
dedupLiveResources(targetObjs, liveObjByKey)
if err != nil {
liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
failedToLoadObjs = true
}
logCtx.Debugf("Retrieved lived manifests")
for _, liveObj := range liveObjByKey {
if liveObj != nil {
appInstanceName := kubeutil.GetAppInstanceLabel(liveObj, appLabelKey)
if appInstanceName != "" && appInstanceName != app.Name {
conditions = append(conditions, v1alpha1.ApplicationCondition{
Type: v1alpha1.ApplicationConditionSharedResourceWarning,
Message: fmt.Sprintf("%s/%s is part of a different application: %s", liveObj.GetKind(), liveObj.GetName(), appInstanceName),
})
}
// Move root-level live resources to controlledLiveObj and add nil to targetObjs to indicate that the target object is missing
for fullName := range liveObjByFullName {
liveObj := liveObjByFullName[fullName]
if !hasParent(liveObj) {
targetObjs = append(targetObjs, nil)
controlledLiveObj = append(controlledLiveObj, liveObj)
}
}
managedLiveObj := make([]*unstructured.Unstructured, len(targetObjs))
for i, obj := range targetObjs {
gvk := obj.GroupVersionKind()
ns := util.FirstNonEmpty(obj.GetNamespace(), app.Spec.Destination.Namespace)
if namespaced, err := m.liveStateCache.IsNamespaced(app.Spec.Destination.Server, obj.GroupVersionKind().GroupKind()); err == nil && !namespaced {
ns = ""
}
key := kubeutil.NewResourceKey(gvk.Group, gvk.Kind, ns, obj.GetName())
if liveObj, ok := liveObjByKey[key]; ok {
managedLiveObj[i] = liveObj
delete(liveObjByKey, key)
} else {
managedLiveObj[i] = nil
}
}
logCtx.Debugf("built managed objects list")
// Everything remaining in liveObjByKey is an "extra" resource that isn't tracked in git.
// The following adds all the extras to the managedLiveObj list and backfills the targetObj
// list with nils, so that the lists are of equal lengths for comparison purposes.
for _, obj := range liveObjByKey {
targetObjs = append(targetObjs, nil)
managedLiveObj = append(managedLiveObj, obj)
}
// Do the actual comparison
diffResults, err := diff.DiffArray(targetObjs, managedLiveObj, diffNormalizer)
diffResults, err := diff.DiffArray(targetObjs, controlledLiveObj)
if err != nil {
diffResults = &diff.DiffResultList{}
failedToLoadObjs = true
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
return nil, nil, err
}
comparisonStatus := v1alpha1.ComparisonStatusSynced
syncCode := v1alpha1.SyncStatusCodeSynced
managedResources := make([]managedResource, len(targetObjs))
resourceSummaries := make([]v1alpha1.ResourceStatus, len(targetObjs))
for i, targetObj := range targetObjs {
liveObj := managedLiveObj[i]
obj := liveObj
if obj == nil {
obj = targetObj
resources := make([]v1alpha1.ResourceState, len(targetObjs))
for i := 0; i < len(targetObjs); i++ {
resState := v1alpha1.ResourceState{
ChildLiveResources: make([]v1alpha1.ResourceNode, 0),
}
if obj == nil {
continue
}
gvk := obj.GroupVersionKind()
resState := v1alpha1.ResourceStatus{
Namespace: obj.GetNamespace(),
Name: obj.GetName(),
Kind: gvk.Kind,
Version: gvk.Version,
Group: gvk.Group,
Hook: hookutil.IsHook(obj),
RequiresPruning: targetObj == nil && liveObj != nil,
}
diffResult := diffResults.Diffs[i]
if resState.Hook || ignore.Ignore(obj) {
// For resource hooks, don't store sync status, and do not affect overall sync status
} else if diffResult.Modified || targetObj == nil || liveObj == nil {
// Set resource state to OutOfSync since one of the following is true:
// * target and live resource are different
// * target resource not defined and live resource is extra
// * target resource present but live resource is missing
resState.Status = v1alpha1.SyncStatusCodeOutOfSync
// we ignore the status if the obj needs pruning AND we have the annotation
needsPruning := targetObj == nil && liveObj != nil
if !(needsPruning && resource.HasAnnotationOption(obj, common.AnnotationCompareOptions, "IgnoreExtraneous")) {
syncCode = v1alpha1.SyncStatusCodeOutOfSync
}
if diffResult.Modified {
// Set resource state to 'OutOfSync' since target and corresponding live resource are different
resState.Status = v1alpha1.ComparisonStatusOutOfSync
comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
} else {
resState.Status = v1alpha1.SyncStatusCodeSynced
resState.Status = v1alpha1.ComparisonStatusSynced
}
// we can't say anything about the status if we were unable to get the target objects
if failedToLoadObjs {
resState.Status = v1alpha1.SyncStatusCodeUnknown
if targetObjs[i] == nil {
resState.TargetState = "null"
// Set resource state to 'OutOfSync' since target resource is missing and live resource is unexpected
resState.Status = v1alpha1.ComparisonStatusOutOfSync
comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
} else {
targetObjBytes, err := json.Marshal(targetObjs[i].Object)
if err != nil {
return nil, nil, err
}
resState.TargetState = string(targetObjBytes)
}
managedResources[i] = managedResource{
Name: resState.Name,
Namespace: resState.Namespace,
Group: resState.Group,
Kind: resState.Kind,
Version: resState.Version,
Live: liveObj,
Target: targetObj,
Diff: diffResult,
Hook: resState.Hook,
if controlledLiveObj[i] == nil {
resState.LiveState = "null"
// Set resource state to 'OutOfSync' since target resource present but corresponding live resource is missing
resState.Status = v1alpha1.ComparisonStatusOutOfSync
comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
} else {
liveObjBytes, err := json.Marshal(controlledLiveObj[i].Object)
if err != nil {
return nil, nil, err
}
resState.LiveState = string(liveObjBytes)
}
resourceSummaries[i] = resState
resources[i] = resState
}
if failedToLoadObjs {
syncCode = v1alpha1.SyncStatusCodeUnknown
for i, resource := range resources {
liveResource := controlledLiveObj[i]
if liveResource != nil {
childResources, err := getChildren(liveResource, liveObjByFullName)
if err != nil {
return nil, nil, err
}
resource.ChildLiveResources = childResources
resources[i] = resource
}
}
syncStatus := v1alpha1.SyncStatus{
ComparedTo: appv1.ComparedTo{
Source: source,
Destination: app.Spec.Destination,
},
Status: syncCode,
compResult := v1alpha1.ComparisonResult{
ComparedTo: app.Spec.Source,
ComparedAt: metav1.Time{Time: time.Now().UTC()},
Resources: resources,
Status: comparisonStatus,
}
if manifestInfo != nil {
syncStatus.Revision = manifestInfo.Revision
}
healthStatus, err := health.SetApplicationHealth(resourceSummaries, GetLiveObjs(managedResources), resourceOverrides, func(obj *unstructured.Unstructured) bool {
return !isSelfReferencedApp(app, kubeutil.GetObjectRef(obj))
})
if err != nil {
conditions = append(conditions, appv1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
}
compRes := comparisonResult{
syncStatus: &syncStatus,
healthStatus: healthStatus,
resources: resourceSummaries,
managedResources: managedResources,
hooks: hooks,
diffNormalizer: diffNormalizer,
}
if manifestInfo != nil {
compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
}
app.Status.SetConditions(conditions, map[appv1.ApplicationConditionType]bool{
appv1.ApplicationConditionComparisonError: true,
appv1.ApplicationConditionSharedResourceWarning: true,
appv1.ApplicationConditionRepeatedResourceWarning: true,
appv1.ApplicationConditionExcludedResourceWarning: true,
})
return &compRes
return &compResult, manifestInfo, nil
}
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource) error {
var nextID int64
if len(app.Status.History) > 0 {
nextID = app.Status.History[len(app.Status.History)-1].ID + 1
func hasParent(obj *unstructured.Unstructured) bool {
// TODO: remove special case after Service and Endpoint get explicit relationship ( https://github.com/kubernetes/kubernetes/issues/28483 )
return obj.GetKind() == kubeutil.EndpointsKind || metav1.GetControllerOf(obj) != nil
}
func isControlledBy(obj *unstructured.Unstructured, parent *unstructured.Unstructured) bool {
// TODO: remove special case after Service and Endpoint get explicit relationship ( https://github.com/kubernetes/kubernetes/issues/28483 )
if obj.GetKind() == kubeutil.EndpointsKind && parent.GetKind() == kubeutil.ServiceKind {
return obj.GetName() == parent.GetName()
}
history := append(app.Status.History, v1alpha1.RevisionHistory{
Revision: revision,
DeployedAt: metav1.NewTime(time.Now().UTC()),
ID: nextID,
Source: source,
return metav1.IsControlledBy(obj, parent)
}
func getChildren(parent *unstructured.Unstructured, liveObjByFullName map[string]*unstructured.Unstructured) ([]v1alpha1.ResourceNode, error) {
children := make([]v1alpha1.ResourceNode, 0)
for fullName, obj := range liveObjByFullName {
if isControlledBy(obj, parent) {
delete(liveObjByFullName, fullName)
childResource := v1alpha1.ResourceNode{}
json, err := json.Marshal(obj)
if err != nil {
return nil, err
}
childResource.State = string(json)
childResourceChildren, err := getChildren(obj, liveObjByFullName)
if err != nil {
return nil, err
}
childResource.Children = childResourceChildren
children = append(children, childResource)
}
}
return children, nil
}
func getResourceFullName(obj *unstructured.Unstructured) string {
return fmt.Sprintf("%s:%s", obj.GetKind(), obj.GetName())
}
func (s *KsonnetAppStateManager) SyncAppState(
app *v1alpha1.Application, revision string, overrides *[]v1alpha1.ComponentParameter, dryRun bool, prune bool) *v1alpha1.OperationState {
if revision != "" {
app.Spec.Source.TargetRevision = revision
}
if overrides != nil {
app.Spec.Source.ComponentParameterOverrides = *overrides
}
opRes, manifest := s.syncAppResources(app, dryRun, prune)
if !dryRun && opRes.Phase.Successful() {
err := s.persistDeploymentInfo(app, manifest.Revision, manifest.Params, nil)
if err != nil {
opRes.Phase = v1alpha1.OperationError
opRes.Message = fmt.Sprintf("failed to record sync to history: %v", err)
}
}
return opRes
}
func (s *KsonnetAppStateManager) getRepo(repoURL string) *v1alpha1.Repository {
repo, err := s.db.GetRepository(context.Background(), repoURL)
if err != nil {
// If we couldn't retrieve from the repo service, assume public repositories
repo = &v1alpha1.Repository{Repo: repoURL}
}
return repo
}
func (s *KsonnetAppStateManager) persistDeploymentInfo(
app *v1alpha1.Application, revision string, envParams []*v1alpha1.ComponentParameter, overrides *[]v1alpha1.ComponentParameter) error {
params := make([]v1alpha1.ComponentParameter, len(envParams))
for i := range envParams {
param := *envParams[i]
params[i] = param
}
var nextId int64 = 0
if len(app.Status.History) > 0 {
nextId = app.Status.History[len(app.Status.History)-1].ID + 1
}
history := append(app.Status.History, v1alpha1.DeploymentInfo{
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
Revision: revision,
Params: params,
DeployedAt: metav1.NewTime(time.Now()),
ID: nextId,
})
if len(history) > common.RevisionHistoryLimit {
history = history[1 : common.RevisionHistoryLimit+1]
if len(history) > maxHistoryCnt {
history = history[1 : maxHistoryCnt+1]
}
patch, err := json.Marshal(map[string]map[string][]v1alpha1.RevisionHistory{
patch, err := json.Marshal(map[string]map[string][]v1alpha1.DeploymentInfo{
"status": {
"history": history,
},
@@ -512,31 +349,146 @@ func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revi
if err != nil {
return err
}
_, err = m.appclientset.ArgoprojV1alpha1().Applications(m.namespace).Patch(app.Name, types.MergePatchType, patch)
_, err = s.appclientset.ArgoprojV1alpha1().Applications(s.namespace).Patch(app.Name, types.MergePatchType, patch)
return err
}
func (s *KsonnetAppStateManager) syncAppResources(
app *v1alpha1.Application,
dryRun bool,
prune bool) (*v1alpha1.OperationState, *repository.ManifestResponse) {
opRes := v1alpha1.OperationState{
SyncResult: &v1alpha1.SyncOperationResult{},
}
comparison, manifestInfo, err := s.CompareAppState(app)
if err != nil {
opRes.Phase = v1alpha1.OperationError
opRes.Message = err.Error()
return &opRes, manifestInfo
}
clst, err := s.db.GetCluster(context.Background(), app.Spec.Destination.Server)
if err != nil {
opRes.Phase = v1alpha1.OperationError
opRes.Message = err.Error()
return &opRes, manifestInfo
}
config := clst.RESTConfig()
opRes.SyncResult.Resources = make([]*v1alpha1.ResourceDetails, len(comparison.Resources))
liveObjs := make([]*unstructured.Unstructured, len(comparison.Resources))
targetObjs := make([]*unstructured.Unstructured, len(comparison.Resources))
// First perform a `kubectl apply --dry-run` against all the manifests. This will detect most
// (but not all) validation issues with the users' manifests (e.g. will detect syntax issues,
// but will not detect if they are mutating immutable fields). If anything fails, we will
// refuse to perform the sync.
dryRunSuccessful := true
for i, resourceState := range comparison.Resources {
liveObj, err := resourceState.LiveObject()
if err != nil {
opRes.Phase = v1alpha1.OperationError
opRes.Message = fmt.Sprintf("Failed to unmarshal live object: %v", err)
return &opRes, manifestInfo
}
targetObj, err := resourceState.TargetObject()
if err != nil {
opRes.Phase = v1alpha1.OperationError
opRes.Message = fmt.Sprintf("Failed to unmarshal target object: %v", err)
return &opRes, manifestInfo
}
liveObjs[i] = liveObj
targetObjs[i] = targetObj
resDetails, successful := syncObject(config, app.Spec.Destination.Namespace, targetObj, liveObj, prune, true)
if !successful {
dryRunSuccessful = false
}
opRes.SyncResult.Resources[i] = &resDetails
}
if !dryRunSuccessful {
opRes.Phase = v1alpha1.OperationFailed
opRes.Message = "one or more objects failed to apply (dry run)"
return &opRes, manifestInfo
}
if dryRun {
opRes.Phase = v1alpha1.OperationSucceeded
opRes.Message = "successfully synced (dry run)"
return &opRes, manifestInfo
}
// If we get here, all objects passed their dry-run, so we are now ready to actually perform the
// `kubectl apply`. Loop through the resources again, this time without dry-run.
syncSuccessful := true
for i := range comparison.Resources {
resDetails, successful := syncObject(config, app.Spec.Destination.Namespace, targetObjs[i], liveObjs[i], prune, false)
if !successful {
syncSuccessful = false
}
opRes.SyncResult.Resources[i] = &resDetails
}
if !syncSuccessful {
opRes.Message = "one or more objects failed to apply"
opRes.Phase = v1alpha1.OperationFailed
} else {
opRes.Message = "successfully synced"
opRes.Phase = v1alpha1.OperationSucceeded
}
return &opRes, manifestInfo
}
// syncObject performs a sync of a single resource
func syncObject(config *rest.Config, namespace string, targetObj, liveObj *unstructured.Unstructured, prune, dryRun bool) (v1alpha1.ResourceDetails, bool) {
obj := targetObj
if obj == nil {
obj = liveObj
}
resDetails := v1alpha1.ResourceDetails{
Name: obj.GetName(),
Kind: obj.GetKind(),
Namespace: namespace,
}
needsDelete := targetObj == nil
successful := true
if needsDelete {
if prune {
if dryRun {
resDetails.Message = "pruned (dry run)"
} else {
err := kubeutil.DeleteResource(config, liveObj, namespace)
if err != nil {
resDetails.Message = err.Error()
successful = false
} else {
resDetails.Message = "pruned"
}
}
} else {
resDetails.Message = "ignored (requires pruning)"
}
} else {
message, err := kube.ApplyResource(config, targetObj, namespace, dryRun)
if err != nil {
resDetails.Message = err.Error()
successful = false
} else {
resDetails.Message = message
}
}
return resDetails, successful
}
// NewAppStateManager creates a new instance of the Ksonnet app comparator
func NewAppStateManager(
db db.ArgoDB,
appclientset appclientset.Interface,
repoClientset apiclient.Clientset,
repoClientset reposerver.Clientset,
namespace string,
kubectl kubeutil.Kubectl,
settingsMgr *settings.SettingsManager,
liveStateCache statecache.LiveStateCache,
projInformer cache.SharedIndexInformer,
metricsServer *metrics.MetricsServer,
) AppStateManager {
return &appStateManager{
liveStateCache: liveStateCache,
db: db,
appclientset: appclientset,
kubectl: kubectl,
repoClientset: repoClientset,
namespace: namespace,
settingsMgr: settingsMgr,
projInformer: projInformer,
metricsServer: metricsServer,
return &KsonnetAppStateManager{
db: db,
appclientset: appclientset,
repoClientset: repoClientset,
namespace: namespace,
}
}
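A sketch of wiring the Ksonnet-based manager and running a validation-only sync, using the constructor and interface signatures shown above (all dependency variables are placeholders):

stateMgr := NewAppStateManager(argoDB, appClientset, repoClientset, "argocd")
// dryRun=true stops after the `kubectl apply --dry-run` pass; prune=false
// leaves extra live resources untouched.
opState := stateMgr.SyncAppState(app, "", nil, true, false)
log.Infof("sync phase: %s, message: %s", opState.Phase, opState.Message)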

@@ -1,393 +0,0 @@
package controller
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/argoproj/argo-cd/common"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/apiclient"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util/kube"
)
// TestCompareAppStateEmpty tests comparison when both git and live have no objects
func TestCompareAppStateEmpty(t *testing.T) {
app := newFakeApp()
data := fakeData{
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Len(t, compRes.resources, 0)
assert.Len(t, compRes.managedResources, 0)
assert.Len(t, app.Status.Conditions, 0)
}
// TestCompareAppStateMissing tests when there is a manifest defined in the repo which doesn't exist in live
func TestCompareAppStateMissing(t *testing.T) {
app := newFakeApp()
data := fakeData{
apps: []runtime.Object{app},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{test.PodManifest},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
assert.Len(t, compRes.resources, 1)
assert.Len(t, compRes.managedResources, 1)
assert.Len(t, app.Status.Conditions, 0)
}
// TestCompareAppStateExtra tests when there is an extra object in live but not defined in git
func TestCompareAppStateExtra(t *testing.T) {
pod := test.NewPod()
pod.SetNamespace(test.FakeDestNamespace)
app := newFakeApp()
key := kube.ResourceKey{Group: "", Kind: "Pod", Namespace: test.FakeDestNamespace, Name: app.Name}
data := fakeData{
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
key: pod,
},
}
ctrl := newFakeController(&data)
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
assert.Equal(t, 1, len(compRes.managedResources))
assert.Equal(t, 0, len(app.Status.Conditions))
}
// TestCompareAppStateHook checks that hooks are detected during manifest generation, and not
// considered as part of resources when assessing Synced status
func TestCompareAppStateHook(t *testing.T) {
pod := test.NewPod()
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
podBytes, _ := json.Marshal(pod)
app := newFakeApp()
data := fakeData{
apps: []runtime.Object{app},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{string(podBytes)},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 0, len(compRes.resources))
assert.Equal(t, 0, len(compRes.managedResources))
assert.Equal(t, 1, len(compRes.hooks))
assert.Equal(t, 0, len(app.Status.Conditions))
}
// checks that ignore resources are detected, but excluded from status
func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) {
pod := test.NewPod()
pod.SetAnnotations(map[string]string{common.AnnotationCompareOptions: "IgnoreExtraneous"})
app := newFakeApp()
data := fakeData{
apps: []runtime.Object{app},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Len(t, compRes.resources, 0)
assert.Len(t, compRes.managedResources, 0)
assert.Len(t, app.Status.Conditions, 0)
}
// TestCompareAppStateExtraHook tests when there is an extra _hook_ object in live but not defined in git
func TestCompareAppStateExtraHook(t *testing.T) {
pod := test.NewPod()
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
pod.SetNamespace(test.FakeDestNamespace)
app := newFakeApp()
key := kube.ResourceKey{Group: "", Kind: "Pod", Namespace: test.FakeDestNamespace, Name: app.Name}
data := fakeData{
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
key: pod,
},
}
ctrl := newFakeController(&data)
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
assert.Equal(t, 1, len(compRes.managedResources))
assert.Equal(t, 0, len(compRes.hooks))
assert.Equal(t, 0, len(app.Status.Conditions))
}
func toJSON(t *testing.T, obj *unstructured.Unstructured) string {
data, err := json.Marshal(obj)
assert.NoError(t, err)
return string(data)
}
func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
obj1 := test.NewPod()
obj1.SetNamespace(test.FakeDestNamespace)
obj2 := test.NewPod()
obj3 := test.NewPod()
obj3.SetNamespace("kube-system")
app := newFakeApp()
data := fakeData{
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{toJSON(t, obj1), toJSON(t, obj2), toJSON(t, obj3)},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(obj1): obj1,
kube.GetResourceKey(obj3): obj3,
},
}
ctrl := newFakeController(&data)
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.NotNil(t, compRes)
assert.Contains(t, app.Status.Conditions, argoappv1.ApplicationCondition{
Message: "Resource /Pod/fake-dest-ns/my-pod appeared 2 times among application resources.",
Type: argoappv1.ApplicationConditionRepeatedResourceWarning,
})
assert.Equal(t, 2, len(compRes.resources))
}
var defaultProj = argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: test.FakeArgoCDNamespace,
},
Spec: argoappv1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []argoappv1.ApplicationDestination{
{
Server: "*",
Namespace: "*",
},
},
},
}
func TestSetHealth(t *testing.T) {
app := newFakeApp()
deployment := kube.MustToUnstructured(&v1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1beta1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "demo",
Namespace: "default",
},
})
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(deployment): deployment,
},
})
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.Equal(t, compRes.healthStatus.Status, argoappv1.HealthStatusHealthy)
}
func TestSetHealthSelfReferencedApp(t *testing.T) {
app := newFakeApp()
unstructuredApp := kube.MustToUnstructured(app)
deployment := kube.MustToUnstructured(&v1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1beta1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "demo",
Namespace: "default",
},
})
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(deployment): deployment,
kube.GetResourceKey(unstructuredApp): unstructuredApp,
},
})
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.Equal(t, compRes.healthStatus.Status, argoappv1.HealthStatusHealthy)
}
func TestSetManagedResourcesWithOrphanedResources(t *testing.T) {
proj := defaultProj.DeepCopy()
proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{}
app := newFakeApp()
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, proj},
namespacedResources: map[kube.ResourceKey]namespacedResource{
kube.NewResourceKey("apps", kube.DeploymentKind, app.Namespace, "guestbook"): {
ResourceNode: argoappv1.ResourceNode{
ResourceRef: argoappv1.ResourceRef{Kind: kube.DeploymentKind, Name: "guestbook", Namespace: app.Namespace},
},
AppName: "",
},
},
})
tree, err := ctrl.setAppManagedResources(app, &comparisonResult{managedResources: make([]managedResource, 0)})
assert.NoError(t, err)
assert.Equal(t, len(tree.OrphanedNodes), 1)
assert.Equal(t, "guestbook", tree.OrphanedNodes[0].Name)
assert.Equal(t, app.Namespace, tree.OrphanedNodes[0].Namespace)
}
func TestSetManagedResourcesWithResourcesOfAnotherApp(t *testing.T) {
proj := defaultProj.DeepCopy()
proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{}
app1 := newFakeApp()
app1.Name = "app1"
app2 := newFakeApp()
app2.Name = "app2"
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app1, app2, proj},
namespacedResources: map[kube.ResourceKey]namespacedResource{
kube.NewResourceKey("apps", kube.DeploymentKind, app2.Namespace, "guestbook"): {
ResourceNode: argoappv1.ResourceNode{
ResourceRef: argoappv1.ResourceRef{Kind: kube.DeploymentKind, Name: "guestbook", Namespace: app2.Namespace},
},
AppName: "app2",
},
},
})
tree, err := ctrl.setAppManagedResources(app1, &comparisonResult{managedResources: make([]managedResource, 0)})
assert.NoError(t, err)
assert.Equal(t, len(tree.OrphanedNodes), 0)
}
func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) {
proj := defaultProj.DeepCopy()
proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{}
app := newFakeApp()
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, proj},
configMapData: map[string]string{
"resource.customizations": "invalid setting",
},
})
compRes := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
assert.Equal(t, argoappv1.HealthStatusUnknown, compRes.healthStatus.Status)
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
}
func TestSetManagedResourcesKnownOrphanedResourceExceptions(t *testing.T) {
proj := defaultProj.DeepCopy()
proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{}
app := newFakeApp()
app.Namespace = "default"
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, proj},
namespacedResources: map[kube.ResourceKey]namespacedResource{
kube.NewResourceKey("apps", kube.DeploymentKind, app.Namespace, "guestbook"): {
ResourceNode: argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Group: "apps", Kind: kube.DeploymentKind, Name: "guestbook", Namespace: app.Namespace}},
},
kube.NewResourceKey("", kube.ServiceAccountKind, app.Namespace, "default"): {
ResourceNode: argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Kind: kube.ServiceAccountKind, Name: "default", Namespace: app.Namespace}},
},
kube.NewResourceKey("", kube.ServiceKind, app.Namespace, "kubernetes"): {
ResourceNode: argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Kind: kube.ServiceAccountKind, Name: "kubernetes", Namespace: app.Namespace}},
},
},
})
tree, err := ctrl.setAppManagedResources(app, &comparisonResult{managedResources: make([]managedResource, 0)})
assert.NoError(t, err)
assert.Len(t, tree.OrphanedNodes, 1)
assert.Equal(t, "guestbook", tree.OrphanedNodes[0].Name)
}
func Test_comparisonResult_obs(t *testing.T) {
assert.Len(t, (&comparisonResult{}).targetObjs(), 0)
assert.Len(t, (&comparisonResult{managedResources: []managedResource{{}}}).targetObjs(), 0)
assert.Len(t, (&comparisonResult{managedResources: []managedResource{{Target: test.NewPod()}}}).targetObjs(), 1)
assert.Len(t, (&comparisonResult{hooks: []*unstructured.Unstructured{{}}}).targetObjs(), 1)
}

View File

@@ -1,808 +0,0 @@
package controller
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
log "github.com/sirupsen/logrus"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller/metrics"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
listersv1alpha1 "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/util/argo"
"github.com/argoproj/argo-cd/util/health"
"github.com/argoproj/argo-cd/util/hook"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/rand"
"github.com/argoproj/argo-cd/util/resource"
)
const (
crdReadinessTimeout = time.Duration(3) * time.Second
)
var syncIdPrefix uint64 = 0
type syncContext struct {
resourceOverrides map[string]v1alpha1.ResourceOverride
appName string
proj *v1alpha1.AppProject
compareResult *comparisonResult
config *rest.Config
dynamicIf dynamic.Interface
disco discovery.DiscoveryInterface
extensionsclientset *clientset.Clientset
kubectl kube.Kubectl
namespace string
server string
syncOp *v1alpha1.SyncOperation
syncRes *v1alpha1.SyncOperationResult
syncResources []v1alpha1.SyncOperationResource
opState *v1alpha1.OperationState
log *log.Entry
// lock to protect concurrent updates of the result list
lock sync.Mutex
}
func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState) {
// Sync requests may be issued with ambiguous revisions (e.g. master, HEAD, v1.2.3).
// The meaning of these can change when resuming operations (e.g. a hook sync). After calculating a
// concrete git commit SHA, the SHA is remembered in the status.operationState.syncResult field.
// This ensures that when resuming an operation, we sync to the same revision that we initially
// started with.
var revision string
var syncOp v1alpha1.SyncOperation
var syncRes *v1alpha1.SyncOperationResult
var syncResources []v1alpha1.SyncOperationResource
var source v1alpha1.ApplicationSource
if state.Operation.Sync == nil {
state.Phase = v1alpha1.OperationFailed
state.Message = "Invalid operation request: no operation specified"
return
}
syncOp = *state.Operation.Sync
if syncOp.Source == nil {
// normal sync case (where source is taken from app.spec.source)
source = app.Spec.Source
} else {
// rollback case
source = *state.Operation.Sync.Source
}
syncResources = syncOp.Resources
if state.SyncResult != nil {
syncRes = state.SyncResult
revision = state.SyncResult.Revision
} else {
syncRes = &v1alpha1.SyncOperationResult{}
// status.operationState.syncResult.source must be set properly since auto-sync relies
// on this information to decide if it should sync (i.e. if the source differs from the last
// sync attempt)
syncRes.Source = source
state.SyncResult = syncRes
}
if revision == "" {
// if we get here, it means we did not remember a commit SHA which we should be syncing to.
// This typically indicates we are just about to begin a brand new sync/rollback operation.
// Take the value in the requested operation. We will resolve this to a SHA later.
revision = syncOp.Revision
}
compareResult := m.CompareAppState(app, revision, source, false, syncOp.Manifests)
// If there are any comparison or spec error conditions, do not perform the operation
if errConditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{
v1alpha1.ApplicationConditionComparisonError: true,
v1alpha1.ApplicationConditionInvalidSpecError: true,
}); len(errConditions) > 0 {
state.Phase = v1alpha1.OperationError
state.Message = argo.FormatAppConditions(errConditions)
return
}
// We now have a concrete commit SHA. Save this in the sync result revision so that we remember
// what we should be syncing to when resuming operations.
syncRes.Revision = compareResult.syncStatus.Revision
clst, err := m.db.GetCluster(context.Background(), app.Spec.Destination.Server)
if err != nil {
state.Phase = v1alpha1.OperationError
state.Message = err.Error()
return
}
restConfig := metrics.AddMetricsTransportWrapper(m.metricsServer, app, clst.RESTConfig())
dynamicIf, err := dynamic.NewForConfig(restConfig)
if err != nil {
state.Phase = v1alpha1.OperationError
state.Message = fmt.Sprintf("Failed to initialize dynamic client: %v", err)
return
}
disco, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
state.Phase = v1alpha1.OperationError
state.Message = fmt.Sprintf("Failed to initialize discovery client: %v", err)
return
}
extensionsclientset, err := clientset.NewForConfig(restConfig)
if err != nil {
state.Phase = v1alpha1.OperationError
state.Message = fmt.Sprintf("Failed to initialize extensions client: %v", err)
return
}
proj, err := argo.GetAppProject(&app.Spec, listersv1alpha1.NewAppProjectLister(m.projInformer.GetIndexer()), m.namespace)
if err != nil {
state.Phase = v1alpha1.OperationError
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
return
}
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
if err != nil {
state.Phase = v1alpha1.OperationError
state.Message = fmt.Sprintf("Failed to load resource overrides: %v", err)
return
}
atomic.AddUint64(&syncIdPrefix, 1)
syncId := fmt.Sprintf("%05d-%s", syncIdPrefix, rand.RandString(5))
syncCtx := syncContext{
resourceOverrides: resourceOverrides,
appName: app.Name,
proj: proj,
compareResult: compareResult,
config: restConfig,
dynamicIf: dynamicIf,
disco: disco,
extensionsclientset: extensionsclientset,
kubectl: m.kubectl,
namespace: app.Spec.Destination.Namespace,
server: app.Spec.Destination.Server,
syncOp: &syncOp,
syncRes: syncRes,
syncResources: syncResources,
opState: state,
log: log.WithFields(log.Fields{"application": app.Name, "syncId": syncId}),
}
start := time.Now()
if state.Phase == v1alpha1.OperationTerminating {
syncCtx.terminate()
} else {
syncCtx.sync()
}
syncCtx.log.WithField("duration", time.Since(start)).Info("sync/terminate complete")
if !syncOp.DryRun && !syncCtx.isSelectiveSync() && syncCtx.opState.Phase.Successful() {
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source)
if err != nil {
syncCtx.setOperationPhase(v1alpha1.OperationError, fmt.Sprintf("failed to record sync to history: %v", err))
}
}
}
// sync performs the actual apply or hook-based sync
func (sc *syncContext) sync() {
sc.log.WithFields(log.Fields{"isSelectiveSync": sc.isSelectiveSync(), "skipHooks": sc.skipHooks(), "started": sc.started()}).Info("syncing")
tasks, ok := sc.getSyncTasks()
if !ok {
sc.setOperationPhase(v1alpha1.OperationFailed, "one or more synchronization tasks are not valid")
return
}
sc.log.WithFields(log.Fields{"tasks": tasks, "isSelectiveSync": sc.isSelectiveSync()}).Info("tasks")
// Perform a `kubectl apply --dry-run` against all the manifests. This will detect most (but
// not all) validation issues with the user's manifests (e.g. it will detect syntax issues, but
// will not detect if they are mutating immutable fields). If anything fails, we refuse to
// perform the sync. We only wish to do this once per operation; performing additional dry-runs
// is harmless but redundant. The indicator we use to detect whether we have already performed
// the dry-run for this operation is whether the resource or hook list is empty.
if !sc.started() {
sc.log.Debug("dry-run")
if sc.runTasks(tasks, true) == failed {
sc.setOperationPhase(v1alpha1.OperationFailed, "one or more objects failed to apply (dry run)")
return
}
}
// update the status of any tasks that are running; note that this must exclude pruning tasks
for _, task := range tasks.Filter(func(t *syncTask) bool {
// just occasionally, you can be running yet not have a live resource
return t.running() && t.liveObj != nil
}) {
if task.isHook() {
// update the hook's result
operationState, message := getOperationPhase(task.liveObj)
sc.setResourceResult(task, "", operationState, message)
// maybe delete the hook
if task.needsDeleting() {
err := sc.deleteResource(task)
if err != nil && !errors.IsNotFound(err) {
sc.setResourceResult(task, "", v1alpha1.OperationError, fmt.Sprintf("failed to delete resource: %v", err))
}
}
} else {
// this must be calculated on the live object
healthStatus, err := health.GetResourceHealth(task.liveObj, sc.resourceOverrides)
if err == nil {
log.WithFields(log.Fields{"task": task, "healthStatus": healthStatus}).Debug("attempting to update health of running task")
if healthStatus == nil {
// some objects (e.g. a Secret) do not have health; they automatically succeed
sc.setResourceResult(task, task.syncStatus, v1alpha1.OperationSucceeded, task.message)
} else {
switch healthStatus.Status {
case v1alpha1.HealthStatusHealthy:
sc.setResourceResult(task, task.syncStatus, v1alpha1.OperationSucceeded, healthStatus.Message)
case v1alpha1.HealthStatusDegraded:
sc.setResourceResult(task, task.syncStatus, v1alpha1.OperationFailed, healthStatus.Message)
}
}
}
}
}
// if (a) we are multi-step and we have any running tasks,
// or (b) there are any running hooks,
// then wait...
multiStep := tasks.multiStep()
if tasks.Any(func(t *syncTask) bool { return (multiStep || t.isHook()) && t.running() }) {
sc.setOperationPhase(v1alpha1.OperationRunning, "one or more tasks are running")
return
}
// syncFailTasks only run during failure, so separate them from regular tasks
syncFailTasks, tasks := tasks.Split(func(t *syncTask) bool { return t.phase == v1alpha1.SyncPhaseSyncFail })
// if there are any completed but unsuccessful tasks, sync is a failure.
if tasks.Any(func(t *syncTask) bool { return t.completed() && !t.successful() }) {
sc.setOperationFailed(syncFailTasks, "one or more synchronization tasks completed unsuccessfully")
return
}
sc.log.WithFields(log.Fields{"tasks": tasks}).Debug("filtering out non-pending tasks")
// remove tasks that are completed; by this point we can assume that there are no running tasks
tasks = tasks.Filter(func(t *syncTask) bool { return t.pending() })
// If no sync tasks were generated (e.g., in case all application manifests have been removed),
// the sync operation is successful.
if len(tasks) == 0 {
sc.setOperationPhase(v1alpha1.OperationSucceeded, "successfully synced (no more tasks)")
return
}
// remove any tasks not in this wave
phase := tasks.phase()
wave := tasks.wave()
// if this is the last phase/wave and the only remaining tasks are non-hooks, then we are successful,
// EVEN if those objects subsequently degrade.
// This handles the common case where neither hooks nor waves are used and a sync equates to simply an (asynchronous) kubectl apply of manifests, which succeeds immediately.
complete := !tasks.Any(func(t *syncTask) bool { return t.phase != phase || wave != t.wave() || t.isHook() })
sc.log.WithFields(log.Fields{"phase": phase, "wave": wave, "tasks": tasks, "syncFailTasks": syncFailTasks}).Debug("filtering tasks in correct phase and wave")
tasks = tasks.Filter(func(t *syncTask) bool { return t.phase == phase && t.wave() == wave })
sc.setOperationPhase(v1alpha1.OperationRunning, "one or more tasks are running")
sc.log.WithFields(log.Fields{"tasks": tasks}).Debug("wet-run")
runState := sc.runTasks(tasks, false)
switch runState {
case failed:
sc.setOperationFailed(syncFailTasks, "one or more objects failed to apply")
case successful:
if complete {
sc.setOperationPhase(v1alpha1.OperationSucceeded, "successfully synced (all tasks run)")
}
}
}
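// Note (added for clarity, not in the original source): the overall flow of sync()
// above is: dry-run all tasks once -> refresh the results of any running tasks ->
// wait while anything is still running -> fail if a completed task was unsuccessful ->
// drop completed tasks -> run the tasks of the current phase/wave -> mark the
// operation succeeded once the final phase/wave completes.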
func (sc *syncContext) setOperationFailed(syncFailTasks syncTasks, message string) {
if len(syncFailTasks) > 0 {
// if all the failure hooks are completed, don't run them again, and mark the sync as failed
if syncFailTasks.All(func(task *syncTask) bool { return task.completed() }) {
sc.setOperationPhase(v1alpha1.OperationFailed, message)
return
}
// otherwise, we need to start the failure hooks, and then return without setting
// the phase, so we make sure we have at least one more sync
sc.log.WithFields(log.Fields{"syncFailTasks": syncFailTasks}).Debug("running sync fail tasks")
if sc.runTasks(syncFailTasks, false) == failed {
sc.setOperationPhase(v1alpha1.OperationFailed, message)
}
} else {
sc.setOperationPhase(v1alpha1.OperationFailed, message)
}
}
func (sc *syncContext) started() bool {
return len(sc.syncRes.Resources) > 0
}
func (sc *syncContext) isSelectiveSync() bool {
// we've selected no resources
if sc.syncResources == nil {
return false
}
// map both lists into sorted slices of strings for comparison
var a []string
for _, r := range sc.compareResult.resources {
if !r.Hook {
a = append(a, fmt.Sprintf("%s:%s:%s", r.Group, r.Kind, r.Name))
}
}
sort.Strings(a)
var b []string
for _, r := range sc.syncResources {
b = append(b, fmt.Sprintf("%s:%s:%s", r.Group, r.Kind, r.Name))
}
sort.Strings(b)
return !reflect.DeepEqual(a, b)
}
// this essentially enforces the old "apply" behaviour
func (sc *syncContext) skipHooks() bool {
// All objects passed a `kubectl apply --dry-run`, so we are now ready to actually perform the sync.
// default sync strategy to hook if no strategy
return sc.syncOp.IsApplyStrategy() || sc.isSelectiveSync()
}
func (sc *syncContext) containsResource(resourceState managedResource) bool {
return !sc.isSelectiveSync() ||
(resourceState.Live != nil && argo.ContainsSyncResource(resourceState.Live.GetName(), resourceState.Live.GroupVersionKind(), sc.syncResources)) ||
(resourceState.Target != nil && argo.ContainsSyncResource(resourceState.Target.GetName(), resourceState.Target.GroupVersionKind(), sc.syncResources))
}
// generates the list of sync tasks we will be performing during this sync.
func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {
resourceTasks := syncTasks{}
successful = true
for _, resource := range sc.compareResult.managedResources {
if !sc.containsResource(resource) {
sc.log.WithFields(log.Fields{"group": resource.Group, "kind": resource.Kind, "name": resource.Name}).
Debug("skipping")
continue
}
obj := obj(resource.Target, resource.Live)
// this creates garbage tasks
if hook.IsHook(obj) {
sc.log.WithFields(log.Fields{"group": obj.GroupVersionKind().Group, "kind": obj.GetKind(), "namespace": obj.GetNamespace(), "name": obj.GetName()}).
Debug("skipping hook")
continue
}
for _, phase := range syncPhases(obj) {
resourceTasks = append(resourceTasks, &syncTask{phase: phase, targetObj: resource.Target, liveObj: resource.Live})
}
}
sc.log.WithFields(log.Fields{"resourceTasks": resourceTasks}).Debug("tasks from managed resources")
hookTasks := syncTasks{}
if !sc.skipHooks() {
for _, obj := range sc.compareResult.hooks {
for _, phase := range syncPhases(obj) {
// Hook resource names are deterministic, whether they are defined by the user (metadata.name),
// or formulated at the time of the operation (metadata.generateName). If the user specifies
// metadata.generateName, then we will generate a formulated metadata.name before submission.
targetObj := obj.DeepCopy()
if targetObj.GetName() == "" {
postfix := strings.ToLower(fmt.Sprintf("%s-%s-%d", sc.syncRes.Revision[0:7], phase, sc.opState.StartedAt.UTC().Unix()))
generateName := obj.GetGenerateName()
targetObj.SetName(fmt.Sprintf("%s%s", generateName, postfix))
}
hookTasks = append(hookTasks, &syncTask{phase: phase, targetObj: targetObj})
}
}
}
sc.log.WithFields(log.Fields{"hookTasks": hookTasks}).Debug("tasks from hooks")
tasks := resourceTasks
tasks = append(tasks, hookTasks...)
// enrich target objects with the namespace
for _, task := range tasks {
if task.targetObj == nil {
continue
}
if task.targetObj.GetNamespace() == "" {
// If the target object's namespace is empty, we set the namespace on the object. We do
// this even though it might be a cluster-scoped resource. This prevents any
// possibility of the resource being unintentionally created in the wrong
// namespace during the `kubectl apply`
task.targetObj = task.targetObj.DeepCopy()
task.targetObj.SetNamespace(sc.namespace)
}
}
// enrich task with live obj
for _, task := range tasks {
if task.targetObj == nil || task.liveObj != nil {
continue
}
task.liveObj = sc.liveObj(task.targetObj)
}
// enrich tasks with the result
for _, task := range tasks {
_, result := sc.syncRes.Resources.Find(task.group(), task.kind(), task.namespace(), task.name(), task.phase)
if result != nil {
task.syncStatus = result.Status
task.operationState = result.HookPhase
task.message = result.Message
}
}
// check permissions
for _, task := range tasks {
serverRes, err := kube.ServerResourceForGroupVersionKind(sc.disco, task.groupVersionKind())
if err != nil {
// Special case for custom resources: if CRD is not yet known by the K8s API server,
// skip verification during `kubectl apply --dry-run` since we expect the CRD
// to be created during app synchronization.
if apierr.IsNotFound(err) && sc.hasCRDOfGroupKind(task.group(), task.kind()) {
sc.log.WithFields(log.Fields{"task": task}).Debug("skip dry-run for custom resource")
task.skipDryRun = true
} else {
sc.setResourceResult(task, v1alpha1.ResultCodeSyncFailed, "", err.Error())
successful = false
}
} else {
if !sc.proj.IsResourcePermitted(metav1.GroupKind{Group: task.group(), Kind: task.kind()}, serverRes.Namespaced) {
sc.setResourceResult(task, v1alpha1.ResultCodeSyncFailed, "", fmt.Sprintf("Resource %s:%s is not permitted in project %s.", task.group(), task.kind(), sc.proj.Name))
successful = false
}
if serverRes.Namespaced && !sc.proj.IsDestinationPermitted(v1alpha1.ApplicationDestination{Namespace: task.namespace(), Server: sc.server}) {
sc.setResourceResult(task, v1alpha1.ResultCodeSyncFailed, "", fmt.Sprintf("namespace %v is not permitted in project '%s'", task.namespace(), sc.proj.Name))
successful = false
}
}
}
sort.Sort(tasks)
return tasks, successful
}
func obj(a, b *unstructured.Unstructured) *unstructured.Unstructured {
if a != nil {
return a
} else {
return b
}
}
func (sc *syncContext) liveObj(obj *unstructured.Unstructured) *unstructured.Unstructured {
for _, resource := range sc.compareResult.managedResources {
if resource.Group == obj.GroupVersionKind().Group &&
resource.Kind == obj.GetKind() &&
// cluster scoped objects will not have a namespace, even if the user has defined it
(resource.Namespace == "" || resource.Namespace == obj.GetNamespace()) &&
resource.Name == obj.GetName() {
return resource.Live
}
}
return nil
}
func (sc *syncContext) setOperationPhase(phase v1alpha1.OperationPhase, message string) {
if sc.opState.Phase != phase || sc.opState.Message != message {
sc.log.Infof("Updating operation state. phase: %s -> %s, message: '%s' -> '%s'", sc.opState.Phase, phase, sc.opState.Message, message)
}
sc.opState.Phase = phase
sc.opState.Message = message
}
// ensureCRDReady waits until the specified CRD is ready (established condition is true). The method is best-effort: it does not fail even if the CRD does not become ready within the timeout.
func (sc *syncContext) ensureCRDReady(name string) {
_ = wait.PollImmediate(time.Duration(100)*time.Millisecond, crdReadinessTimeout, func() (bool, error) {
crd, err := sc.extensionsclientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, condition := range crd.Status.Conditions {
if condition.Type == v1beta1.Established {
return condition.Status == v1beta1.ConditionTrue, nil
}
}
return false, nil
})
}
// applyObject performs a `kubectl apply` of a single resource
func (sc *syncContext) applyObject(targetObj *unstructured.Unstructured, dryRun bool, force bool) (v1alpha1.ResultCode, string) {
validate := !resource.HasAnnotationOption(targetObj, common.AnnotationSyncOptions, "Validate=false")
message, err := sc.kubectl.ApplyResource(sc.config, targetObj, targetObj.GetNamespace(), dryRun, force, validate)
if err != nil {
return v1alpha1.ResultCodeSyncFailed, err.Error()
}
if kube.IsCRD(targetObj) && !dryRun {
sc.ensureCRDReady(targetObj.GetName())
}
return v1alpha1.ResultCodeSynced, message
}
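// Illustrative only (hypothetical manifest snippet, not in the original source):
// a target object can opt out of the validation performed by applyObject via the
// sync-options annotation it checks, assuming common.AnnotationSyncOptions
// resolves to argocd.argoproj.io/sync-options:
//
//	metadata:
//	  annotations:
//	    argocd.argoproj.io/sync-options: Validate=false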
// pruneObject deletes the object if prune is true and dryRun is false; otherwise it returns an appropriate message
func (sc *syncContext) pruneObject(liveObj *unstructured.Unstructured, prune, dryRun bool) (v1alpha1.ResultCode, string) {
if !prune {
return v1alpha1.ResultCodePruneSkipped, "ignored (requires pruning)"
} else if resource.HasAnnotationOption(liveObj, common.AnnotationSyncOptions, "Prune=false") {
return v1alpha1.ResultCodePruneSkipped, "ignored (no prune)"
} else {
if dryRun {
return v1alpha1.ResultCodePruned, "pruned (dry run)"
} else {
// Skip deletion if object is already marked for deletion, so we don't cause a resource update hotloop
deletionTimestamp := liveObj.GetDeletionTimestamp()
if deletionTimestamp == nil || deletionTimestamp.IsZero() {
err := sc.kubectl.DeleteResource(sc.config, liveObj.GroupVersionKind(), liveObj.GetName(), liveObj.GetNamespace(), false)
if err != nil {
return v1alpha1.ResultCodeSyncFailed, err.Error()
}
}
return v1alpha1.ResultCodePruned, "pruned"
}
}
}
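// Illustrative only (hypothetical manifest snippet, not in the original source):
// similarly, a live object can be protected from pruning via the same
// sync-options annotation checked by pruneObject:
//
//	metadata:
//	  annotations:
//	    argocd.argoproj.io/sync-options: Prune=false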
func (sc *syncContext) hasCRDOfGroupKind(group string, kind string) bool {
for _, obj := range sc.compareResult.targetObjs() {
if kube.IsCRD(obj) {
crdGroup, ok, err := unstructured.NestedString(obj.Object, "spec", "group")
if err != nil || !ok {
continue
}
crdKind, ok, err := unstructured.NestedString(obj.Object, "spec", "names", "kind")
if err != nil || !ok {
continue
}
if group == crdGroup && crdKind == kind {
return true
}
}
}
return false
}
// terminate looks for any running jobs/workflow hooks and deletes the resource
func (sc *syncContext) terminate() {
terminateSuccessful := true
sc.log.Debug("terminating")
tasks, _ := sc.getSyncTasks()
for _, task := range tasks {
if !task.isHook() || task.completed() {
continue
}
if isRunnable(task.groupVersionKind()) {
err := sc.deleteResource(task)
if err != nil {
sc.setResourceResult(task, "", v1alpha1.OperationFailed, fmt.Sprintf("Failed to delete: %v", err))
terminateSuccessful = false
} else {
sc.setResourceResult(task, "", v1alpha1.OperationSucceeded, fmt.Sprintf("Deleted"))
}
}
}
if terminateSuccessful {
sc.setOperationPhase(v1alpha1.OperationFailed, "Operation terminated")
} else {
sc.setOperationPhase(v1alpha1.OperationError, "Operation termination had errors")
}
}
func (sc *syncContext) deleteResource(task *syncTask) error {
sc.log.WithFields(log.Fields{"task": task}).Debug("deleting resource")
resIf, err := sc.getResourceIf(task)
if err != nil {
return err
}
propagationPolicy := metav1.DeletePropagationForeground
return resIf.Delete(task.name(), &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
}
func (sc *syncContext) getResourceIf(task *syncTask) (dynamic.ResourceInterface, error) {
apiResource, err := kube.ServerResourceForGroupVersionKind(sc.disco, task.groupVersionKind())
if err != nil {
return nil, err
}
res := kube.ToGroupVersionResource(task.groupVersionKind().GroupVersion().String(), apiResource)
resIf := kube.ToResourceInterface(sc.dynamicIf, apiResource, res, task.namespace())
return resIf, err
}
var operationPhases = map[v1alpha1.ResultCode]v1alpha1.OperationPhase{
v1alpha1.ResultCodeSynced: v1alpha1.OperationRunning,
v1alpha1.ResultCodeSyncFailed: v1alpha1.OperationFailed,
v1alpha1.ResultCodePruned: v1alpha1.OperationSucceeded,
v1alpha1.ResultCodePruneSkipped: v1alpha1.OperationSucceeded,
}
// tri-state
type runState = int
const (
successful = iota
pending
failed
)
func (sc *syncContext) runTasks(tasks syncTasks, dryRun bool) runState {
dryRun = dryRun || sc.syncOp.DryRun
sc.log.WithFields(log.Fields{"numTasks": len(tasks), "dryRun": dryRun}).Debug("running tasks")
runState := successful
var createTasks syncTasks
var pruneTasks syncTasks
for _, task := range tasks {
if task.isPrune() {
pruneTasks = append(pruneTasks, task)
} else {
createTasks = append(createTasks, task)
}
}
// prune first
{
var wg sync.WaitGroup
for _, task := range pruneTasks {
wg.Add(1)
go func(t *syncTask) {
defer wg.Done()
sc.log.WithFields(log.Fields{"dryRun": dryRun, "task": t}).Debug("pruning")
result, message := sc.pruneObject(t.liveObj, sc.syncOp.Prune, dryRun)
if result == v1alpha1.ResultCodeSyncFailed {
runState = failed
}
if !dryRun || result == v1alpha1.ResultCodeSyncFailed {
sc.setResourceResult(t, result, operationPhases[result], message)
}
}(task)
}
wg.Wait()
}
// delete anything that needs deleting
if runState == successful && createTasks.Any(func(t *syncTask) bool { return t.needsDeleting() }) {
var wg sync.WaitGroup
for _, task := range createTasks.Filter(func(t *syncTask) bool { return t.needsDeleting() }) {
wg.Add(1)
go func(t *syncTask) {
defer wg.Done()
sc.log.WithFields(log.Fields{"dryRun": dryRun, "task": t}).Debug("deleting")
if !dryRun {
err := sc.deleteResource(t)
if err != nil {
// it is possible to get a race condition here, such that the resource does not exist when
// delete is requested; we treat this as a no-op
if !apierr.IsNotFound(err) {
runState = failed
sc.setResourceResult(t, "", v1alpha1.OperationError, fmt.Sprintf("failed to delete resource: %v", err))
}
} else {
// if there is anything that needs deleting, we are at best now in pending and
// want to return and wait for sync to be invoked again
runState = pending
}
}
}(task)
}
wg.Wait()
}
// finally create resources
if runState == successful {
processCreateTasks := func(tasks syncTasks) {
var createWg sync.WaitGroup
for _, task := range tasks {
if dryRun && task.skipDryRun {
continue
}
createWg.Add(1)
go func(t *syncTask) {
defer createWg.Done()
sc.log.WithFields(log.Fields{"dryRun": dryRun, "task": t}).Debug("applying")
result, message := sc.applyObject(t.targetObj, dryRun, sc.syncOp.SyncStrategy.Force())
if result == v1alpha1.ResultCodeSyncFailed {
runState = failed
}
if !dryRun || result == v1alpha1.ResultCodeSyncFailed {
sc.setResourceResult(t, result, operationPhases[result], message)
}
}(task)
}
createWg.Wait()
}
var tasksGroup syncTasks
for _, task := range createTasks {
// Only wait if the kind of the next task is different from the kind of the previous group
if len(tasksGroup) > 0 && tasksGroup[0].targetObj.GetKind() != task.kind() {
processCreateTasks(tasksGroup)
tasksGroup = syncTasks{task}
} else {
tasksGroup = append(tasksGroup, task)
}
}
if len(tasksGroup) > 0 {
processCreateTasks(tasksGroup)
}
}
return runState
}
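// Note (added for clarity, not in the original source): createTasks are applied in
// kind-grouped batches in the already-sorted task order. For example, the
// hypothetical sequence [ConfigMap a, ConfigMap b, Deployment c] is applied as two
// batches: a and b concurrently, then c.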
// setResourceResult sets a resource's details in the SyncResult.Resources list
func (sc *syncContext) setResourceResult(task *syncTask, syncStatus v1alpha1.ResultCode, operationState v1alpha1.OperationPhase, message string) {
task.syncStatus = syncStatus
task.operationState = operationState
// we always want to keep the latest message
if message != "" {
task.message = message
}
sc.lock.Lock()
defer sc.lock.Unlock()
i, existing := sc.syncRes.Resources.Find(task.group(), task.kind(), task.namespace(), task.name(), task.phase)
res := v1alpha1.ResourceResult{
Group: task.group(),
Version: task.version(),
Kind: task.kind(),
Namespace: task.namespace(),
Name: task.name(),
Status: task.syncStatus,
Message: task.message,
HookType: task.hookType(),
HookPhase: task.operationState,
SyncPhase: task.phase,
}
logCtx := sc.log.WithFields(log.Fields{"namespace": task.namespace(), "kind": task.kind(), "name": task.name(), "phase": task.phase})
if existing != nil {
// update existing value
if res.Status != existing.Status || res.HookPhase != existing.HookPhase || res.Message != existing.Message {
logCtx.Infof("updating resource result, status: '%s' -> '%s', phase '%s' -> '%s', message '%s' -> '%s'",
existing.Status, res.Status,
existing.HookPhase, res.HookPhase,
existing.Message, res.Message)
}
sc.syncRes.Resources[i] = &res
} else {
logCtx.Infof("adding resource result, status: '%s', phase: '%s', message: '%s'", res.Status, res.HookPhase, res.Message)
sc.syncRes.Resources = append(sc.syncRes.Resources, &res)
}
}

View File

@@ -1,120 +0,0 @@
package controller
import (
"fmt"
"github.com/argoproj/argo-cd/util/health"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/batch"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)
// getOperationPhase returns a hook status from a _live_ unstructured object
func getOperationPhase(hook *unstructured.Unstructured) (operation v1alpha1.OperationPhase, message string) {
gvk := hook.GroupVersionKind()
if isBatchJob(gvk) {
return getStatusFromBatchJob(hook)
} else if isArgoWorkflow(gvk) {
return health.GetStatusFromArgoWorkflow(hook)
} else if isPod(gvk) {
return getStatusFromPod(hook)
} else {
return v1alpha1.OperationSucceeded, fmt.Sprintf("%s created", hook.GetName())
}
}
// isRunnable returns whether the resource object is a runnable type which needs to be terminated
func isRunnable(gvk schema.GroupVersionKind) bool {
return isBatchJob(gvk) || isArgoWorkflow(gvk) || isPod(gvk)
}
func isBatchJob(gvk schema.GroupVersionKind) bool {
return gvk.Group == "batch" && gvk.Kind == "Job"
}
// TODO this is a copy-and-paste of health.getJobHealth(), refactor out?
func getStatusFromBatchJob(hook *unstructured.Unstructured) (operation v1alpha1.OperationPhase, message string) {
var job batch.Job
err := runtime.DefaultUnstructuredConverter.FromUnstructured(hook.Object, &job)
if err != nil {
return v1alpha1.OperationError, err.Error()
}
failed := false
var failMsg string
complete := false
for _, condition := range job.Status.Conditions {
switch condition.Type {
case batch.JobFailed:
failed = true
complete = true
failMsg = condition.Message
case batch.JobComplete:
complete = true
message = condition.Message
}
}
if !complete {
return v1alpha1.OperationRunning, message
} else if failed {
return v1alpha1.OperationFailed, failMsg
} else {
return v1alpha1.OperationSucceeded, message
}
}
func isArgoWorkflow(gvk schema.GroupVersionKind) bool {
return gvk.Group == "argoproj.io" && gvk.Kind == "Workflow"
}
func isPod(gvk schema.GroupVersionKind) bool {
return gvk.Group == "" && gvk.Kind == "Pod"
}
// TODO - this is very similar to health.getPodHealth(); should we use that instead?
func getStatusFromPod(hook *unstructured.Unstructured) (v1alpha1.OperationPhase, string) {
var pod apiv1.Pod
err := runtime.DefaultUnstructuredConverter.FromUnstructured(hook.Object, &pod)
if err != nil {
return v1alpha1.OperationError, err.Error()
}
getFailMessage := func(ctr *apiv1.ContainerStatus) string {
if ctr.State.Terminated != nil {
if ctr.State.Terminated.Message != "" {
return ctr.State.Terminated.Message
}
if ctr.State.Terminated.Reason == "OOMKilled" {
return ctr.State.Terminated.Reason
}
if ctr.State.Terminated.ExitCode != 0 {
return fmt.Sprintf("container %q failed with exit code %d", ctr.Name, ctr.State.Terminated.ExitCode)
}
}
return ""
}
switch pod.Status.Phase {
case apiv1.PodPending, apiv1.PodRunning:
return v1alpha1.OperationRunning, ""
case apiv1.PodSucceeded:
return v1alpha1.OperationSucceeded, ""
case apiv1.PodFailed:
if pod.Status.Message != "" {
// Pod has a nice error message. Use that.
return v1alpha1.OperationFailed, pod.Status.Message
}
for _, ctr := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) {
if msg := getFailMessage(&ctr); msg != "" {
return v1alpha1.OperationFailed, msg
}
}
return v1alpha1.OperationFailed, ""
case apiv1.PodUnknown:
return v1alpha1.OperationError, ""
}
return v1alpha1.OperationRunning, ""
}
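// Note (added for clarity, not in the original source): the pod-phase mapping above
// is: Pending/Running -> OperationRunning, Succeeded -> OperationSucceeded,
// Failed -> OperationFailed (with a best-effort failure message extracted from the
// pod or its containers), and Unknown -> OperationError.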

View File

@@ -1,29 +0,0 @@
package controller
import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/hook"
)
func syncPhases(obj *unstructured.Unstructured) []v1alpha1.SyncPhase {
if hook.Skip(obj) {
return nil
} else if hook.IsHook(obj) {
phasesMap := make(map[v1alpha1.SyncPhase]bool)
for _, hookType := range hook.Types(obj) {
switch hookType {
case v1alpha1.HookTypePreSync, v1alpha1.HookTypeSync, v1alpha1.HookTypePostSync, v1alpha1.HookTypeSyncFail:
phasesMap[v1alpha1.SyncPhase(hookType)] = true
}
}
var phases []v1alpha1.SyncPhase
for phase := range phasesMap {
phases = append(phases, phase)
}
return phases
} else {
return []v1alpha1.SyncPhase{v1alpha1.SyncPhaseSync}
}
}
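// Illustrative only (hypothetical helper names, not in the original source); see
// the tests in the next file for the real fixtures:
//
//	syncPhases(plainPod)               // -> [SyncPhaseSync]
//	syncPhases(podWithHook("PreSync")) // -> [SyncPhasePreSync]
//	syncPhases(podWithHook("Garbage")) // -> nil (hook with no valid phase)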

View File

@@ -1,57 +0,0 @@
package controller
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/test"
)
func TestSyncPhaseNone(t *testing.T) {
assert.Equal(t, []SyncPhase{SyncPhaseSync}, syncPhases(&unstructured.Unstructured{}))
}
func TestSyncPhasePreSync(t *testing.T) {
assert.Equal(t, []SyncPhase{SyncPhasePreSync}, syncPhases(pod("PreSync")))
}
func TestSyncPhaseSync(t *testing.T) {
assert.Equal(t, []SyncPhase{SyncPhaseSync}, syncPhases(pod("Sync")))
}
func TestSyncPhaseSkip(t *testing.T) {
assert.Nil(t, syncPhases(pod("Skip")))
}
// garbage hooks are still hooks, but have no phases, because some user spelled something wrong
func TestSyncPhaseGarbage(t *testing.T) {
assert.Nil(t, syncPhases(pod("Garbage")))
}
func TestSyncPhasePost(t *testing.T) {
assert.Equal(t, []SyncPhase{SyncPhasePostSync}, syncPhases(pod("PostSync")))
}
func TestSyncPhaseFail(t *testing.T) {
assert.Equal(t, []SyncPhase{SyncPhaseSyncFail}, syncPhases(pod("SyncFail")))
}
func TestSyncPhaseTwoPhases(t *testing.T) {
assert.ElementsMatch(t, []SyncPhase{SyncPhasePreSync, SyncPhasePostSync}, syncPhases(pod("PreSync,PostSync")))
}
func TestSyncDuplicatedPhases(t *testing.T) {
assert.ElementsMatch(t, []SyncPhase{SyncPhasePreSync}, syncPhases(pod("PreSync,PreSync")))
assert.ElementsMatch(t, []SyncPhase{SyncPhasePreSync}, syncPhases(podWithHelmHook("pre-install,pre-upgrade")))
}
func pod(hookType string) *unstructured.Unstructured {
return test.Annotate(test.NewPod(), "argocd.argoproj.io/hook", hookType)
}
func podWithHelmHook(hookType string) *unstructured.Unstructured {
return test.Annotate(test.NewPod(), "helm.sh/hook", hookType)
}

View File

@@ -1,130 +0,0 @@
package controller
import (
"fmt"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/hook"
"github.com/argoproj/argo-cd/util/resource/syncwaves"
)
// syncTask holds the live and target object. At least one should be non-nil. A targetObj of nil
// indicates the live object needs to be pruned. A liveObj of nil indicates the object has yet to
// be deployed
type syncTask struct {
phase v1alpha1.SyncPhase
liveObj *unstructured.Unstructured
targetObj *unstructured.Unstructured
skipDryRun bool
syncStatus v1alpha1.ResultCode
operationState v1alpha1.OperationPhase
message string
}
func ternary(val bool, a, b string) string {
if val {
return a
} else {
return b
}
}
func (t *syncTask) String() string {
return fmt.Sprintf("%s/%d %s %s/%s:%s/%s %s->%s (%s,%s,%s)",
t.phase, t.wave(),
ternary(t.isHook(), "hook", "resource"), t.group(), t.kind(), t.namespace(), t.name(),
ternary(t.liveObj != nil, "obj", "nil"), ternary(t.targetObj != nil, "obj", "nil"),
t.syncStatus, t.operationState, t.message,
)
}
func (t *syncTask) isPrune() bool {
return t.targetObj == nil
}
// obj returns the target object (if it exists), otherwise the live object.
// Use with some caution - often you explicitly want the live object, not the target object
func (t *syncTask) obj() *unstructured.Unstructured {
return obj(t.targetObj, t.liveObj)
}
func (t *syncTask) wave() int {
return syncwaves.Wave(t.obj())
}
func (t *syncTask) isHook() bool {
return hook.IsHook(t.obj())
}
func (t *syncTask) group() string {
return t.groupVersionKind().Group
}
func (t *syncTask) kind() string {
return t.groupVersionKind().Kind
}
func (t *syncTask) version() string {
return t.groupVersionKind().Version
}
func (t *syncTask) groupVersionKind() schema.GroupVersionKind {
return t.obj().GroupVersionKind()
}
func (t *syncTask) name() string {
return t.obj().GetName()
}
func (t *syncTask) namespace() string {
return t.obj().GetNamespace()
}
func (t *syncTask) pending() bool {
return t.operationState == ""
}
func (t *syncTask) running() bool {
return t.operationState.Running()
}
func (t *syncTask) completed() bool {
return t.operationState.Completed()
}
func (t *syncTask) successful() bool {
return t.operationState.Successful()
}
func (t *syncTask) failed() bool {
return t.operationState.Failed()
}
func (t *syncTask) hookType() v1alpha1.HookType {
if t.isHook() {
return v1alpha1.HookType(t.phase)
} else {
return ""
}
}
func (t *syncTask) hasHookDeletePolicy(policy v1alpha1.HookDeletePolicy) bool {
// a task that is not a hook cannot have a delete policy; it would be meaningless
if !t.isHook() {
return false
}
for _, p := range hook.DeletePolicies(t.obj()) {
if p == policy {
return true
}
}
return false
}
func (t *syncTask) needsDeleting() bool {
return t.liveObj != nil && (t.pending() && t.hasHookDeletePolicy(v1alpha1.HookDeletePolicyBeforeHookCreation) ||
t.successful() && t.hasHookDeletePolicy(v1alpha1.HookDeletePolicyHookSucceeded) ||
t.failed() && t.hasHookDeletePolicy(v1alpha1.HookDeletePolicyHookFailed))
}
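// Illustrative only (hypothetical manifest snippet, not in the original source):
// a hook opts into deletion via the delete-policy annotation consulted above,
// e.g. to delete the hook once it has succeeded:
//
//	metadata:
//	  annotations:
//	    argocd.argoproj.io/hook: Sync
//	    argocd.argoproj.io/hook-delete-policy: HookSucceeded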

View File

@@ -1,66 +0,0 @@
package controller
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
. "github.com/argoproj/argo-cd/test"
)
func Test_syncTask_hookType(t *testing.T) {
type fields struct {
phase SyncPhase
liveObj *unstructured.Unstructured
}
tests := []struct {
name string
fields fields
want HookType
}{
{"Empty", fields{SyncPhaseSync, NewPod()}, ""},
{"PreSyncHook", fields{SyncPhasePreSync, NewHook(HookTypePreSync)}, HookTypePreSync},
{"SyncHook", fields{SyncPhaseSync, NewHook(HookTypeSync)}, HookTypeSync},
{"PostSyncHook", fields{SyncPhasePostSync, NewHook(HookTypePostSync)}, HookTypePostSync},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
task := &syncTask{
phase: tt.fields.phase,
liveObj: tt.fields.liveObj,
}
hookType := task.hookType()
assert.EqualValues(t, tt.want, hookType)
})
}
}
func Test_syncTask_hasHookDeletePolicy(t *testing.T) {
assert.False(t, (&syncTask{targetObj: NewPod()}).hasHookDeletePolicy(HookDeletePolicyBeforeHookCreation))
assert.False(t, (&syncTask{targetObj: NewPod()}).hasHookDeletePolicy(HookDeletePolicyHookSucceeded))
assert.False(t, (&syncTask{targetObj: NewPod()}).hasHookDeletePolicy(HookDeletePolicyHookFailed))
// must be a hook
assert.False(t, (&syncTask{targetObj: Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).hasHookDeletePolicy(HookDeletePolicyBeforeHookCreation))
assert.True(t, (&syncTask{targetObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).hasHookDeletePolicy(HookDeletePolicyBeforeHookCreation))
assert.True(t, (&syncTask{targetObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "HookSucceeded")}).hasHookDeletePolicy(HookDeletePolicyHookSucceeded))
assert.True(t, (&syncTask{targetObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "HookFailed")}).hasHookDeletePolicy(HookDeletePolicyHookFailed))
}
func Test_syncTask_needsDeleting(t *testing.T) {
assert.False(t, (&syncTask{liveObj: NewPod()}).needsDeleting())
// must be a hook
assert.False(t, (&syncTask{liveObj: Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).needsDeleting())
// no need to delete if no live obj
assert.False(t, (&syncTask{targetObj: Annotate(Annotate(NewPod(), "argoocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).needsDeleting())
assert.True(t, (&syncTask{liveObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).needsDeleting())
assert.True(t, (&syncTask{operationState: OperationSucceeded, liveObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "HookSucceeded")}).needsDeleting())
assert.True(t, (&syncTask{operationState: OperationFailed, liveObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "HookFailed")}).needsDeleting())
}
func Test_syncTask_wave(t *testing.T) {
assert.Equal(t, 0, (&syncTask{targetObj: NewPod()}).wave())
assert.Equal(t, 1, (&syncTask{targetObj: Annotate(NewPod(), "argocd.argoproj.io/sync-wave", "1")}).wave())
}

View File

@@ -1,185 +0,0 @@
package controller
import (
"strings"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)
// syncPhaseOrder represents the order in which sync phases are executed
var syncPhaseOrder = map[v1alpha1.SyncPhase]int{
v1alpha1.SyncPhasePreSync: -1,
v1alpha1.SyncPhaseSync: 0,
v1alpha1.SyncPhasePostSync: 1,
v1alpha1.SyncPhaseSyncFail: 2,
}
// kindOrder represents the correct order of Kubernetes resources within a manifest
// https://github.com/helm/helm/blob/master/pkg/tiller/kind_sorter.go
var kindOrder = map[string]int{}
func init() {
kinds := []string{
"Namespace",
"ResourceQuota",
"LimitRange",
"PodSecurityPolicy",
"PodDisruptionBudget",
"Secret",
"ConfigMap",
"StorageClass",
"PersistentVolume",
"PersistentVolumeClaim",
"ServiceAccount",
"CustomResourceDefinition",
"ClusterRole",
"ClusterRoleBinding",
"Role",
"RoleBinding",
"Service",
"DaemonSet",
"Pod",
"ReplicationController",
"ReplicaSet",
"Deployment",
"StatefulSet",
"Job",
"CronJob",
"Ingress",
"APIService",
}
for i, kind := range kinds {
// make sure none of the above entries are zero; we reserve zero for custom resources
kindOrder[kind] = i - len(kinds)
}
}
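// Note (added for clarity, not in the original source): with the 27 kinds above,
// kindOrder["Namespace"] == -27 and kindOrder["APIService"] == -1, while any
// unknown kind (e.g. a custom resource) maps to the zero value 0 and therefore
// sorts after all built-in kinds within its phase and wave.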
type syncTasks []*syncTask
func (s syncTasks) Len() int {
return len(s)
}
func (s syncTasks) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// order is
// 1. phase
// 2. wave
// 3. kind
// 4. name
func (s syncTasks) Less(i, j int) bool {
tA := s[i]
tB := s[j]
d := syncPhaseOrder[tA.phase] - syncPhaseOrder[tB.phase]
if d != 0 {
return d < 0
}
d = tA.wave() - tB.wave()
if d != 0 {
return d < 0
}
a := tA.obj()
b := tB.obj()
// we take advantage of the fact that if the kind is not in the kindOrder map,
// then it will return the default int value of zero, which is the highest value
d = kindOrder[a.GetKind()] - kindOrder[b.GetKind()]
if d != 0 {
return d < 0
}
return a.GetName() < b.GetName()
}
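// Illustrative only (hypothetical fixtures, not in the original source):
//
//	tasks := syncTasks{
//		{targetObj: deployment},                                // Sync phase, wave 0
//		{phase: v1alpha1.SyncPhasePreSync, targetObj: hookPod}, // PreSync phase
//		{targetObj: configMap},                                 // Sync phase, wave 0
//	}
//	sort.Sort(tasks) // -> [hookPod (PreSync), configMap, deployment]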
func (s syncTasks) Filter(predicate func(task *syncTask) bool) (tasks syncTasks) {
for _, task := range s {
if predicate(task) {
tasks = append(tasks, task)
}
}
return tasks
}
func (s syncTasks) Split(predicate func(task *syncTask) bool) (trueTasks, falseTasks syncTasks) {
for _, task := range s {
if predicate(task) {
trueTasks = append(trueTasks, task)
} else {
falseTasks = append(falseTasks, task)
}
}
return trueTasks, falseTasks
}
func (s syncTasks) All(predicate func(task *syncTask) bool) bool {
for _, task := range s {
if !predicate(task) {
return false
}
}
return true
}
func (s syncTasks) Any(predicate func(task *syncTask) bool) bool {
for _, task := range s {
if predicate(task) {
return true
}
}
return false
}
func (s syncTasks) Find(predicate func(task *syncTask) bool) *syncTask {
for _, task := range s {
if predicate(task) {
return task
}
}
return nil
}
func (s syncTasks) String() string {
var values []string
for _, task := range s {
values = append(values, task.String())
}
return "[" + strings.Join(values, ", ") + "]"
}
func (s syncTasks) phase() v1alpha1.SyncPhase {
if len(s) > 0 {
return s[0].phase
}
return ""
}
func (s syncTasks) wave() int {
if len(s) > 0 {
return s[0].wave()
}
return 0
}
func (s syncTasks) lastPhase() v1alpha1.SyncPhase {
if len(s) > 0 {
return s[len(s)-1].phase
}
return ""
}
func (s syncTasks) lastWave() int {
if len(s) > 0 {
return s[len(s)-1].wave()
}
return 0
}
func (s syncTasks) multiStep() bool {
return s.wave() != s.lastWave() || s.phase() != s.lastPhase()
}

View File

@@ -1,392 +0,0 @@
package controller
import (
"sort"
"testing"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/common"
. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
. "github.com/argoproj/argo-cd/test"
)
func Test_syncTasks_kindOrder(t *testing.T) {
assert.Equal(t, -27, kindOrder["Namespace"])
assert.Equal(t, -1, kindOrder["APIService"])
assert.Equal(t, 0, kindOrder["MyCRD"])
}
func TestSortSyncTask(t *testing.T) {
sort.Sort(unsortedTasks)
assert.Equal(t, sortedTasks, unsortedTasks)
}
func TestAnySyncTasks(t *testing.T) {
res := unsortedTasks.Any(func(task *syncTask) bool {
return task.name() == "a"
})
assert.True(t, res)
res = unsortedTasks.Any(func(task *syncTask) bool {
return task.name() == "does-not-exist"
})
assert.False(t, res)
}
func TestAllSyncTasks(t *testing.T) {
res := unsortedTasks.All(func(task *syncTask) bool {
return task.name() != ""
})
assert.False(t, res)
res = unsortedTasks.All(func(task *syncTask) bool {
return task.name() == "a"
})
assert.False(t, res)
}
func TestSplitSyncTasks(t *testing.T) {
named, unnamed := sortedTasks.Split(func(task *syncTask) bool {
return task.name() != ""
})
assert.Equal(t, named, namedObjTasks)
assert.Equal(t, unnamed, unnamedTasks)
}
var unsortedTasks = syncTasks{
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Pod",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "PersistentVolume",
},
},
},
{
phase: SyncPhaseSyncFail, targetObj: &unstructured.Unstructured{},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
"argocd.argoproj.io/sync-wave": "1",
},
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"name": "b",
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"name": "a",
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
"argocd.argoproj.io/sync-wave": "-1",
},
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
},
},
},
{
phase: SyncPhasePreSync,
targetObj: &unstructured.Unstructured{},
},
{
phase: SyncPhasePostSync, targetObj: &unstructured.Unstructured{},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "ConfigMap",
},
},
},
}
var sortedTasks = syncTasks{
{
phase: SyncPhasePreSync,
targetObj: &unstructured.Unstructured{},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
"argocd.argoproj.io/sync-wave": "-1",
},
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "ConfigMap",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "PersistentVolume",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Pod",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"name": "a",
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"name": "b",
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
"argocd.argoproj.io/sync-wave": "1",
},
},
},
},
},
{
phase: SyncPhasePostSync,
targetObj: &unstructured.Unstructured{},
},
{
phase: SyncPhaseSyncFail,
targetObj: &unstructured.Unstructured{},
},
}
var namedObjTasks = syncTasks{
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"name": "a",
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"name": "b",
},
},
},
},
}
var unnamedTasks = syncTasks{
{
phase: SyncPhasePreSync,
targetObj: &unstructured.Unstructured{},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
"argocd.argoproj.io/sync-wave": "-1",
},
},
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "ConfigMap",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "PersistentVolume",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Pod",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
"argocd.argoproj.io/sync-wave": "1",
},
},
},
},
},
{
phase: SyncPhasePostSync,
targetObj: &unstructured.Unstructured{},
},
{
phase: SyncPhaseSyncFail,
targetObj: &unstructured.Unstructured{},
},
}
func Test_syncTasks_Filter(t *testing.T) {
tasks := syncTasks{{phase: SyncPhaseSync}, {phase: SyncPhasePostSync}}
assert.Equal(t, syncTasks{{phase: SyncPhaseSync}}, tasks.Filter(func(t *syncTask) bool {
return t.phase == SyncPhaseSync
}))
}
func TestSyncNamespaceAgainstCRD(t *testing.T) {
crd := &syncTask{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "Workflow",
},
}}
namespace := &syncTask{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "Namespace",
},
},
}
unsorted := syncTasks{crd, namespace}
sort.Sort(unsorted)
assert.Equal(t, syncTasks{namespace, crd}, unsorted)
}
func Test_syncTasks_multiStep(t *testing.T) {
t.Run("Single", func(t *testing.T) {
tasks := syncTasks{{liveObj: Annotate(NewPod(), common.AnnotationSyncWave, "-1"), phase: SyncPhaseSync}}
assert.Equal(t, SyncPhaseSync, tasks.phase())
assert.Equal(t, -1, tasks.wave())
assert.Equal(t, SyncPhaseSync, tasks.lastPhase())
assert.Equal(t, -1, tasks.lastWave())
assert.False(t, tasks.multiStep())
})
t.Run("Double", func(t *testing.T) {
tasks := syncTasks{
{liveObj: Annotate(NewPod(), common.AnnotationSyncWave, "-1"), phase: SyncPhasePreSync},
{liveObj: Annotate(NewPod(), common.AnnotationSyncWave, "1"), phase: SyncPhasePostSync},
}
assert.Equal(t, SyncPhasePreSync, tasks.phase())
assert.Equal(t, -1, tasks.wave())
assert.Equal(t, SyncPhasePostSync, tasks.lastPhase())
assert.Equal(t, 1, tasks.lastWave())
assert.True(t, tasks.multiStep())
})
}
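
Read together, the fixtures above double as a specification: tasks sort by phase, then sync wave, then kubectl-style kind order, then name. The comparator below is a hedged sketch inferred from those fixtures, not the controller's actual `Less` implementation; apart from `Namespace` (-27), which is pinned by `Test_syncTasks_kindOrder`, the kind weights are made up for illustration:

```go
package main

import (
	"fmt"
	"sort"
)

// Phases in execution order; the empty string (no explicit phase)
// gets the zero value and therefore sorts with "Sync".
var phaseOrder = map[string]int{"PreSync": -1, "Sync": 0, "PostSync": 1, "SyncFail": 2}

// Only the Namespace value is pinned by the tests above; the others
// are illustrative placeholders for the kubectl creation order.
// Unknown kinds default to 0 and therefore sort last.
var kindOrder = map[string]int{
	"Namespace":        -27,
	"ConfigMap":        -20,
	"PersistentVolume": -18,
	"Service":          -14,
	"Pod":              -9,
}

type task struct {
	phase string
	wave  int
	kind  string
	name  string
}

func sortTasks(ts []task) {
	sort.SliceStable(ts, func(i, j int) bool {
		a, b := ts[i], ts[j]
		if a.phase != b.phase {
			return phaseOrder[a.phase] < phaseOrder[b.phase]
		}
		if a.wave != b.wave {
			return a.wave < b.wave
		}
		if kindOrder[a.kind] != kindOrder[b.kind] {
			return kindOrder[a.kind] < kindOrder[b.kind]
		}
		return a.name < b.name
	})
}

func main() {
	ts := []task{
		{phase: "PostSync"},
		{phase: "Sync", kind: "Pod"},
		{phase: "Sync", kind: "Namespace"},
		{phase: "PreSync"},
	}
	sortTasks(ts)
	fmt.Println(ts) // PreSync hook, then Namespace, then Pod, then the PostSync hook
}
```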


@@ -1,699 +0,0 @@
package controller
import (
"fmt"
"reflect"
"testing"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
fakedisco "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/rest"
testcore "k8s.io/client-go/testing"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/apiclient"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/kube/kubetest"
)
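// newTestSyncCtx builds a syncContext wired up with fake discovery data,
// a permissive test project, and an apply sync strategy; individual tests
// override fields such as kubectl or the project spec as needed.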
func newTestSyncCtx(resources ...*v1.APIResourceList) *syncContext {
fakeDisco := &fakedisco.FakeDiscovery{Fake: &testcore.Fake{}}
fakeDisco.Resources = append(resources,
&v1.APIResourceList{
GroupVersion: "v1",
APIResources: []v1.APIResource{
{Kind: "Pod", Group: "", Version: "v1", Namespaced: true},
{Kind: "Service", Group: "", Version: "v1", Namespaced: true},
},
},
&v1.APIResourceList{
GroupVersion: "apps/v1",
APIResources: []v1.APIResource{
{Kind: "Deployment", Group: "apps", Version: "v1", Namespaced: true},
},
})
sc := syncContext{
config: &rest.Config{},
namespace: test.FakeArgoCDNamespace,
server: test.FakeClusterURL,
syncRes: &v1alpha1.SyncOperationResult{
Revision: "FooBarBaz",
},
syncOp: &v1alpha1.SyncOperation{
Prune: true,
SyncStrategy: &v1alpha1.SyncStrategy{
Apply: &v1alpha1.SyncStrategyApply{},
},
},
proj: &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: v1alpha1.AppProjectSpec{
Destinations: []v1alpha1.ApplicationDestination{{
Server: test.FakeClusterURL,
Namespace: test.FakeArgoCDNamespace,
}},
ClusterResourceWhitelist: []v1.GroupKind{
{Group: "*", Kind: "*"},
},
},
},
opState: &v1alpha1.OperationState{},
disco: fakeDisco,
log: log.WithFields(log.Fields{"application": "fake-app"}),
}
sc.kubectl = &kubetest.MockKubectlCmd{}
return &sc
}
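// newManagedResource wraps a live object in a managedResource, copying the
// group, version, kind, namespace, and name from the live object itself.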
func newManagedResource(live *unstructured.Unstructured) managedResource {
return managedResource{
Live: live,
Group: live.GroupVersionKind().Group,
Version: live.GroupVersionKind().Version,
Kind: live.GroupVersionKind().Kind,
Namespace: live.GetNamespace(),
Name: live.GetName(),
}
}
func TestSyncNotPermittedNamespace(t *testing.T) {
syncCtx := newTestSyncCtx()
targetPod := test.NewPod()
targetPod.SetNamespace("kube-system")
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: targetPod,
}, {
Live: nil,
Target: test.NewService(),
}},
}
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationFailed, syncCtx.opState.Phase)
assert.Contains(t, syncCtx.syncRes.Resources[0].Message, "not permitted in project")
}
func TestSyncCreateInSortedOrder(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: test.NewPod(),
}, {
Live: nil,
Target: test.NewService(),
}},
}
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
assert.Len(t, syncCtx.syncRes.Resources, 2)
for i := range syncCtx.syncRes.Resources {
result := syncCtx.syncRes.Resources[i]
if result.Kind == "Pod" {
assert.Equal(t, v1alpha1.ResultCodeSynced, result.Status)
assert.Equal(t, "", result.Message)
} else if result.Kind == "Service" {
assert.Equal(t, "", result.Message)
} else {
t.Error("Resource isn't a pod or a service")
}
}
}
func TestSyncCreateNotWhitelistedClusterResources(t *testing.T) {
syncCtx := newTestSyncCtx(&v1.APIResourceList{
GroupVersion: v1alpha1.SchemeGroupVersion.String(),
APIResources: []v1.APIResource{
{Name: "workflows", Namespaced: false, Kind: "Workflow", Group: "argoproj.io"},
{Name: "application", Namespaced: false, Kind: "Application", Group: "argoproj.io"},
},
}, &v1.APIResourceList{
GroupVersion: "rbac.authorization.k8s.io/v1",
APIResources: []v1.APIResource{
{Name: "clusterroles", Namespaced: false, Kind: "ClusterRole", Group: "rbac.authorization.k8s.io"},
},
})
syncCtx.proj.Spec.ClusterResourceWhitelist = []v1.GroupKind{
{Group: "argoproj.io", Kind: "*"},
}
syncCtx.kubectl = &kubetest.MockKubectlCmd{}
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: kube.MustToUnstructured(&rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{Name: "argo-ui-cluster-role"}}),
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
result := syncCtx.syncRes.Resources[0]
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, result.Status)
assert.Contains(t, result.Message, "not permitted in project")
}
func TestSyncBlacklistedNamespacedResources(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.proj.Spec.NamespaceResourceBlacklist = []v1.GroupKind{
{Group: "*", Kind: "Deployment"},
}
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: test.NewDeployment(),
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
result := syncCtx.syncRes.Resources[0]
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, result.Status)
assert.Contains(t, result.Message, "not permitted in project")
}
func TestSyncSuccessfully(t *testing.T) {
syncCtx := newTestSyncCtx()
pod := test.NewPod()
pod.SetNamespace(test.FakeArgoCDNamespace)
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: test.NewService(),
}, {
Live: pod,
Target: nil,
}},
}
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
assert.Len(t, syncCtx.syncRes.Resources, 2)
for i := range syncCtx.syncRes.Resources {
result := syncCtx.syncRes.Resources[i]
if result.Kind == "Pod" {
assert.Equal(t, v1alpha1.ResultCodePruned, result.Status)
assert.Equal(t, "pruned", result.Message)
} else if result.Kind == "Service" {
assert.Equal(t, v1alpha1.ResultCodeSynced, result.Status)
assert.Equal(t, "", result.Message)
} else {
t.Error("Resource isn't a pod or a service")
}
}
}
func TestSyncDeleteSuccessfully(t *testing.T) {
syncCtx := newTestSyncCtx()
svc := test.NewService()
svc.SetNamespace(test.FakeArgoCDNamespace)
pod := test.NewPod()
pod.SetNamespace(test.FakeArgoCDNamespace)
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: svc,
Target: nil,
}, {
Live: pod,
Target: nil,
}},
}
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
for i := range syncCtx.syncRes.Resources {
result := syncCtx.syncRes.Resources[i]
if result.Kind == "Pod" {
assert.Equal(t, v1alpha1.ResultCodePruned, result.Status)
assert.Equal(t, "pruned", result.Message)
} else if result.Kind == "Service" {
assert.Equal(t, v1alpha1.ResultCodePruned, result.Status)
assert.Equal(t, "pruned", result.Message)
} else {
t.Error("Resource isn't a pod or a service")
}
}
}
func TestSyncCreateFailure(t *testing.T) {
syncCtx := newTestSyncCtx()
testSvc := test.NewService()
syncCtx.kubectl = &kubetest.MockKubectlCmd{
Commands: map[string]kubetest.KubectlOutput{
testSvc.GetName(): {
Output: "",
Err: fmt.Errorf("foo"),
},
},
}
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: testSvc,
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
result := syncCtx.syncRes.Resources[0]
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, result.Status)
assert.Equal(t, "foo", result.Message)
}
func TestSyncPruneFailure(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.kubectl = &kubetest.MockKubectlCmd{
Commands: map[string]kubetest.KubectlOutput{
"test-service": {
Output: "",
Err: fmt.Errorf("foo"),
},
},
}
testSvc := test.NewService()
testSvc.SetName("test-service")
testSvc.SetNamespace(test.FakeArgoCDNamespace)
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: testSvc,
Target: nil,
}},
}
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationFailed, syncCtx.opState.Phase)
assert.Len(t, syncCtx.syncRes.Resources, 1)
result := syncCtx.syncRes.Resources[0]
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, result.Status)
assert.Equal(t, "foo", result.Message)
}
func TestDontSyncOrPruneHooks(t *testing.T) {
syncCtx := newTestSyncCtx()
targetPod := test.NewPod()
targetPod.SetName("dont-create-me")
targetPod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
liveSvc := test.NewService()
liveSvc.SetName("dont-prune-me")
liveSvc.SetNamespace(test.FakeArgoCDNamespace)
liveSvc.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
syncCtx.compareResult = &comparisonResult{
hooks: []*unstructured.Unstructured{targetPod, liveSvc},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 0)
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
}
// make sure that we do not prune resources with Prune=false
func TestDontPrunePruneFalse(t *testing.T) {
syncCtx := newTestSyncCtx()
pod := test.NewPod()
pod.SetAnnotations(map[string]string{common.AnnotationSyncOptions: "Prune=false"})
pod.SetNamespace(test.FakeArgoCDNamespace)
syncCtx.compareResult = &comparisonResult{managedResources: []managedResource{{Live: pod}}}
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResultCodePruneSkipped, syncCtx.syncRes.Resources[0].Status)
assert.Equal(t, "ignored (no prune)", syncCtx.syncRes.Resources[0].Message)
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
}
// make sure Validate=false means we don't validate
func TestSyncOptionValidate(t *testing.T) {
tests := []struct {
name string
annotationVal string
want bool
}{
{"Empty", "", true},
{"True", "Validate=true", true},
{"False", "Validate=false", false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
syncCtx := newTestSyncCtx()
pod := test.NewPod()
pod.SetAnnotations(map[string]string{common.AnnotationSyncOptions: tt.annotationVal})
pod.SetNamespace(test.FakeArgoCDNamespace)
syncCtx.compareResult = &comparisonResult{managedResources: []managedResource{{Target: pod, Live: pod}}}
syncCtx.sync()
kubectl, _ := syncCtx.kubectl.(*kubetest.MockKubectlCmd)
assert.Equal(t, tt.want, kubectl.LastValidate)
})
}
}
func TestSelectiveSyncOnly(t *testing.T) {
syncCtx := newTestSyncCtx()
pod1 := test.NewPod()
pod1.SetName("pod-1")
pod2 := test.NewPod()
pod2.SetName("pod-2")
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{Target: pod1}},
}
syncCtx.syncResources = []v1alpha1.SyncOperationResource{{Kind: "Pod", Name: "pod-1"}}
tasks, successful := syncCtx.getSyncTasks()
assert.True(t, successful)
assert.Len(t, tasks, 1)
assert.Equal(t, "pod-1", tasks[0].name())
}
func TestUnnamedHooksGetUniqueNames(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
pod := test.NewPod()
pod.SetName("")
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync,PostSync"})
syncCtx.compareResult = &comparisonResult{hooks: []*unstructured.Unstructured{pod}}
tasks, successful := syncCtx.getSyncTasks()
assert.True(t, successful)
assert.Len(t, tasks, 2)
assert.Contains(t, tasks[0].name(), "foobarb-presync-")
assert.Contains(t, tasks[1].name(), "foobarb-postsync-")
assert.Equal(t, "", pod.GetName())
}
func TestManagedResourceAreNotNamed(t *testing.T) {
syncCtx := newTestSyncCtx()
pod := test.NewPod()
pod.SetName("")
syncCtx.compareResult = &comparisonResult{managedResources: []managedResource{{Target: pod}}}
tasks, successful := syncCtx.getSyncTasks()
assert.True(t, successful)
assert.Len(t, tasks, 1)
assert.Equal(t, "", tasks[0].name())
assert.Equal(t, "", pod.GetName())
}
func TestDeDupingTasks(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
pod := test.NewPod()
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "Sync"})
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{Target: pod}},
hooks: []*unstructured.Unstructured{pod},
}
tasks, successful := syncCtx.getSyncTasks()
assert.True(t, successful)
assert.Len(t, tasks, 1)
}
func TestObjectsGetANamespace(t *testing.T) {
syncCtx := newTestSyncCtx()
pod := test.NewPod()
syncCtx.compareResult = &comparisonResult{managedResources: []managedResource{{Target: pod}}}
tasks, successful := syncCtx.getSyncTasks()
assert.True(t, successful)
assert.Len(t, tasks, 1)
assert.Equal(t, test.FakeArgoCDNamespace, tasks[0].namespace())
assert.Equal(t, "", pod.GetNamespace())
}
func TestPersistRevisionHistory(t *testing.T) {
app := newFakeApp()
app.Status.OperationState = nil
app.Status.History = nil
defaultProject := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: test.FakeArgoCDNamespace,
Name: "default",
},
}
data := fakeData{
apps: []runtime.Object{app, defaultProject},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
}}
ctrl.appStateManager.SyncAppState(app, opState)
// Ensure we record spec.source into sync result
assert.Equal(t, app.Spec.Source, opState.SyncResult.Source)
updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, v1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, 1, len(updatedApp.Status.History))
assert.Equal(t, app.Spec.Source, updatedApp.Status.History[0].Source)
assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
}
func TestPersistRevisionHistoryRollback(t *testing.T) {
app := newFakeApp()
app.Status.OperationState = nil
app.Status.History = nil
defaultProject := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: test.FakeArgoCDNamespace,
Name: "default",
},
}
data := fakeData{
apps: []runtime.Object{app, defaultProject},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
// Sync with source specified
source := v1alpha1.ApplicationSource{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "test",
Value: "123",
},
},
},
}
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &source,
},
}}
ctrl.appStateManager.SyncAppState(app, opState)
// Ensure we record opState's source into sync result
assert.Equal(t, source, opState.SyncResult.Source)
updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, v1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, 1, len(updatedApp.Status.History))
assert.Equal(t, source, updatedApp.Status.History[0].Source)
assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
}
func TestSyncFailureHookWithSuccessfulSync(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{Target: test.NewPod()}},
hooks: []*unstructured.Unstructured{test.NewHook(HookTypeSyncFail)},
}
syncCtx.sync()
assert.Equal(t, OperationSucceeded, syncCtx.opState.Phase)
// only one result; we did not run the failure hook
assert.Len(t, syncCtx.syncRes.Resources, 1)
}
func TestSyncFailureHookWithFailedSync(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
pod := test.NewPod()
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{Target: pod}},
hooks: []*unstructured.Unstructured{test.NewHook(HookTypeSyncFail)},
}
syncCtx.kubectl = &kubetest.MockKubectlCmd{
Commands: map[string]kubetest.KubectlOutput{pod.GetName(): {Err: fmt.Errorf("")}},
}
syncCtx.sync()
syncCtx.sync()
assert.Equal(t, OperationFailed, syncCtx.opState.Phase)
assert.Len(t, syncCtx.syncRes.Resources, 2)
}
func TestBeforeHookCreation(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
hook := test.Annotate(test.Annotate(test.NewPod(), common.AnnotationKeyHook, "Sync"), common.AnnotationKeyHookDeletePolicy, "BeforeHookCreation")
hook.SetNamespace(test.FakeArgoCDNamespace)
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{newManagedResource(hook)},
hooks: []*unstructured.Unstructured{hook},
}
syncCtx.dynamicIf = fake.NewSimpleDynamicClient(runtime.NewScheme())
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Empty(t, syncCtx.syncRes.Resources[0].Message)
}
func TestRunSyncFailHooksFailed(t *testing.T) {
// Tests that other SyncFail hooks run even if one of them fails.
syncCtx := newTestSyncCtx()
syncCtx.syncOp.SyncStrategy.Apply = nil
pod := test.NewPod()
successfulSyncFailHook := test.NewHook(HookTypeSyncFail)
successfulSyncFailHook.SetName("successful-sync-fail-hook")
failedSyncFailHook := test.NewHook(HookTypeSyncFail)
failedSyncFailHook.SetName("failed-sync-fail-hook")
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{Target: pod}},
hooks: []*unstructured.Unstructured{successfulSyncFailHook, failedSyncFailHook},
}
syncCtx.kubectl = &kubetest.MockKubectlCmd{
Commands: map[string]kubetest.KubectlOutput{
// Fail operation
pod.GetName(): {Err: fmt.Errorf("")},
// Fail a single SyncFail hook
failedSyncFailHook.GetName(): {Err: fmt.Errorf("")}},
}
syncCtx.sync()
syncCtx.sync()
fmt.Println(syncCtx.syncRes.Resources)
fmt.Println(syncCtx.opState.Phase)
// Operation as a whole should fail
assert.Equal(t, OperationFailed, syncCtx.opState.Phase)
// failedSyncFailHook should fail
assert.Equal(t, OperationFailed, syncCtx.syncRes.Resources[1].HookPhase)
assert.Equal(t, ResultCodeSyncFailed, syncCtx.syncRes.Resources[1].Status)
// successfulSyncFailHook should be synced and running (it is an nginx pod)
assert.Equal(t, OperationRunning, syncCtx.syncRes.Resources[2].HookPhase)
assert.Equal(t, ResultCodeSynced, syncCtx.syncRes.Resources[2].Status)
}
func Test_syncContext_isSelectiveSync(t *testing.T) {
type fields struct {
compareResult *comparisonResult
syncResources []SyncOperationResource
}
oneSyncResource := []SyncOperationResource{{}}
oneResource := func(group, kind, name string, hook bool) *comparisonResult {
return &comparisonResult{resources: []v1alpha1.ResourceStatus{{Group: group, Kind: kind, Name: name, Hook: hook}}}
}
tests := []struct {
name string
fields fields
want bool
}{
{"Empty", fields{}, false},
{"OneCompareResult", fields{oneResource("", "", "", false), []SyncOperationResource{}}, true},
{"OneSyncResource", fields{&comparisonResult{}, oneSyncResource}, true},
{"Equal", fields{oneResource("", "", "", false), oneSyncResource}, false},
{"EqualOutOfOrder", fields{&comparisonResult{resources: []v1alpha1.ResourceStatus{{Group: "a"}, {Group: "b"}}}, []SyncOperationResource{{Group: "b"}, {Group: "a"}}}, false},
{"KindDifferent", fields{oneResource("foo", "", "", false), oneSyncResource}, true},
{"GroupDifferent", fields{oneResource("", "foo", "", false), oneSyncResource}, true},
{"NameDifferent", fields{oneResource("", "", "foo", false), oneSyncResource}, true},
{"HookIgnored", fields{oneResource("", "", "", true), []SyncOperationResource{}}, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
sc := &syncContext{
compareResult: tt.fields.compareResult,
syncResources: tt.fields.syncResources,
}
if got := sc.isSelectiveSync(); got != tt.want {
t.Errorf("syncContext.isSelectiveSync() = %v, want %v", got, tt.want)
}
})
}
}
func Test_syncContext_liveObj(t *testing.T) {
type fields struct {
compareResult *comparisonResult
}
type args struct {
obj *unstructured.Unstructured
}
obj := test.NewPod()
obj.SetNamespace("my-ns")
found := test.NewPod()
tests := []struct {
name string
fields fields
args args
want *unstructured.Unstructured
}{
{"None", fields{compareResult: &comparisonResult{managedResources: []managedResource{}}}, args{obj: &unstructured.Unstructured{}}, nil},
{"Found", fields{compareResult: &comparisonResult{managedResources: []managedResource{{Group: obj.GroupVersionKind().Group, Kind: obj.GetKind(), Namespace: obj.GetNamespace(), Name: obj.GetName(), Live: found}}}}, args{obj: obj}, found},
{"EmptyNamespace", fields{compareResult: &comparisonResult{managedResources: []managedResource{{Group: obj.GroupVersionKind().Group, Kind: obj.GetKind(), Name: obj.GetName(), Live: found}}}}, args{obj: obj}, found},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
sc := &syncContext{
compareResult: tt.fields.compareResult,
}
if got := sc.liveObj(tt.args.obj); !reflect.DeepEqual(got, tt.want) {
t.Errorf("syncContext.liveObj() = %v, want %v", got, tt.want)
}
})
}
}
func Test_syncContext_hasCRDOfGroupKind(t *testing.T) {
// target
assert.False(t, (&syncContext{compareResult: &comparisonResult{managedResources: []managedResource{{Target: test.NewCRD()}}}}).hasCRDOfGroupKind("", ""))
assert.True(t, (&syncContext{compareResult: &comparisonResult{managedResources: []managedResource{{Target: test.NewCRD()}}}}).hasCRDOfGroupKind("argoproj.io", "TestCrd"))
// hook
assert.False(t, (&syncContext{compareResult: &comparisonResult{hooks: []*unstructured.Unstructured{test.NewCRD()}}}).hasCRDOfGroupKind("", ""))
assert.True(t, (&syncContext{compareResult: &comparisonResult{hooks: []*unstructured.Unstructured{test.NewCRD()}}}).hasCRDOfGroupKind("argoproj.io", "TestCrd"))
}


@@ -1,166 +0,0 @@
# Contributing
## Before You Start
You must install and run ArgoCD using a local Kubernetes (e.g. Docker for Desktop or Minikube) first. This will help you understand the application and also get your local environment set up.
Then, to get a good grounding in Go, try out [the tutorial](https://tour.golang.org/).
## Pre-requisites
Install:
* [docker](https://docs.docker.com/install/#supported-platforms)
* [git](https://git-scm.com/) and [git-lfs](https://git-lfs.github.com/)
* [golang](https://golang.org/)
* [dep](https://github.com/golang/dep)
* [ksonnet](https://github.com/ksonnet/ksonnet#install)
* [helm](https://github.com/helm/helm/releases)
* [kustomize](https://github.com/kubernetes-sigs/kustomize/releases)
* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
* [kubectx](https://kubectx.dev)
* [minikube](https://kubernetes.io/docs/setup/minikube/) or Docker for Desktop
Brew users can quickly install the lot:
```bash
brew install go git-lfs kubectl kubectx dep ksonnet/tap/ks kubernetes-helm kustomize
```
Check the versions:
```
go version ;# must be v1.12.x
helm version ;# must be v2.13.x
kustomize version ;# must be v3.10.x
```
Set up environment variables (e.g. in `~/.bashrc`):
```bash
export GOPATH=~/go
export PATH=$PATH:$GOPATH/bin
```
Check out the code:
```bash
go get -u github.com/argoproj/argo-cd
cd ~/go/src/github.com/argoproj/argo-cd
```
## Building
Ensure dependencies are up to date first:
```shell
dep ensure
make dev-builder-image
make install-lint-tools
go get github.com/mattn/goreman
go get github.com/jstemmer/go-junit-report
```
Common make targets:
* `make codegen` - Run code generation
* `make lint` - Lint code
* `make test` - Run unit tests
* `make cli` - Make the `argocd` CLI tool
Check out the following [documentation](https://github.com/argoproj/argo-cd/blob/master/docs/developer-guide/test-e2e.md) for instructions on running the e2e tests.
## Running Locally
It is much easier to run and debug ArgoCD on your local machine than inside the Kubernetes cluster.
You should scale the deployments to zero:
```bash
kubectl -n argocd scale deployment/argocd-application-controller --replicas 0
kubectl -n argocd scale deployment/argocd-dex-server --replicas 0
kubectl -n argocd scale deployment/argocd-repo-server --replicas 0
kubectl -n argocd scale deployment/argocd-server --replicas 0
kubectl -n argocd scale deployment/argocd-redis --replicas 0
```
Download Yarn dependencies and compile:
```bash
cd ~/go/src/github.com/argoproj/argo-cd/ui
yarn install
yarn build
```
Then start the services:
```bash
cd ~/go/src/github.com/argoproj/argo-cd
make start
```
You can now execute the `argocd` command against your locally running ArgoCD by appending `--server localhost:8080 --plaintext --insecure`, e.g.:
```bash
argocd app create guestbook --path guestbook --repo https://github.com/argoproj/argocd-example-apps.git --dest-server https://kubernetes.default.svc --dest-namespace default --server localhost:8080 --plaintext --insecure
```
You can open the UI: http://localhost:4000
As an alternative to passing these command-line parameters each time you call the `argocd` CLI, you can set the following environment variables:
```bash
export ARGOCD_SERVER=127.0.0.1:8080
export ARGOCD_OPTS="--plaintext --insecure"
```
## Running Local Containers
You may need to run containers locally, so here's how:
Create a Docker Hub account, then log in:
```bash
docker login
```
Add your username as an environment variable, e.g. in your `~/.bash_profile`:
```bash
export IMAGE_NAMESPACE=alexcollinsintuit
```
If you don't want to use `latest` as the image's tag (the default), you can set it from the environment too:
```bash
export IMAGE_TAG=yourtag
```
Build the image:
```bash
DOCKER_PUSH=true make image
```
Update the manifests (be sure to do this from a shell that has the above environment variables set):
```bash
make manifests
```
Install the manifests:
```bash
kubectl -n argocd apply --force -f manifests/install.yaml
```
Scale your deployments up:
```bash
kubectl -n argocd scale deployment/argocd-application-controller --replicas 1
kubectl -n argocd scale deployment/argocd-dex-server --replicas 1
kubectl -n argocd scale deployment/argocd-repo-server --replicas 1
kubectl -n argocd scale deployment/argocd-server --replicas 1
kubectl -n argocd scale deployment/argocd-redis --replicas 1
```
Now you can set up port-forwarding and open the UI or use the CLI.


@@ -1,6 +0,0 @@
# Support
1. Make sure you've read [understanding the basics](understand_the_basics.md) and the [getting started guide](getting_started.md).
2. Look for an answer in [the frequently asked questions](faq.md).
3. Ask a question in [the Argo CD Slack channel ⧉](https://argoproj.github.io/community/join-slack).
4. [Read issues, report a bug, or request a feature ⧉](https://github.com/argoproj/argo-cd/issues)


@@ -1,34 +1,36 @@
-# Architectural Overview
+# Argo CD - Architectural Overview
-![Argo CD Architecture](../assets/argocd_architecture.png)
+![Argo CD Architecture](argocd_architecture.png)
## Components
### API Server
The API server is a gRPC/REST server which exposes the API consumed by the Web UI, CLI, and CI/CD
systems. It has the following responsibilities:
* application management and status reporting
* invoking of application operations (e.g. sync, rollback, user-defined actions)
* repository and cluster credential management (stored as K8s secrets)
* authentication and auth delegation to external identity providers
* RBAC enforcement
-* listener/forwarder for Git webhook events
+* listener/forwarder for git webhook events
### Repository Server
-The repository server is an internal service which maintains a local cache of the Git repository
+The repository server is an internal service which maintains a local cache of the git repository
holding the application manifests. It is responsible for generating and returning the Kubernetes
manifests when provided the following inputs:
* repository URL
-* revision (commit, tag, branch)
+* git revision (commit, tag, branch)
* application path
-* template specific settings: parameters, ksonnet environments, helm values.yaml
+* application environment
### Application Controller
The application controller is a Kubernetes controller which continuously monitors running
applications and compares the current, live state against the desired target state (as specified in
-the repo). It detects `OutOfSync` application state and optionally takes corrective action. It
-is responsible for invoking any user-defined hooks for lifecycle events (PreSync, Sync, PostSync).
+the git repo). It detects out-of-sync application state and optionally takes corrective action. It
+is responsible for invoking any user-defined handlers (argo workflows) for Sync and OutOfSync events.
### Application CRD (Custom Resource Definition)
The Application CRD is the Kubernetes resource object representing a deployed application instance
in an environment. It holds a reference to the desired target state (repo, revision, app, environment)
which the application controller enforces against the cluster.
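
To make the repository server's contract concrete, here is a small sketch of the inputs and output described above. The response fields mirror `reposerver/apiclient.ManifestResponse` as used by the controller tests earlier in this diff; the request struct, its field names, and `generateManifests` are assumptions for illustration, not the actual reposerver API:

```go
package main

import "fmt"

// manifestRequest carries the inputs listed above (illustrative only).
type manifestRequest struct {
	RepoURL     string // repository URL
	Revision    string // git revision: commit, tag, or branch
	AppPath     string // application path within the repo
	Environment string // application environment
}

// manifestResponse mirrors the fields of apiclient.ManifestResponse
// seen in the tests above.
type manifestResponse struct {
	Manifests []string
	Namespace string
	Server    string
	Revision  string
}

// generateManifests stands in for the repo server: check out the repo at
// the requested revision, render the app's templates, return the manifests.
func generateManifests(req manifestRequest) manifestResponse {
	return manifestResponse{
		Manifests: []string{}, // rendered Kubernetes YAML would go here
		Namespace: "default",
		Server:    "https://kubernetes.default.svc",
		Revision:  req.Revision,
	}
}

func main() {
	resp := generateManifests(manifestRequest{
		RepoURL:     "https://github.com/argoproj/argocd-example-apps.git",
		Revision:    "HEAD",
		AppPath:     "guestbook",
		Environment: "default",
	})
	fmt.Printf("generated %d manifests at revision %s\n", len(resp.Manifests), resp.Revision)
}
```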

BIN docs/argocd-ui.png added (new file, 109 KiB; binary not shown)

BIN one image changed (119 KiB before and after; binary not shown)

BIN five images deleted (51 KiB, 3.5 MiB, 187 KiB, 280 KiB, 86 KiB; binaries not shown)

Some files were not shown because too many files have changed in this diff.