Compare commits

..

2 Commits

462 changed files with 13398 additions and 55283 deletions


@@ -10,94 +10,32 @@ spec:
value: master
- name: repo
value: https://github.com/argoproj/argo-cd.git
volumes:
- name: k3setc
emptyDir: {}
- name: k3svar
emptyDir: {}
- name: tmp
emptyDir: {}
templates:
- name: argo-cd-ci
steps:
- - name: build-e2e
template: build-e2e
- - name: build
template: ci-dind
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- make controller-image server-image repo-server-image
- name: test
template: ci-builder
arguments:
parameters:
- name: cmd
value: "dep ensure && make lint test && bash <(curl -s https://codecov.io/bash) -f coverage.out"
      # This step builds the Argo CD image, deploys the Argo CD components into a throw-away Kubernetes cluster provisioned with k3s, and runs the e2e tests against it.
- name: build-e2e
inputs:
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:v0.13.1
imagePullPolicy: Always
command: [sh, -c]
      # The main container builds the Argo CD image. The image is saved into the k3s agent images directory so that it can be preloaded by the k3s cluster.
args: ["
dep ensure && until docker ps; do sleep 3; done && \
make image DEV_IMAGE=true && mkdir -p /var/lib/rancher/k3s/agent/images && \
docker save argocd:latest > /var/lib/rancher/k3s/agent/images/argocd.tar && \
touch /var/lib/rancher/k3s/ready && until ls /etc/rancher/k3s/k3s.yaml; do sleep 3; done && \
kubectl create ns argocd-e2e && kustomize build ./test/manifests/ci | kubectl apply -n argocd-e2e -f - && \
kubectl rollout status deployment -n argocd-e2e argocd-application-controller && kubectl rollout status deployment -n argocd-e2e argocd-server && \
git config --global user.email \"test@example.com\" && \
export ARGOCD_SERVER=$(kubectl get service argocd-server -o=jsonpath={.spec.clusterIP} -n argocd-e2e):443 && make test-e2e"
]
workingDir: /go/src/github.com/argoproj/argo-cd
env:
- name: USER
value: argocd
- name: DOCKER_HOST
value: 127.0.0.1
- name: DOCKER_BUILDKIT
value: "1"
- name: KUBECONFIG
value: /etc/rancher/k3s/k3s.yaml
volumeMounts:
- name: tmp
mountPath: /tmp
- name: k3setc
mountPath: /etc/rancher/k3s
- name: k3svar
mountPath: /var/lib/rancher/k3s
sidecars:
- name: dind
image: docker:18.09-dind
securityContext:
privileged: true
resources:
requests:
memory: 2048Mi
cpu: 500m
mirrorVolumeMounts: true
    # The sidecar waits for the file /var/lib/rancher/k3s/ready, which indicates that all required images are in place, then starts the cluster.
- name: k3s
image: rancher/k3s:v0.3.0-rc1
imagePullPolicy: Always
command: [sh, -c]
args: ["until ls /var/lib/rancher/k3s/ready; do sleep 3; done && k3s server || true"]
securityContext:
privileged: true
volumeMounts:
- name: tmp
mountPath: /tmp
- name: k3setc
mountPath: /etc/rancher/k3s
- name: k3svar
mountPath: /var/lib/rancher/k3s
value: "{{item}}"
withItems:
- dep ensure && make cli lint test
- name: test-e2e
template: ci-builder
arguments:
parameters:
- name: cmd
value: "dep ensure && make test-e2e"
- name: ci-builder
inputs:
@@ -110,23 +48,14 @@ spec:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:v0.13.1
imagePullPolicy: Always
command: [bash, -c]
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["{{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
env:
- name: CODECOV_TOKEN
valueFrom:
secretKeyRef:
name: codecov-token
key: codecov-token
resources:
requests:
memory: 1024Mi
cpu: 200m
archiveLocation:
archiveLogs: true
- name: ci-dind
inputs:
@@ -139,25 +68,21 @@ spec:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:v0.13.1
imagePullPolicy: Always
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["until docker ps; do sleep 3; done && {{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
env:
- name: DOCKER_HOST
value: 127.0.0.1
- name: DOCKER_BUILDKIT
value: "1"
resources:
requests:
memory: 1024Mi
cpu: 200m
sidecars:
- name: dind
image: docker:18.09-dind
image: docker:17.10-dind
securityContext:
privileged: true
mirrorVolumeMounts: true
archiveLocation:
archiveLogs: true


@@ -1,6 +0,0 @@
ignore:
- "**/*.pb.go"
- "**/*_test.go"
- "pkg/apis/.*"
- "pkg/client/.*"
- "test/.*"


@@ -1,12 +1,4 @@
# Prevent vendor directory from being copied to ensure we are not pulling unexpected cruft from
# a user's workspace, and are only building off of what is locked by dep.
.vscode/
.idea/
.DS_Store
vendor/
dist/
*.iml
# delve debug binaries
cmd/**/debug
debug.test
coverage.out
vendor
dist

.gitignore

@@ -7,4 +7,3 @@ dist/
# delve debug binaries
cmd/**/debug
debug.test
coverage.out


@@ -1,22 +0,0 @@
run:
deadline: 8m
skip-files:
- ".*\\.pb\\.go"
skip-dirs:
- pkg/client
- vendor
linter-settings:
goimports:
local-prefixes: github.com/argoproj/argo-cd
linters:
enable:
- vet
- gofmt
- goimports
- deadcode
- varcheck
- structcheck
- ineffassign
- unconvert
- misspell
disable-all: true


@@ -1,331 +1,5 @@
# Changelog
## v0.11.2 (2019-02-19)
+ Adds client retry. Fixes #959 (#1119)
- Prevent deletion hotloop (#1115)
- Fix EncodeX509KeyPair function so it takes in account chained certificates (#1137) (@amarruedo)
- Exclude metrics.k8s.io from watch (#1128)
- Fix issue where dex restart could cause login failures (#1114)
- Relax ingress/service health check to accept non-empty ingress list (#1053)
- [UI] Correctly handle empty response from repository/<repo>/apps API
## v0.11.1 (2019-01-18)
+ Allow using redis as a cache in repo-server (#1020)
- Fix controller deadlock when checking for stale cache (#1044)
- Namespaces are not being sorted during apply (#1038)
- Controller cache was susceptible to clock skew in managed cluster
- Fix ability to unset ApplicationSource specific parameters
- Fix force resource delete API (#1033)
- Incorrect PermissionDenied error during app creation when using project roles + user-defined RBAC (#1019)
- Fix `kubectl convert` issue preventing deployment of extensions/NetworkPolicy (#1012)
- Do not allow metadata.creationTimestamp to affect sync status (#1021)
- Graceful handling of clusters where API resource discovery is partially successful (#1018)
- Handle k8s resources circular dependency (#1016)
- Fix `app diff --local` command (#1008)
## v0.11.0 (2019-01-10)
This is Argo CD's biggest release ever and introduces a completely redesigned controller architecture.
### New Features
#### Performance & Scalability
The application controller has a completely redesigned architecture which improved performance and
scalability during application reconciliation.
This was achieved by introducing an in-memory, live state cache of lightweight Kubernetes object
metadata. During reconciliation, the controller no longer performs expensive, in-line queries of app
related resources in K8s API server, instead relying on the metadata available in the live state
cache. This dramatically improves performance and responsiveness, and is less burdensome to the K8s
API server.
#### Object relationship visualization for CRDs
With the new controller design, Argo CD is now able to understand the ownership relationships between
*all* Kubernetes objects, not just the built-in types. This enables Argo CD to visualize
parent/child relationships between all Kubernetes objects, including CRDs.
#### Multi-namespaced applications
During sync, Argo CD will now honor any explicitly set namespace in a manifest. Manifests without a
namespace will continue to deploy to the "preferred" namespace, as specified in the app's
`spec.destination.namespace`. This enables support for a class of applications which install to
multiple namespaces. For example, Argo CD can now install the
[prometheus-operator](https://github.com/helm/charts/tree/master/stable/prometheus-operator)
helm chart, which deploys some resources into `kube-system`, and others into the
`prometheus-operator` namespace.
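For illustration, a minimal sketch of a manifest whose explicitly set namespace is honored during sync (the resource name is hypothetical):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-config      # hypothetical name
  namespace: kube-system    # explicitly set, so the sync applies it here,
                            # not to the app's spec.destination.namespace
data:
  key: value
```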
#### Large application support
Full resource objects are no longer stored in the Application CRD object status. Instead, only
lightweight metadata is stored in the status, such as a resource's sync and health status.
This change enabled Argo CD to support applications with a very large number of resources
(e.g. istio), and reduces the bandwidth requirements when listing applications in the UI.
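A rough sketch of the lightweight per-resource status that replaces the full objects (field layout follows the description above; the resource is hypothetical):
```yaml
status:
  resources:
  - kind: Deployment
    name: guestbook-ui      # hypothetical resource
    status: Synced          # sync status only
    health:
      status: Healthy       # health status only; the full object is no longer stored
```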
#### Resource lifecycle hook improvements
Resource lifecycle hooks (e.g. PreSync, PostSync) are now visible/manageable from the UI.
Additionally, bare Pods with a restart policy of Never can now be used as a resource hook, as an
alternative to Jobs or Workflows.
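As a sketch, a bare Pod used as a PostSync hook might look like this (`argocd.argoproj.io/hook` is Argo CD's hook annotation; the Pod itself is illustrative):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: post-sync-check                 # hypothetical name
  annotations:
    argocd.argoproj.io/hook: PostSync   # run after the sync completes
spec:
  restartPolicy: Never                  # required when using a bare Pod as a hook
  containers:
  - name: check
    image: alpine:3.8
    command: ["sh", "-c", "echo sync complete"]
```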
#### K8s recommended application labels
The tracking label for resources has been changed to use `app.kubernetes.io/instance`, as
recommended in [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/),
(changed from `applications.argoproj.io/app-name`). This will enable applications managed by Argo CD
to interoperate with other tooling which is also converging on this labeling, such as the
Kubernetes dashboard. Additionally, Argo CD no longer injects any tracking labels at the
`spec.template.metadata` level.
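Concretely, a resource tracked by an application named `guestbook` (name illustrative) now carries:
```yaml
metadata:
  labels:
    app.kubernetes.io/instance: guestbook   # replaces applications.argoproj.io/app-name
```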
#### External OIDC provider support
Argo CD now supports delegating authentication to an existing, external OIDC provider
(e.g. Okta, OneLogin, Auth0, Microsoft, etc.) without the need for running Dex.
The optional, [Dex IDP OIDC provider](https://github.com/dexidp/dex) is still bundled as part of the
default installation, in order to provide a seamless out-of-box experience, enabling Argo CD to
integrate with non-OIDC providers, and to benefit from Dex's full range of
[connectors](https://github.com/dexidp/dex/tree/master/Documentation/connectors).
#### OIDC group bindings to Project Roles
OIDC group claims from an OAuth2 provider can now be bound to Argo CD project roles. Previously,
group claims could only be managed in the centralized ConfigMap, `argocd-rbac-cm`. They can now be
managed at a project level. This enables project admins to self service access to applications
within a project.
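A minimal sketch of a project role bound to an OIDC group claim (project, role, policy, and group names are all illustrative):
```yaml
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: my-project
  namespace: argocd
spec:
  roles:
  - name: developers
    policies:
    - p, proj:my-project:developers, applications, sync, my-project/*, allow
    groups:
    - my-org:team-a   # members of this OIDC group receive the role's policies
```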
#### Declarative Argo CD configuration
Argo CD settings can now be configured either declaratively or imperatively. The `argocd-cm`
ConfigMap now has a `repositories` field, which can reference credentials in a normal Kubernetes
secret which you can create declaratively, outside of Argo CD.
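A sketch of the declarative form (the repository URL and secret names are illustrative; the referenced secret is an ordinary Kubernetes Secret created outside of Argo CD):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
data:
  repositories: |
    - url: https://github.com/example/private-repo.git
      usernameSecret:
        name: repo-creds   # hypothetical secret
        key: username
      passwordSecret:
        name: repo-creds
        key: password
```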
#### Helm repository support
Helm repositories can be configured at the system level, enabling the deployment of helm charts
which have a dependency on external helm repositories.
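Along the same lines, a sketch of a system-level helm repository entry (the `helm.repositories` key and the entry's fields are assumptions based on the same ConfigMap-driven configuration):
```yaml
data:
  helm.repositories: |
    - url: https://charts.example.com   # hypothetical helm repository
      name: example-charts
```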
### Breaking changes:
* Argo CD's resource names were renamed for consistency. For example, the application-controller
deployment was renamed to argocd-application-controller. When upgrading from v0.10 to v0.11,
the older resources should be pruned to avoid inconsistent state and controller in-fighting.
* As a consequence to moving to recommended kubernetes labels, when upgrading from v0.10 to v0.11,
all applications will immediately be OutOfSync due to the change in tracking labels. This will
correct itself with another sync of the application. However, since Pods will be recreated, please
take this into consideration, especially if your applications are configured with auto-sync.
* There was significant reworking of the `app.status` fields to reduce the payload size, simplify
the data structure, and remove fields which were no longer used by the controller. No breaking
changes were made in `app.spec`.
* An older Argo CD CLI (v0.10 and below) will not be compatible with Argo CD v0.11. To keep
CI pipelines in sync with the API server, it is recommended to have them download the CLI
directly from the API server at https://${ARGOCD_SERVER}/download/argocd-linux-amd64.
### Changes since v0.10:
* Improve Application state reconciliation performance (#806)
* Refactor, consolidate and rename resource type data structures
+ Declarative setup and configuration of ArgoCD (#536)
+ Declaratively add helm repositories (#747)
+ Switch to k8s recommended app.kubernetes.io/instance label (#857)
+ Ability for a single application to deploy into multiple namespaces (#696)
+ Self service group access to project applications (#742)
+ Support for Pods as a sync hook (#801)
+ Support 'crd-install' helm hook (#355)
+ Use external 'diff' utility to render actual vs target state difference
+ Show sync policy in app list view
* Remove resources state from application CRD (#758)
* API server & UI should serve argocd binaries instead of linking to GitHub (#716)
* Update versions for kubectl (v1.13.1), helm (v2.12.1), ksonnet (v0.13.1)
* Update version of aws-iam-authenticator (0.4.0-alpha.1)
* Ability to force refresh of application manifests from git
* Improve diff assessment for Secrets, ClusterRoles, Roles
- Failed to deploy helm chart with local dependencies and no internet access (#786)
- Out of sync reported if Secrets with stringData are used (#763)
- Unable to delete application in K8s v1.12 (#718)
## v0.10.6 (2018-11-14)
- Fix issue preventing in-cluster app sync due to go-client changes (issue #774)
## v0.10.5 (2018-11-13)
+ Increase concurrency of application controller
* Update dependencies to k8s v1.12 and client-go v9.0 (#729)
- add argo cluster permission to view logs (#766) (@conorfennell)
- Fix issue where applications could not be deleted on k8s v1.12
- Allow 'syncApplication' action to reference target revision rather than hard-coding to 'HEAD' (#69) (@chrisgarland)
- Issue #768 - Fix application wizard crash
## v0.10.4 (2018-11-07)
* Upgrade to Helm v0.11.0 (@amarrella)
- Health check is not discerning apiVersion when assessing CRDs (issue #753)
- Fix nil pointer dereference in util/health (@mduarte)
## v0.10.3 (2018-10-28)
* Fix applying TLS version settings
* Update to kustomize 1.0.10 (@twz123)
## v0.10.2 (2018-10-25)
* Update to kustomize 1.0.9 (@twz123)
- Fix app refresh err when k8s patch is too slow
## v0.10.1 (2018-10-24)
- Handle case where OIDC settings become invalid after dex server restart (issue #710)
- git clean also needs to clean files under gitignore (issue #711)
## v0.10.0 (2018-10-19)
### Changes since v0.9:
+ Allow more fine-grained sync (issue #508)
+ Display init container logs (issue #681)
+ Redirect to /auth/login instead of /login when SSO token is used for authentication (issue #348)
+ Support ability to use a helm values files from a URL (issue #624)
+ Support public not-connected repo in app creation UI (issue #426)
+ Use ksonnet CLI instead of ksonnet libs (issue #626)
+ We should be able to select the order of the `yaml` files while creating a Helm App (#664)
* Remove default params from app history (issue #556)
* Update to ksonnet v0.13.0
* Update to kustomize 1.0.8
- API Server fails to return apps due to grpc max message size limit (issue #690)
- App Creation UI for Helm Apps shows only files prefixed with `values-` (issue #663)
- App creation UI should allow specifying values files outside of helm app directory bug (issue #658)
- argocd-server logs credentials in plain text when adding git repositories (issue #653)
- Azure Repos do not work as a repository (issue #643)
- Better update conflict error handing during app editing (issue #685)
- Cluster watch needs to be restarted when CRDs get created (issue #627)
- Credentials not being accepted for Google Source Repositories (issue #651)
- Default project is created without permission to deploy cluster level resources (issue #679)
- Generate role token click resets policy changes (issue #655)
- Input type text instead of password on Connect repo panel (issue #693)
- Metrics endpoint not reachable through the metrics kubernetes service (issue #672)
- Operation stuck in 'in progress' state if application has no resources (issue #682)
- Project should influence options for cluster and namespace during app creation (issue #592)
- Repo server unable to execute ls-remote for private repos (issue #639)
- Resource is always out of sync if it has only 'ksonnet.io/component' label (issue #686)
- Resource nodes are 'jumping' on app details page (issue #683)
- Sync always suggest using latest revision instead of target UI bug (issue #669)
- Temporary ignore service catalog resources (issue #650)
## v0.9.2 (2018-09-28)
* Update to kustomize 1.0.8
- Fix issue where argocd-server logged credentials in plain text during repo add (issue #653)
- Credentials not being accepted for Google Source Repositories (issue #651)
- Azure Repos do not work as a repository (issue #643)
- Temporary ignore service catalog resources (issue #650)
- Normalize policies by always adding space after comma
## v0.9.1 (2018-09-24)
- Repo server unable to execute ls-remote for private repos (issue #639)
## v0.9.0 (2018-09-24)
### Notes about upgrading from v0.8
* Cluster wide resources should be allowed in default project (due to issue #330):
```
argocd project allow-cluster-resource default '*' '*'
```
* Projects now provide the ability to allow or deny deployments of cluster-scoped resources
(e.g. Namespaces, ClusterRoles, CustomResourceDefinitions). When upgrading from v0.8 to v0.9, to
match the behavior of v0.8 (which did not have restrictions on deploying resources) and continue to
allow deployment of cluster-scoped resources, an additional command should be run:
```bash
argocd proj allow-cluster-resource default '*' '*'
```
The above command allows the `default` project to deploy any cluster-scoped resources which matches
the behavior of v0.8.
* The secret keys in the argocd-secret containing the TLS certificate and key have been renamed from
`server.crt` and `server.key` to the standard `tls.crt` and `tls.key` keys. This enables Argo CD
to integrate better with Ingress and cert-manager. When upgrading to v0.9, the `server.crt` and
`server.key` keys in argocd-secret should be renamed to the new keys.
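A sketch of the renamed keys (certificate and key material elided):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: argocd-secret
  namespace: argocd
type: Opaque
stringData:
  tls.crt: |   # was server.crt in v0.8
    <PEM certificate>
  tls.key: |   # was server.key in v0.8
    <PEM private key>
```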
### Changes since v0.8:
+ Auto-sync option in application CRD instance (issue #79)
+ Support raw jsonnet as an application source (issue #540)
+ Reorder K8s resources to correct creation order (issue #102)
+ Redact K8s secrets from API server payloads (issue #470)
+ Support --in-cluster authentication without providing a kubeconfig (issue #527)
+ Special handling of CustomResourceDefinitions (issue #613)
+ Argo CD should download helm chart dependencies (issue #582)
+ Export Argo CD stats as prometheus style metrics (issue #513)
+ Support restricting TLS version (issue #609)
+ Use 'kubectl auth reconcile' before 'kubectl apply' (issue #523)
+ Projects need controls on cluster-scoped resources (issue #330)
+ Support IAM Authentication for managing external K8s clusters (issue #482)
+ Compatibility with cert manager (issue #617)
* Enable TLS for repo server (issue #553)
* Split out dex into its own deployment (instead of sidecar) (issue #555)
+ [UI] Support selection of helm values files in App creation wizard (issue #499)
+ [UI] Support specifying source revision in App creation wizard allow (issue #503)
+ [UI] Improve resource diff rendering (issue #457)
+ [UI] Indicate number of ready containers in pod (issue #539)
+ [UI] Indicate when app is overriding parameters (issue #503)
+ [UI] Provide a YAML view of resources (issue #396)
+ [UI] Project Role/Token management from UI (issue #548)
+ [UI] App creation wizard should allow specifying source revision (issue #562)
+ [UI] Ability to modify application from UI (issue #615)
+ [UI] indicate when operation is in progress or has failed (issue #566)
- Fix issue where changes were not pulled when tracking a branch (issue #567)
- Lazy enforcement of unknown cluster/namespace restricted resources (issue #599)
- Fix controller hot loop when app source contains bad manifests (issue #568)
- Fix issue where Argo CD fails to deploy when resources are in a K8s list format (issue #584)
- Fix comparison failure when app contains unregistered custom resource (issue #583)
- Fix issue where helm hooks were being deployed as part of sync (issue #605)
- Fix race conditions in kube.GetResourcesWithLabel and DeleteResourceWithLabel (issue #587)
- [UI] Fix issue where projects filter does not work when application got changed
- [UI] Creating apps from directories is not obvious (issue #565)
- Helm hooks are being deployed as resources (issue #605)
- Disagreement in three way diff calculation (issue #597)
- SIGSEGV in kube.GetResourcesWithLabel (issue #587)
- Argo CD fails to deploy resources list (issue #584)
- Branch tracking not working properly (issue #567)
- Controller hot loop when application source has bad manifests (issue #568)
## v0.8.2 (2018-09-12)
- Downgrade ksonnet from v0.12.0 to v0.11.0 due to quote unescape regression
- Fix CLI panic when performing an initial `argocd sync/wait`
## v0.8.1 (2018-09-10)
+ [UI] Support selection of helm values files in App creation wizard (issue #499)
+ [UI] Support specifying source revision in App creation wizard allow (issue #503)
+ [UI] Improve resource diff rendering (issue #457)
+ [UI] Indicate number of ready containers in pod (issue #539)
+ [UI] Indicate when app is overriding parameters (issue #503)
+ [UI] Provide a YAML view of resources (issue #396)
- Fix issue where changes were not pulled when tracking a branch (issue #567)
- Fix controller hot loop when app source contains bad manifests (issue #568)
- [UI] Fix issue where projects filter does not work when application got changed
## v0.8.0 (2018-09-04)
### Notes about upgrading from v0.7
* The RBAC model has been improved to support explicit denies. What this means is that any previous
RBAC policy rules, need to be rewritten to include one extra column with the effect:
`allow` or `deny`. For example, if a rule was written like this:
```
p, my-org:my-team, applications, get, */*
```
It should be rewritten to look like this:
```
p, my-org:my-team, applications, get, */*, allow
```
### Changes since v0.7:
+ Support kustomize as an application source (issue #510)
+ Introduce project tokens for automation access (issue #498)
+ Add ability to delete a single application resource to support immutable updates (issue #262)
+ Update RBAC model to support explicit denies (issue #497)
+ Ability to view Kubernetes events related to application projects for auditing
+ Add PVC healthcheck to controller (issue #501)
+ Run all containers as an unprivileged user (issue #528)
* Upgrade ksonnet to v0.12.0
* Add readiness probes to API server (issue #522)
* Use gRPC error codes instead of fmt.Errorf (#532)
- API discovery becomes best effort when partial resource list is returned (issue #524)
- Fix `argocd app wait` printing incorrect Sync output (issue #542)
- Fix issue where argocd could not sync to a tag (#541)
- Fix issue where static assets were browser cached between upgrades (issue #489)
## v0.7.2 (2018-08-21)
- API discovery becomes best effort when partial resource list is returned (issue #524)
@@ -399,7 +73,7 @@ RBAC policy rules, need to be rewritten to include one extra column with the eff
## v0.5.0 (2018-06-12)
+ RBAC access control
+ Repository/Cluster state monitoring
+ Argo CD settings import/export
+ ArgoCD settings import/export
+ Application creation UI wizard
+ argocd app manifests for printing the application manifests
+ argocd app unset command to unset parameter overrides


@@ -1,183 +1,76 @@
# Contributing
## Before You Start
You must install and run ArgoCD using a local Kubernetes cluster (e.g. Docker for Desktop or Minikube) first. This will help you understand the application and also get your local environment set up.
Then, to get a good grounding in Go, try out [the tutorial](https://tour.golang.org/).
## Pre-requisites
Install:
## Requirements
Make sure you have the following tools installed:
* [docker](https://docs.docker.com/install/#supported-platforms)
* [golang](https://golang.org/)
* [dep](https://github.com/golang/dep)
* [protobuf](https://developers.google.com/protocol-buffers/)
* [ksonnet](https://github.com/ksonnet/ksonnet#install)
* [helm](https://github.com/helm/helm/releases)
* [kustomize](https://github.com/kubernetes-sigs/kustomize/releases)
* [go-swagger](https://github.com/go-swagger/go-swagger/blob/master/docs/install.md)
* [jq](https://stedolan.github.io/jq/)
* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
* [minikube](https://kubernetes.io/docs/setup/minikube/) or Docker for Desktop
```
brew tap go-swagger/go-swagger
brew install go dep protobuf kubectl ksonnet/tap/ks kubernetes-helm jq go-swagger
$ brew tap go-swagger/go-swagger
$ brew install go dep protobuf kubectl ksonnet/tap/ks kubernetes-helm jq go-swagger
$ go get -u github.com/golang/protobuf/protoc-gen-go
$ go get -u github.com/go-swagger/go-swagger/cmd/swagger
$ go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
$ go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
```
Set up environment variables (e.g. in `~/.bashrc`):
Nice to have [gometalinter](https://github.com/alecthomas/gometalinter) and [goreman](https://github.com/mattn/goreman):
```
export GOPATH=~/go
export PATH=$PATH:$GOPATH/bin
```
Install go dependencies:
```
go get -u github.com/golang/protobuf/protoc-gen-go
go get -u github.com/go-swagger/go-swagger/cmd/swagger
go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
go get -u github.com/mattn/goreman
$ go get -u gopkg.in/alecthomas/gometalinter.v2 github.com/mattn/goreman && gometalinter.v2 --install
```
## Building
```
go get -u github.com/argoproj/argo-cd
dep ensure
make
$ go get -u github.com/argoproj/argo-cd
$ dep ensure
$ make
```
The make command can take a while, and we recommend building the specific component you are working on
* `make codegen` - Builds protobuf and swagger files
NOTE: The make command can take a while, and we recommend building the specific component you are working on
* `make cli` - Make the argocd CLI tool
* `make server` - Make the API/repo/controller server
* `make codegen` - Builds protobuf and swagger files
* `make argocd-util` - Make the administrator's utility, used for certain tasks such as import/export
## Running Tests
## Generating ArgoCD manifests for a specific image repository/tag
To run unit tests:
During development, the `update-manifests.sh` script can be used to conveniently regenerate the
ArgoCD installation manifests with a customized image namespace and tag. This enables developers
to easily apply manifests which are using the images that they pushed into their personal container
repository.
```
make test
$ IMAGE_NAMESPACE=jessesuen IMAGE_TAG=latest ./hack/update-manifests.sh
$ kubectl apply -n argocd -f ./manifests/install.yaml
```
To run e2e tests:
## Running locally
You need access to a Kubernetes cluster (e.g. [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) or [Docker edge](https://docs.docker.com/docker-for-mac/install/)) in order to run Argo CD on your laptop:
* install kubectl: `brew install kubectl`
* make sure `kubectl` is connected to your cluster (e.g. `kubectl get pods` should work).
* install application CRD using following command:
```
make test-e2e
$ kubectl create -f install/manifests/01_application-crd.yaml
```
## Running Locally
It is much easier to run and debug ArgoCD on your local machine than in the Kubernetes cluster.
You should scale the deployments to zero:
* start Argo CD services using [goreman](https://github.com/mattn/goreman):
```
kubectl -n argocd scale deployment.extensions/argocd-application-controller --replicas 0
kubectl -n argocd scale deployment.extensions/argocd-dex-server --replicas 0
kubectl -n argocd scale deployment.extensions/argocd-repo-server --replicas 0
kubectl -n argocd scale deployment.extensions/argocd-server --replicas 0
kubectl -n argocd scale deployment.extensions/argocd-redis --replicas 0
$ goreman start
```
Then check out and build the UI next to your code:
```
cd ~/go/src/github.com/argoproj
git clone git@github.com:argoproj/argo-cd-ui.git
```
Follow the UI's [README](https://github.com/argoproj/argo-cd-ui/blob/master/README.md) to build it.
Note: you'll need to use the https://localhost:6443 cluster now.
Then start the services:
```
cd ~/go/src/github.com/argoproj/argo-cd
goreman start
```
You can now execute `argocd` command against your locally running ArgoCD by appending `--server localhost:8080 --plaintext --insecure`, e.g.:
```
argocd app set guestbook --path guestbook --repo https://github.com/argoproj/argocd-example-apps.git --dest-server https://localhost:6443 --dest-namespace default --server localhost:8080 --plaintext --insecure
```
You can open the UI: http://localhost:8080
Note: you'll need to use the https://kubernetes.default.svc cluster now.
## Running Local Containers
You may need to run containers locally, so here's how:
Create a login on Docker Hub, then log in:
```
docker login
```
Add your username as an environment variable, e.g. in your `~/.bash_profile`:
```
export IMAGE_NAMESPACE=alexcollinsintuit
```
If you have not built the UI image (see [the UI README](https://github.com/argoproj/argo-cd-ui/blob/master/README.md)), then do the following:
```
docker pull argoproj/argocd-ui:latest
docker tag argoproj/argocd-ui:latest $IMAGE_NAMESPACE/argocd-ui:latest
docker push $IMAGE_NAMESPACE/argocd-ui:latest
```
Build the images:
```
DOCKER_PUSH=true make image
```
Update the manifests:
```
make manifests
```
Install the manifests:
```
kubectl -n argocd apply --force -f manifests/install.yaml
```
Scale your deployments up:
```
kubectl -n argocd scale deployment.extensions/argocd-application-controller --replicas 1
kubectl -n argocd scale deployment.extensions/argocd-dex-server --replicas 1
kubectl -n argocd scale deployment.extensions/argocd-repo-server --replicas 1
kubectl -n argocd scale deployment.extensions/argocd-server --replicas 1
kubectl -n argocd scale deployment.extensions/argocd-redis --replicas 1
```
Now you can set up the port-forwarding (see [README](README.md)) and open the UI or CLI.
## Pre-commit Checks
Before you commit, make sure you've formatted and linted your code, or your PR will fail CI:
```
STAGED_GO_FILES=$(git diff --cached --name-only | grep ".go$")
gofmt -w $STAGED_GO_FILES
make codegen
make pre-commit  # lint and test
```
## Troubleshooting
* Ensure argocd is installed: ./dist/argocd install
* Ensure you're logged in: ./dist/argocd login --username admin --password <whatever password you set at install> localhost:8080
* Ensure that roles are configured: kubectl create -f install/manifests/02c_argocd-rbac-cm.yaml
* Ensure minikube is running: minikube stop && minikube start
* Ensure Argo CD is aware of minikube: ./dist/argocd cluster add minikube


@@ -1,152 +0,0 @@
####################################################################################################
# Builder image
# Initial stage which prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM golang:1.11.4 as builder
RUN apt-get update && apt-get install -y \
git \
make \
wget \
gcc \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
WORKDIR /tmp
# Install docker
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 18.09.1
RUN wget -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/x86_64/docker-${DOCKER_VERSION}.tgz" && \
tar --extract --file docker.tgz --strip-components 1 --directory /usr/local/bin/ && \
rm docker.tgz
# Install dep
ENV DEP_VERSION=0.5.0
RUN wget https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -O /usr/local/bin/dep && \
chmod +x /usr/local/bin/dep
# Install gometalinter
ENV GOMETALINTER_VERSION=2.0.12
RUN curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v${GOMETALINTER_VERSION}/gometalinter-${GOMETALINTER_VERSION}-linux-amd64.tar.gz | \
tar -xzC "$GOPATH/bin" --exclude COPYING --exclude README.md --strip-components 1 -f- && \
ln -s $GOPATH/bin/gometalinter $GOPATH/bin/gometalinter.v2
# Install packr
ENV PACKR_VERSION=1.21.9
RUN wget https://github.com/gobuffalo/packr/releases/download/v${PACKR_VERSION}/packr_${PACKR_VERSION}_linux_amd64.tar.gz && \
tar -vxf packr*.tar.gz -C /tmp/ && \
mv /tmp/packr /usr/local/bin/packr
# Install kubectl
# NOTE: keep the version synced with https://storage.googleapis.com/kubernetes-release/release/stable.txt
ENV KUBECTL_VERSION=1.14.0
RUN curl -L -o /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \
chmod +x /usr/local/bin/kubectl && \
kubectl version --client
# Install ksonnet
ENV KSONNET_VERSION=0.13.1
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks && \
ks version
# Install helm
ENV HELM_VERSION=2.12.1
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /usr/local/bin/helm && \
helm version --client
# Install kustomize
ENV KUSTOMIZE1_VERSION=1.0.11
RUN curl -L -o /usr/local/bin/kustomize1 https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE1_VERSION}/kustomize_${KUSTOMIZE1_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/kustomize1 && \
kustomize1 version
ENV KUSTOMIZE_VERSION=2.0.3
RUN curl -L -o /usr/local/bin/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/kustomize && \
kustomize version
# Install AWS IAM Authenticator
ENV AWS_IAM_AUTHENTICATOR_VERSION=0.4.0-alpha.1
RUN curl -L -o /usr/local/bin/aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/${AWS_IAM_AUTHENTICATOR_VERSION}/aws-iam-authenticator_${AWS_IAM_AUTHENTICATOR_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/aws-iam-authenticator
# Install golangci-lint
RUN wget https://install.goreleaser.com/github.com/golangci/golangci-lint.sh && \
chmod +x ./golangci-lint.sh && \
./golangci-lint.sh -b $GOPATH/bin && \
golangci-lint linters
COPY .golangci.yml ${GOPATH}/src/dummy/.golangci.yml
RUN cd ${GOPATH}/src/dummy && \
touch dummy.go && \
golangci-lint run
####################################################################################################
# Argo CD Base - used as the base for both the release and dev argocd images
####################################################################################################
FROM debian:9.5-slim as argocd-base
RUN groupadd -g 999 argocd && \
useradd -r -u 999 -g argocd argocd && \
mkdir -p /home/argocd && \
chown argocd:argocd /home/argocd && \
apt-get update && \
apt-get install -y git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY hack/ssh_known_hosts /etc/ssh/ssh_known_hosts
COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/kubectl
COPY --from=builder /usr/local/bin/kustomize1 /usr/local/bin/kustomize1
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
COPY --from=builder /usr/local/bin/aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=argocd
USER argocd
WORKDIR /home/argocd
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM golang:1.11.4 as argocd-build
COPY --from=builder /usr/local/bin/dep /usr/local/bin/dep
COPY --from=builder /usr/local/bin/packr /usr/local/bin/packr
# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep
# to install all the packages of our dep lock file
COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml
COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock
RUN cd ${GOPATH}/src/dummy && \
dep ensure -vendor-only && \
mv vendor/* ${GOPATH}/src/ && \
rmdir vendor
# Perform the build
WORKDIR /go/src/github.com/argoproj/argo-cd
COPY . .
RUN make cli server controller repo-server argocd-util && \
make CLI_NAME=argocd-darwin-amd64 GOOS=darwin cli
####################################################################################################
# Final image
####################################################################################################
FROM argocd-base
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/

Dockerfile-argocd (new file)

@@ -0,0 +1,88 @@
FROM debian:9.4 as builder
RUN apt-get update && apt-get install -y \
git \
make \
wget \
gcc \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Install go
ENV GO_VERSION 1.10.3
ENV GO_ARCH amd64
ENV GOPATH /root/go
ENV PATH ${GOPATH}/bin:/usr/local/go/bin:${PATH}
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \
rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz
# Install protoc, dep, packr
ENV PROTOBUF_VERSION 3.5.1
RUN cd /usr/local && \
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-*.zip && \
wget https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -O /usr/local/bin/dep && \
chmod +x /usr/local/bin/dep && \
wget https://github.com/gobuffalo/packr/releases/download/v1.11.0/packr_1.11.0_linux_amd64.tar.gz && \
tar -vxf packr*.tar.gz -C /tmp/ && \
mv /tmp/packr /usr/local/bin/packr
# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep
# to install all the packages of our dep lock file
COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml
COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock
RUN cd ${GOPATH}/src/dummy && \
dep ensure -vendor-only && \
mv vendor/* ${GOPATH}/src/ && \
rmdir vendor
# Perform the build
WORKDIR /root/go/src/github.com/argoproj/argo-cd
COPY . .
ARG MAKE_TARGET="cli server controller repo-server argocd-util"
RUN make ${MAKE_TARGET}
##############################################################
# This stage will pull in or build any CLI tooling we need for our final image
FROM golang:1.10 as cli-tooling
# NOTE: we frequently switch between tip of master ksonnet vs. official builds. Comment/uncomment
# the corresponding section to switch between the two options:
# Option 1: build ksonnet ourselves
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /ks
# Option 2: use official tagged ksonnet release
ENV KSONNET_VERSION=0.11.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /ks
RUN curl -o /kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /kubectl
ENV HELM_VERSION=2.9.1
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /helm
##############################################################
FROM debian:9.3
RUN apt-get update && apt-get install -y git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=cli-tooling /ks /usr/local/bin/ks
COPY --from=cli-tooling /helm /usr/local/bin/helm
COPY --from=cli-tooling /kubectl /usr/local/bin/kubectl
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=root
COPY --from=builder /root/go/src/github.com/argoproj/argo-cd/dist/* /
ARG BINARY
CMD /${BINARY}

Dockerfile-ci-builder (new file)

@@ -0,0 +1,28 @@
FROM golang:1.10.3
WORKDIR /tmp
RUN curl -O https://get.docker.com/builds/Linux/x86_64/docker-1.13.1.tgz && \
tar -xzf docker-1.13.1.tgz && \
mv docker/docker /usr/local/bin/docker && \
rm -rf ./docker && \
go get -u github.com/golang/dep/cmd/dep && \
go get -u gopkg.in/alecthomas/gometalinter.v2 && \
gometalinter.v2 --install
# Install kubectl
RUN curl -o /kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /kubectl && mv /kubectl /usr/local/bin/kubectl
# Install ksonnet
ENV KSONNET_VERSION=0.11.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks && \
rm -rf /tmp/ks_${KSONNET_VERSION}
# Install helm
ENV HELM_VERSION=2.9.1
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /usr/local/bin/helm


@@ -1,5 +0,0 @@
####################################################################################################
# argocd-dev
####################################################################################################
FROM argocd-base
COPY argocd* /usr/local/bin/

Gopkg.lock (generated)

File diff suppressed because it is too large

@@ -1,63 +1,55 @@
# Packages should only be added to the following list when we use them *outside* of our go code.
# (e.g. we want to build the binary to invoke as part of the build process, such as in
# generate-proto.sh). Normal use of golang packages should be added via `dep ensure`, and pinned
# with a [[constraint]] or [[override]] when version is important.
required = [
"github.com/golang/protobuf/protoc-gen-go",
"github.com/gogo/protobuf/protoc-gen-gofast",
"github.com/gogo/protobuf/protoc-gen-gogofast",
"golang.org/x/sync/errgroup",
"k8s.io/code-generator/cmd/go-to-protobuf",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger",
"golang.org/x/sync/errgroup",
"github.com/golang/protobuf/protoc-gen-go",
]
[[constraint]]
name = "google.golang.org/grpc"
version = "1.15.0"
[[constraint]]
name = "github.com/gogo/protobuf"
version = "1.1.1"
# override github.com/grpc-ecosystem/go-grpc-middleware's constraint on master
[[override]]
name = "github.com/golang/protobuf"
version = "1.2.0"
version = "1.9.2"
[[constraint]]
name = "github.com/grpc-ecosystem/grpc-gateway"
version = "v1.3.1"
# prometheus does not believe in semantic versioning yet
[[constraint]]
name = "github.com/prometheus/client_golang"
revision = "7858729281ec582767b20e0d696b6041d995d5e0"
[[constraint]]
branch = "release-1.12"
# override argo outdated dependency
[[override]]
branch = "release-1.10"
name = "k8s.io/api"
[[constraint]]
branch = "release-1.12"
name = "k8s.io/code-generator"
[[override]]
branch = "release-1.10"
name = "k8s.io/apimachinery"
[[constraint]]
branch = "release-9.0"
name = "k8s.io/apiextensions-apiserver"
branch = "release-1.10"
[[constraint]]
branch = "release-1.10"
name = "k8s.io/code-generator"
[[override]]
branch = "release-7.0"
name = "k8s.io/client-go"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.2"
version = "1.2.1"
[[constraint]]
name = "github.com/ksonnet/ksonnet"
version = "v0.11.0"
[[constraint]]
name = "github.com/gobuffalo/packr"
version = "v1.11.0"
[[constraint]]
branch = "master"
name = "github.com/argoproj/pkg"
[[constraint]]
branch = "master"
name = "github.com/yudai/gojsondiff"
# override ksonnet's logrus dependency
[[override]]
name = "github.com/sirupsen/logrus"
revision = "ea8897e79973357ba785ac2533559a6297e83c44"

Makefile

@@ -10,28 +10,25 @@ GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-
GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
PACKR_CMD=$(shell if [ "`which packr`" ]; then echo "packr"; else echo "go run vendor/github.com/gobuffalo/packr/packr/main.go"; fi)
# docker image publishing options
DOCKER_PUSH=false
IMAGE_TAG=latest
# perform static compilation
STATIC_BUILD=true
# build development images
DEV_IMAGE=false
override LDFLAGS += \
-X ${PACKAGE}.version=${VERSION} \
-X ${PACKAGE}.buildDate=${BUILD_DATE} \
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}
ifeq (${STATIC_BUILD}, true)
override LDFLAGS += -extldflags "-static"
endif
# docker image publishing options
DOCKER_PUSH=false
IMAGE_TAG=latest
ifneq (${GIT_TAG},)
IMAGE_TAG=${GIT_TAG}
LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
endif
ifneq (${IMAGE_NAMESPACE},)
override LDFLAGS += -X ${PACKAGE}/install.imageNamespace=${IMAGE_NAMESPACE}
endif
ifneq (${IMAGE_TAG},)
override LDFLAGS += -X ${PACKAGE}/install.imageTag=${IMAGE_TAG}
endif
ifeq (${DOCKER_PUSH},true)
ifndef IMAGE_NAMESPACE
@@ -44,7 +41,7 @@ IMAGE_PREFIX=${IMAGE_NAMESPACE}/
endif
.PHONY: all
all: cli image argocd-util
all: cli server-image controller-image repo-server-image argocd-util
.PHONY: protogen
protogen:
@@ -55,90 +52,83 @@ clientgen:
./hack/update-codegen.sh
.PHONY: codegen
codegen: protogen clientgen format-code
codegen: protogen clientgen
# NOTE: we use packr to do the build instead of go, since we embed .yaml files into the go binary.
# This enables ease of maintenance of the yaml files.
.PHONY: cli
cli: clean-debug
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
${PACKR_CMD} build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
.PHONY: release-cli
release-cli: clean-debug image
docker create --name tmp-argocd-linux $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)
docker cp tmp-argocd-linux:/usr/local/bin/argocd ${DIST_DIR}/argocd-linux-amd64
docker cp tmp-argocd-linux:/usr/local/bin/argocd-darwin-amd64 ${DIST_DIR}/argocd-darwin-amd64
.PHONY: cli-linux
cli-linux: clean-debug
docker build --iidfile /tmp/argocd-linux-id --target builder --build-arg MAKE_TARGET="cli IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-linux-amd64" -f Dockerfile-argocd .
docker create --name tmp-argocd-linux `cat /tmp/argocd-linux-id`
docker cp tmp-argocd-linux:/root/go/src/github.com/argoproj/argo-cd/dist/argocd-linux-amd64 dist/
docker rm tmp-argocd-linux
.PHONY: cli-darwin
cli-darwin: clean-debug
docker build --iidfile /tmp/argocd-darwin-id --target builder --build-arg MAKE_TARGET="cli GOOS=darwin IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-darwin-amd64" -f Dockerfile-argocd .
docker create --name tmp-argocd-darwin `cat /tmp/argocd-darwin-id`
docker cp tmp-argocd-darwin:/root/go/src/github.com/argoproj/argo-cd/dist/argocd-darwin-amd64 dist/
docker rm tmp-argocd-darwin
.PHONY: argocd-util
argocd-util: clean-debug
# Build argocd-util as a statically linked binary, so it can run within the alpine-based dex container (argoproj/argo-cd#844)
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
.PHONY: manifests
manifests:
.PHONY: install-manifest
install-manifest:
if [ "${IMAGE_NAMESPACE}" = "" ] ; then echo "IMAGE_NAMESPACE must be set to build install manifest" ; exit 1 ; fi
./hack/update-manifests.sh
# NOTE: we use packr to do the build instead of go, since we embed swagger files and policy.csv
# files into the go binary
.PHONY: server
server: clean-debug
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
.PHONY: server-image
server-image:
docker build --build-arg BINARY=argocd-server -t $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) ; fi
.PHONY: repo-server
repo-server:
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
.PHONY: repo-server-image
repo-server-image:
docker build --build-arg BINARY=argocd-repo-server -t $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) ; fi
.PHONY: controller
controller:
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
.PHONY: packr
packr:
go build -o ${DIST_DIR}/packr ./vendor/github.com/gobuffalo/packr/packr/
.PHONY: controller-image
controller-image:
docker build --build-arg BINARY=argocd-application-controller -t $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) ; fi
.PHONY: image
ifeq ($(DEV_IMAGE), true)
# The "dev" image builds the binaries from the users desktop environment (instead of in Docker)
# which speeds up builds. Dockerfile.dev needs to be copied into dist to perform the build, since
# the dist directory is under .dockerignore.
image: packr
docker build -t argocd-base --target argocd-base .
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd/argocd
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-darwin-amd64 ./cmd/argocd
cp Dockerfile.dev dist
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
else
image:
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
endif
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
.PHONY: cli-image
cli-image:
docker build --build-arg BINARY=argocd -t $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) ; fi
.PHONY: builder-image
builder-image:
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) --target builder .
docker push $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG)
.PHONY: dep-ensure
dep-ensure:
dep ensure -no-vendor
.PHONY: format-code
format-code:
./hack/format-code.sh
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) -f Dockerfile-ci-builder .
.PHONY: lint
lint:
golangci-lint run
gometalinter.v2 --config gometalinter.json ./...
.PHONY: test
test:
go test -covermode=count -coverprofile=coverage.out `go list ./... | grep -v "github.com/argoproj/argo-cd/test/e2e"`
go test -v `go list ./... | grep -v "github.com/argoproj/argo-cd/test/e2e"`
.PHONY: test-e2e
test-e2e: cli
test-e2e:
go test -v -failfast -timeout 20m ./test/e2e
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes
@@ -150,14 +140,13 @@ clean-debug:
clean: clean-debug
-rm -rf ${CURRENT_DIR}/dist
.PHONY: pre-commit
pre-commit: dep-ensure codegen format-code test lint
.PHONY: precheckin
precheckin: test lint
.PHONY: release-precheck
release-precheck: manifests
release-precheck: install-manifest
@if [ "$(GIT_TREE_STATE)" != "clean" ]; then echo 'git tree state is $(GIT_TREE_STATE)' ; exit 1; fi
@if [ -z "$(GIT_TAG)" ]; then echo 'commit must be tagged to perform release' ; exit 1; fi
@if [ "$(GIT_TAG)" != "v`cat VERSION`" ]; then echo 'VERSION does not match git tag'; exit 1; fi
.PHONY: release
release: release-precheck pre-commit image release-cli
release: release-precheck precheckin cli-darwin cli-linux server-image controller-image repo-server-image cli-image


@@ -1,5 +1,4 @@
controller: sh -c "ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-application-controller/main.go --loglevel debug --redis localhost:6379 --repo-server localhost:8081"
api-server: sh -c "ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:6379 --disable-auth --insecure --dex-server http://localhost:5556 --repo-server localhost:8081 --staticassets ../argo-cd-ui/dist/app"
repo-server: go run ./cmd/argocd-repo-server/main.go --loglevel debug --redis localhost:6379
dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p 5556:5556 -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/dexidp/dex:v2.14.0 serve /dex.yaml"
redis: docker run --rm -i -p 6379:6379 redis:5.0.3-alpine --save "" --appendonly no
controller: go run ./cmd/argocd-application-controller/main.go
api-server: go run ./cmd/argocd-server/main.go --insecure --disable-auth
repo-server: go run ./cmd/argocd-repo-server/main.go --loglevel debug
dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p 5556:5556 -p 5557:5557 -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/coreos/dex:v2.10.0 serve /dex.yaml"


@@ -1,5 +1,3 @@
[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
[![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd)
# Argo CD - Declarative Continuous Delivery for Kubernetes
@@ -16,13 +14,6 @@ Application deployment and lifecycle management should be automated, auditable,
## Getting Started
### Quickstart
```bash
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
```
Follow our [getting started guide](docs/getting_started.md). Further [documentation](docs/)
is provided for additional features.
@@ -30,23 +21,15 @@ is provided for additional features.
Argo CD follows the **GitOps** pattern of using git repositories as the source of truth for defining
the desired application state. Kubernetes manifests can be specified in several ways:
* [kustomize](https://kustomize.io) applications
* [helm](https://helm.sh) charts
* [ksonnet](https://ksonnet.io) applications
* [jsonnet](https://jsonnet.org) files
* Plain directory of YAML/json manifests
* Any custom config management tool configured as a config management plugin
* [helm](https://helm.sh) charts
* Simple directory of YAML/json manifests
Argo CD automates the deployment of the desired application states in the specified target environments.
Application deployments can track updates to branches, tags, or be pinned to a specific version of
manifests at a git commit. See [tracking strategies](docs/tracking_strategies.md) for additional
details about the different tracking strategies available.
For a quick 10 minute overview of Argo CD, check out the demo presented to the SIG Apps community
meeting:
[![Alt text](https://img.youtube.com/vi/aWDIQMbp1cc/0.jpg)](https://youtu.be/aWDIQMbp1cc?t=1m4s)
## Architecture
![Argo CD Architecture](docs/argocd_architecture.png)
@@ -64,43 +47,23 @@ For additional details, see [architecture overview](docs/architecture.md).
## Features
* Automated deployment of applications to specified target environments
* Support for multiple config management/templating tools (Kustomize, Helm, Ksonnet, Jsonnet, plain-YAML)
* Ability to manage and deploy to multiple clusters
* SSO Integration (OIDC, OAuth2, LDAP, SAML 2.0, GitHub, GitLab, Microsoft, LinkedIn)
* Multi-tenancy and RBAC policies for authorization
* Rollback/Roll-anywhere to any application configuration committed in the git repository
* Health status analysis of application resources
* Automated configuration drift detection and visualization
* Continuous monitoring of deployed applications
* Automated or manual syncing of applications to their desired state
* Web UI which provides real-time view of application activity
* CLI for automation and CI integration
* Webhook integration (GitHub, BitBucket, GitLab)
* Access tokens for automation
* Web and CLI based visualization of applications and differences between live vs. desired state
* Rollback/Roll-anywhere to any application state committed in the git repository
* Health assessment statuses on all components of the application
* SSO Integration (OIDC, OAuth2, LDAP, SAML 2.0, GitLab, Microsoft, LinkedIn)
* Webhook Integration (GitHub, BitBucket, GitLab)
* PreSync, Sync, PostSync hooks to support complex application rollouts (e.g. blue/green & canary upgrades)
* Audit trails for application events and API calls
* Prometheus metrics
* Parameter overrides for overriding ksonnet/helm parameters in git
## Community Blogs and Presentations
* GitOps with Argo CD: [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager](https://www.ibm.com/blogs/bluemix/2019/02/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2/)
* KubeCon talk: [CI/CD in Light Speed with K8s and Argo CD](https://www.youtube.com/watch?v=OdzH82VpMwI&feature=youtu.be)
* KubeCon talk: [Machine Learning as Code](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU)
* Among other things, describes how Kubeflow uses Argo CD to implement GitOps for ML
* SIG Apps demo: [Argo CD - GitOps Continuous Delivery for Kubernetes](https://www.youtube.com/watch?v=aWDIQMbp1cc&feature=youtu.be&t=1m4s)
## Project Resources
* Argo GitHub: https://github.com/argoproj
* Argo Slack: [click here to join](https://argoproj.github.io/community/join-slack)
* Argo website: https://argoproj.github.io/
## Development Status
* Argo CD is actively developed and is being used in production to deploy SaaS services at Intuit
* Argo CD is being used in production to deploy SaaS services at Intuit
## Roadmap
### v0.12
* Support for custom K8S manifest templating engines
* Support for custom health assessments (e.g. CRD health)
* Improved prometheus metrics
* Higher availability
* UI improvements
* Auto-sync toggle to directly apply git state changes to live state
* Service account/access key management for CI pipelines
* Support for additional config management tools (Kustomize?)
* Revamped UI, and feature parity with CLI
* Customizable application actions

View File

@@ -1 +1 @@
0.12.3
0.7.2

View File

@@ -1,29 +0,0 @@
# Built-in policy which defines two roles: role:readonly and role:admin,
# and additionally assigns the admin user to the role:admin role.
# There are two policy formats:
# 1. Applications (which belong to a project):
# p, <user/group>, <resource>, <action>, <project>/<object>
# 2. All other resources:
# p, <user/group>, <resource>, <action>, <object>
p, role:readonly, applications, get, */*, allow
p, role:readonly, clusters, get, *, allow
p, role:readonly, repositories, get, *, allow
p, role:readonly, projects, get, *, allow
p, role:admin, applications, create, */*, allow
p, role:admin, applications, update, */*, allow
p, role:admin, applications, delete, */*, allow
p, role:admin, applications, sync, */*, allow
p, role:admin, clusters, create, *, allow
p, role:admin, clusters, update, *, allow
p, role:admin, clusters, delete, *, allow
p, role:admin, repositories, create, *, allow
p, role:admin, repositories, update, *, allow
p, role:admin, repositories, delete, *, allow
p, role:admin, projects, create, *, allow
p, role:admin, projects, update, *, allow
p, role:admin, projects, delete, *, allow
g, role:admin, role:readonly
g, admin, role:admin

View File

@@ -2,29 +2,28 @@ package main
import (
"context"
"flag"
"fmt"
"os"
"strconv"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/settings"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/stats"
)
@@ -37,26 +36,28 @@ const (
func newCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
repoServerAddress string
repoServerTimeoutSeconds int
statusProcessors int
operationProcessors int
logLevel string
glogLevel int
cacheSrc func() (*cache.Cache, error)
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
repoServerAddress string
statusProcessors int
operationProcessors int
logLevel string
glogLevel int
)
var command = cobra.Command{
Use: cliName,
Short: "application-controller is a controller to operate on applications CRD",
RunE: func(c *cobra.Command, args []string) error {
cli.SetLogLevel(logLevel)
cli.SetGLogLevel(glogLevel)
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
// Set the glog level for the k8s go-client
_ = flag.CommandLine.Parse([]string{})
_ = flag.Lookup("logtostderr").Value.Set("true")
_ = flag.Lookup("v").Value.Set(strconv.Itoa(glogLevel))
config, err := clientConfig.ClientConfig()
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
errors.CheckError(err)
kubeClient := kubernetes.NewForConfigOrDie(config)
@@ -65,32 +66,37 @@ func newCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
// TODO (amatyushentsev): Use config map to store controller configuration
controllerConfig := controller.ApplicationControllerConfig{
Namespace: namespace,
InstanceID: "",
}
db := db.NewDB(namespace, kubeClient)
resyncDuration := time.Duration(appResyncPeriod) * time.Second
repoClientset := reposerver.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
repoClientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
appStateManager := controller.NewAppStateManager(db, appClient, repoClientset, namespace)
cache, err := cacheSrc()
errors.CheckError(err)
settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)
appController, err := controller.NewApplicationController(
appController := controller.NewApplicationController(
namespace,
settingsMgr,
kubeClient,
appClient,
repoClientset,
cache,
resyncDuration)
errors.CheckError(err)
db,
appStateManager,
resyncDuration,
&controllerConfig)
secretController := controller.NewSecretController(kubeClient, repoClientset, resyncDuration, namespace)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
log.Infof("Application Controller (version: %s) starting (namespace: %s)", argocd.GetVersion(), namespace)
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
go secretController.Run(ctx)
go appController.Run(ctx, statusProcessors, operationProcessors)
// Wait forever
select {}
},
@@ -98,13 +104,11 @@ func newCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", defaultAppResyncPeriod, "Time period in seconds for application resync.")
command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", 60, "Repo server RPC call timeout seconds.")
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().IntVar(&statusProcessors, "status-processors", 1, "Number of application status processors")
command.Flags().IntVar(&operationProcessors, "operation-processors", 1, "Number of application operation processors")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
cacheSrc = cache.AddCacheFlagsToCmd(&command)
return &command
}

View File

@@ -3,59 +3,50 @@ package main
import (
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/ksonnet"
"github.com/argoproj/argo-cd/util/stats"
"github.com/argoproj/argo-cd/util/tls"
)
const (
// CLIName is the name of the CLI
cliName = "argocd-repo-server"
port = 8081
)
func newCommand() *cobra.Command {
var (
logLevel string
parallelismLimit int64
cacheSrc func() (*cache.Cache, error)
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
logLevel string
)
var command = cobra.Command{
Use: cliName,
Short: "Run argocd-repo-server",
RunE: func(c *cobra.Command, args []string) error {
cli.SetLogLevel(logLevel)
tlsConfigCustomizer, err := tlsConfigCustomizerSrc()
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
cache, err := cacheSrc()
errors.CheckError(err)
server, err := reposerver.NewServer(git.NewFactory(), cache, tlsConfigCustomizer, parallelismLimit)
errors.CheckError(err)
server := reposerver.NewServer(git.NewFactory(), newCache())
grpc := server.CreateGRPC()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", common.PortRepoServer))
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
errors.CheckError(err)
http.Handle("/metrics", promhttp.Handler())
go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf(":%d", common.PortRepoServerMetrics), nil)) }()
ksVers, err := ksonnet.KsonnetVersion()
errors.CheckError(err)
log.Infof("argocd-repo-server %s serving on %s", argocd.GetVersion(), listener.Addr())
log.Infof("ksonnet version: %s", ksVers)
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
@@ -66,12 +57,19 @@ func newCommand() *cobra.Command {
}
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().Int64Var(&parallelismLimit, "parallelismlimit", 0, "Limit on number of concurrent manifest generation requests. Any value less than 1 means no limit.")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = cache.AddCacheFlagsToCmd(&command)
return &command
}
func newCache() cache.Cache {
return cache.NewInMemoryCache(repository.DefaultRepoCacheExpiration)
// client := redis.NewClient(&redis.Options{
// Addr: "localhost:6379",
// Password: "",
// DB: 0,
// })
// return cache.NewRedisCache(client, repository.DefaultRepoCacheExpiration)
}
func main() {
if err := newCommand().Execute(); err != nil {
fmt.Println(err)

View File

@@ -2,75 +2,66 @@ package commands
import (
"context"
"flag"
"strconv"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/server"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/stats"
"github.com/argoproj/argo-cd/util/tls"
)
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
insecure bool
logLevel string
glogLevel int
clientConfig clientcmd.ClientConfig
staticAssetsDir string
baseHRef string
repoServerAddress string
dexServerAddress string
disableAuth bool
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
cacheSrc func() (*cache.Cache, error)
insecure bool
logLevel string
glogLevel int
clientConfig clientcmd.ClientConfig
staticAssetsDir string
repoServerAddress string
disableAuth bool
)
var command = &cobra.Command{
Use: cliName,
Short: "Run the argocd API server",
Long: "Run the argocd API server",
Run: func(c *cobra.Command, args []string) {
cli.SetLogLevel(logLevel)
cli.SetGLogLevel(glogLevel)
level, err := log.ParseLevel(logLevel)
errors.CheckError(err)
log.SetLevel(level)
// Set the glog level for the k8s go-client
_ = flag.CommandLine.Parse([]string{})
_ = flag.Lookup("logtostderr").Value.Set("true")
_ = flag.Lookup("v").Value.Set(strconv.Itoa(glogLevel))
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
config.QPS = common.K8sClientConfigQPS
config.Burst = common.K8sClientConfigBurst
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
tlsConfigCustomizer, err := tlsConfigCustomizerSrc()
errors.CheckError(err)
cache, err := cacheSrc()
errors.CheckError(err)
kubeclientset := kubernetes.NewForConfigOrDie(config)
appclientset := appclientset.NewForConfigOrDie(config)
repoclientset := reposerver.NewRepoServerClientset(repoServerAddress, 0)
repoclientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
argoCDOpts := server.ArgoCDServerOpts{
Insecure: insecure,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
BaseHRef: baseHRef,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
DexServerAddr: dexServerAddress,
DisableAuth: disableAuth,
TLSConfigCustomizer: tlsConfigCustomizer,
Cache: cache,
Insecure: insecure,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
DisableAuth: disableAuth,
}
stats.RegisterStackDumper()
@@ -78,10 +69,10 @@ func NewCommand() *cobra.Command {
stats.RegisterHeapDumper("memprofile")
for {
argocd := server.NewServer(argoCDOpts)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
argocd := server.NewServer(ctx, argoCDOpts)
argocd.Run(ctx, common.PortAPIServer)
argocd.Run(ctx, 8080)
cancel()
}
},
@@ -90,14 +81,10 @@ func NewCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().BoolVar(&insecure, "insecure", false, "Run server without TLS")
command.Flags().StringVar(&staticAssetsDir, "staticassets", "", "Static assets directory path")
command.Flags().StringVar(&baseHRef, "basehref", "/", "Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address")
command.Flags().StringVar(&dexServerAddress, "dex-server", common.DefaultDexServerAddr, "Dex server address")
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().BoolVar(&disableAuth, "disable-auth", false, "Disable client authentication")
command.AddCommand(cli.NewVersionCmd(cliName))
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
cacheSrc = cache.AddCacheFlagsToCmd(command)
return command
}

View File

@@ -1,37 +1,30 @@
package main
import (
"bufio"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"syscall"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/dex"
"github.com/argoproj/argo-cd/util/settings"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/dex"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
@@ -41,15 +34,9 @@ import (
const (
// CLIName is the name of the CLI
cliName = "argocd-util"
// YamlSeparator separates sections of a YAML file
yamlSeparator = "---\n"
)
var (
configMapResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}
secretResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
applicationsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applications"}
appprojectsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "appprojects"}
// YamlSeparator separates sections of a YAML file
yamlSeparator = "\n---\n"
)
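A note on the two separator values above: `strings.Split` matches its separator anywhere in the input, so the `"\n---\n"` form only fires on a `---` standing alone on its own line, while `"---\n"` would also fire wherever a line merely ends in `---`. A self-contained sketch of the round trip the export and import commands rely on (illustrative only, not part of this file):
```go
package main

import (
	"fmt"
	"strings"
)

const yamlSeparator = "\n---\n"

func main() {
	// Join three exported sections the way the export command joins them.
	backup := strings.Join([]string{"settings: {}", "repos: []", "clusters: []"}, yamlSeparator)
	// The import command recovers the sections with a plain split.
	for i, section := range strings.Split(backup, yamlSeparator) {
		fmt.Printf("section %d: %q\n", i, section)
	}
}
```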
// NewCommand returns a new instance of an argocd command
@@ -60,7 +47,7 @@ func NewCommand() *cobra.Command {
var command = &cobra.Command{
Use: cliName,
Short: "argocd-util has internal tools used by Argo CD",
Short: "argocd-util has internal tools used by ArgoCD",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
@@ -71,7 +58,7 @@ func NewCommand() *cobra.Command {
command.AddCommand(NewGenDexConfigCommand())
command.AddCommand(NewImportCommand())
command.AddCommand(NewExportCommand())
command.AddCommand(NewClusterConfig())
command.AddCommand(NewSettingsCommand())
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
return command
@@ -83,7 +70,7 @@ func NewRunDexCommand() *cobra.Command {
)
var command = cobra.Command{
Use: "rundex",
Short: "Runs dex generating a config using settings from the Argo CD configmap and secret",
Short: "Runs dex generating a config using settings from the ArgoCD configmap and secret",
RunE: func(c *cobra.Command, args []string) error {
_, err := exec.LookPath("dex")
errors.CheckError(err)
@@ -92,15 +79,17 @@ func NewRunDexCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
prevSettings, err := settingsMgr.GetSettings()
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
updateCh := make(chan *settings.ArgoCDSettings, 1)
ctx := context.Background()
settingsMgr.StartNotifier(ctx, settings)
updateCh := make(chan struct{}, 1)
settingsMgr.Subscribe(updateCh)
for {
var cmd *exec.Cmd
dexCfgBytes, err := dex.GenerateDexConfigYAML(prevSettings)
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
errors.CheckError(err)
if len(dexCfgBytes) == 0 {
log.Infof("dex is not configured")
@@ -117,11 +106,10 @@ func NewRunDexCommand() *cobra.Command {
// loop until the dex config changes
for {
newSettings := <-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(newSettings)
<-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
errors.CheckError(err)
if string(newDexCfgBytes) != string(dexCfgBytes) {
prevSettings = newSettings
log.Infof("dex config modified. restarting dex")
if cmd != nil && cmd.Process != nil {
err = cmd.Process.Signal(syscall.SIGTERM)
@@ -149,14 +137,14 @@ func NewGenDexConfigCommand() *cobra.Command {
)
var command = cobra.Command{
Use: "gendexcfg",
Short: "Generates a dex config from Argo CD settings",
Short: "Generates a dex config from ArgoCD settings",
RunE: func(c *cobra.Command, args []string) error {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
@@ -166,29 +154,7 @@ func NewGenDexConfigCommand() *cobra.Command {
return nil
}
if out == "" {
dexCfg := make(map[string]interface{})
err := yaml.Unmarshal(dexCfgBytes, &dexCfg)
errors.CheckError(err)
if staticClientsInterface, ok := dexCfg["staticClients"]; ok {
if staticClients, ok := staticClientsInterface.([]interface{}); ok {
for i := range staticClients {
staticClient := staticClients[i]
if mappings, ok := staticClient.(map[string]interface{}); ok {
for key := range mappings {
if key == "secret" {
mappings[key] = "******"
}
}
staticClients[i] = mappings
}
}
dexCfg["staticClients"] = staticClients
}
}
errors.CheckError(err)
maskedDexCfgBytes, err := yaml.Marshal(dexCfg)
errors.CheckError(err)
fmt.Printf(string(maskedDexCfgBytes))
fmt.Printf(string(dexCfgBytes))
} else {
err = ioutil.WriteFile(out, dexCfgBytes, 0644)
errors.CheckError(err)
@@ -206,153 +172,94 @@ func NewGenDexConfigCommand() *cobra.Command {
func NewImportCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
prune bool
dryRun bool
)
var command = cobra.Command{
Use: "import SOURCE",
Short: "Import Argo CD data from stdin (specify `-') or a file",
Run: func(c *cobra.Command, args []string) {
RunE: func(c *cobra.Command, args []string) error {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
var (
input []byte
err error
newSettings *settings.ArgoCDSettings
newRepos []*v1alpha1.Repository
newClusters []*v1alpha1.Cluster
newApps []*v1alpha1.Application
newRBACCM *apiv1.ConfigMap
)
if in := args[0]; in == "-" {
input, err = ioutil.ReadAll(os.Stdin)
errors.CheckError(err)
} else {
input, err = ioutil.ReadFile(in)
errors.CheckError(err)
}
inputStrings := strings.Split(string(input), yamlSeparator)
err = yaml.Unmarshal([]byte(inputStrings[0]), &newSettings)
errors.CheckError(err)
err = yaml.Unmarshal([]byte(inputStrings[1]), &newRepos)
errors.CheckError(err)
err = yaml.Unmarshal([]byte(inputStrings[2]), &newClusters)
errors.CheckError(err)
err = yaml.Unmarshal([]byte(inputStrings[3]), &newApps)
errors.CheckError(err)
err = yaml.Unmarshal([]byte(inputStrings[4]), &newRBACCM)
errors.CheckError(err)
config, err := clientConfig.ClientConfig()
config.QPS = 100
config.Burst = 50
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
acdClients := newArgoCDClientsets(config, namespace)
kubeClientset := kubernetes.NewForConfigOrDie(config)
var input []byte
if in := args[0]; in == "-" {
input, err = ioutil.ReadAll(os.Stdin)
} else {
input, err = ioutil.ReadFile(in)
}
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
err = settingsMgr.SaveSettings(newSettings)
errors.CheckError(err)
var dryRunMsg string
if dryRun {
dryRunMsg = " (dry run)"
}
db := db.NewDB(namespace, kubeClientset)
// pruneObjects tracks live objects and their current resource versions. Any remaining
// items in this map indicate that the resource should be pruned, since it no longer
// appears in the backup
pruneObjects := make(map[kube.ResourceKey]string)
configMaps, err := acdClients.configMaps.List(metav1.ListOptions{})
_, err = kubeClientset.CoreV1().ConfigMaps(namespace).Create(newRBACCM)
errors.CheckError(err)
for _, cm := range configMaps.Items {
cmName := cm.GetName()
if cmName == common.ArgoCDConfigMapName || cmName == common.ArgoCDRBACConfigMapName {
pruneObjects[kube.ResourceKey{Group: "", Kind: "ConfigMap", Name: cm.GetName()}] = cm.GetResourceVersion()
}
}
secrets, err := acdClients.secrets.List(metav1.ListOptions{})
errors.CheckError(err)
for _, secret := range secrets.Items {
if isArgoCDSecret(nil, secret) {
pruneObjects[kube.ResourceKey{Group: "", Kind: "Secret", Name: secret.GetName()}] = secret.GetResourceVersion()
}
}
applications, err := acdClients.applications.List(metav1.ListOptions{})
errors.CheckError(err)
for _, app := range applications.Items {
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "Application", Name: app.GetName()}] = app.GetResourceVersion()
}
projects, err := acdClients.projects.List(metav1.ListOptions{})
errors.CheckError(err)
for _, proj := range projects.Items {
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "AppProject", Name: proj.GetName()}] = proj.GetResourceVersion()
}
// Create or replace existing object
objs, err := kube.SplitYAML(string(input))
errors.CheckError(err)
for _, obj := range objs {
gvk := obj.GroupVersionKind()
key := kube.ResourceKey{Group: gvk.Group, Kind: gvk.Kind, Name: obj.GetName()}
resourceVersion, exists := pruneObjects[key]
delete(pruneObjects, key)
var dynClient dynamic.ResourceInterface
switch obj.GetKind() {
case "Secret":
dynClient = acdClients.secrets
case "ConfigMap":
dynClient = acdClients.configMaps
case "AppProject":
dynClient = acdClients.projects
case "Application":
dynClient = acdClients.applications
}
if !exists {
if !dryRun {
_, err = dynClient.Create(obj, metav1.CreateOptions{})
errors.CheckError(err)
}
fmt.Printf("%s/%s %s created%s\n", gvk.Group, gvk.Kind, obj.GetName(), dryRunMsg)
} else {
if !dryRun {
obj.SetResourceVersion(resourceVersion)
_, err = dynClient.Update(obj, metav1.UpdateOptions{})
errors.CheckError(err)
}
fmt.Printf("%s/%s %s replaced%s\n", gvk.Group, gvk.Kind, obj.GetName(), dryRunMsg)
for _, repo := range newRepos {
_, err := db.CreateRepository(context.Background(), repo)
if err != nil {
log.Warn(err)
}
}
// Delete objects not in backup
for key := range pruneObjects {
if prune {
var dynClient dynamic.ResourceInterface
switch key.Kind {
case "Secret":
dynClient = acdClients.secrets
case "AppProject":
dynClient = acdClients.projects
case "Application":
dynClient = acdClients.applications
default:
log.Fatalf("Unexpected kind '%s' in prune list", key.Kind)
}
if !dryRun {
err = dynClient.Delete(key.Name, &metav1.DeleteOptions{})
errors.CheckError(err)
}
fmt.Printf("%s/%s %s pruned%s\n", key.Group, key.Kind, key.Name, dryRunMsg)
} else {
fmt.Printf("%s/%s %s needs pruning\n", key.Group, key.Kind, key.Name)
for _, cluster := range newClusters {
_, err := db.CreateCluster(context.Background(), cluster)
if err != nil {
log.Warn(err)
}
}
appClientset := appclientset.NewForConfigOrDie(config)
for _, app := range newApps {
out, err := appClientset.ArgoprojV1alpha1().Applications(namespace).Create(app)
errors.CheckError(err)
log.Println(out)
}
return nil
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().BoolVar(&dryRun, "dry-run", false, "Print what will be performed")
command.Flags().BoolVar(&prune, "prune", false, "Prune secrets, applications and projects which do not appear in the backup")
return &command
}
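The create/replace/prune bookkeeping above is a small mark-and-sweep: record what is live, cross off what the backup mentions, prune the rest. A minimal self-contained sketch of that pattern (the resourceKey type is a hypothetical stand-in for kube.ResourceKey):
```go
package main

import "fmt"

// resourceKey is a hypothetical stand-in for kube.ResourceKey.
type resourceKey struct{ Kind, Name string }

func main() {
	// 1. Mark: record every live Argo CD object and its resource version.
	live := map[resourceKey]string{
		{Kind: "ConfigMap", Name: "argocd-rbac-cm"}: "42",
		{Kind: "Application", Name: "guestbook"}:    "7",
	}
	// 2. Apply the backup: existing objects are replaced at their recorded
	//    resource version, new ones created; either way they leave the map.
	backup := []resourceKey{{Kind: "ConfigMap", Name: "argocd-rbac-cm"}}
	for _, key := range backup {
		if rv, exists := live[key]; exists {
			fmt.Printf("%s %s replaced (resourceVersion %s)\n", key.Kind, key.Name, rv)
			delete(live, key)
		} else {
			fmt.Printf("%s %s created\n", key.Kind, key.Name)
		}
	}
	// 3. Sweep: anything still in the map is absent from the backup and is a
	//    prune candidate.
	for key := range live {
		fmt.Printf("%s %s needs pruning\n", key.Kind, key.Name)
	}
}
```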
type argoCDClientsets struct {
configMaps dynamic.ResourceInterface
secrets dynamic.ResourceInterface
applications dynamic.ResourceInterface
projects dynamic.ResourceInterface
}
func newArgoCDClientsets(config *rest.Config, namespace string) *argoCDClientsets {
dynamicIf, err := dynamic.NewForConfig(config)
errors.CheckError(err)
return &argoCDClientsets{
configMaps: dynamicIf.Resource(configMapResource).Namespace(namespace),
secrets: dynamicIf.Resource(secretResource).Namespace(namespace),
applications: dynamicIf.Resource(applicationsResource).Namespace(namespace),
projects: dynamicIf.Resource(appprojectsResource).Namespace(namespace),
}
}
// NewExportCommand defines a new command for exporting Kubernetes and Argo CD resources.
func NewExportCommand() *cobra.Command {
var (
@@ -362,48 +269,69 @@ func NewExportCommand() *cobra.Command {
var command = cobra.Command{
Use: "export",
Short: "Export all Argo CD data to stdout (default) or a file",
Run: func(c *cobra.Command, args []string) {
RunE: func(c *cobra.Command, args []string) error {
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
var writer io.Writer
if out == "-" {
writer = os.Stdout
} else {
f, err := os.Create(out)
errors.CheckError(err)
defer util.Close(f)
writer = bufio.NewWriter(f)
settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
// certificate data is included in secrets that are exported alongside
settings.Certificate = nil
db := db.NewDB(namespace, kubeClientset)
clusters, err := db.ListClusters(context.Background())
errors.CheckError(err)
repos, err := db.ListRepositories(context.Background())
errors.CheckError(err)
appClientset := appclientset.NewForConfigOrDie(config)
apps, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(metav1.ListOptions{})
errors.CheckError(err)
rbacCM, err := kubeClientset.CoreV1().ConfigMaps(namespace).Get(common.ArgoCDRBACConfigMapName, metav1.GetOptions{})
errors.CheckError(err)
// remove extraneous cruft from output
rbacCM.ObjectMeta = metav1.ObjectMeta{
Name: rbacCM.ObjectMeta.Name,
}
acdClients := newArgoCDClientsets(config, namespace)
acdConfigMap, err := acdClients.configMaps.Get(common.ArgoCDConfigMapName, metav1.GetOptions{})
errors.CheckError(err)
export(writer, *acdConfigMap)
acdRBACConfigMap, err := acdClients.configMaps.Get(common.ArgoCDRBACConfigMapName, metav1.GetOptions{})
errors.CheckError(err)
export(writer, *acdRBACConfigMap)
referencedSecrets := getReferencedSecrets(*acdConfigMap)
secrets, err := acdClients.secrets.List(metav1.ListOptions{})
errors.CheckError(err)
for _, secret := range secrets.Items {
if isArgoCDSecret(referencedSecrets, secret) {
export(writer, secret)
// remove extraneous cruft from output
for idx, app := range apps.Items {
apps.Items[idx].ObjectMeta = metav1.ObjectMeta{
Name: app.ObjectMeta.Name,
Finalizers: app.ObjectMeta.Finalizers,
}
apps.Items[idx].Status = v1alpha1.ApplicationStatus{
History: app.Status.History,
}
apps.Items[idx].Operation = nil
}
projects, err := acdClients.projects.List(metav1.ListOptions{})
errors.CheckError(err)
for _, proj := range projects.Items {
export(writer, proj)
}
applications, err := acdClients.applications.List(metav1.ListOptions{})
errors.CheckError(err)
for _, app := range applications.Items {
export(writer, app)
// take a list of exportable objects, marshal them to YAML,
// and return a string joined by a delimiter
output := func(delimiter string, oo ...interface{}) string {
out := make([]string, 0)
for _, o := range oo {
data, err := yaml.Marshal(o)
errors.CheckError(err)
out = append(out, string(data))
}
return strings.Join(out, delimiter)
}(yamlSeparator, settings, repos.Items, clusters.Items, apps.Items, rbacCM)
if out == "-" {
fmt.Println(output)
} else {
err = ioutil.WriteFile(out, []byte(output), 0644)
errors.CheckError(err)
}
return nil
},
}
@@ -413,130 +341,37 @@ func NewExportCommand() *cobra.Command {
return &command
}
// getReferencedSecrets examines the argocd-cm config for any referenced repo secrets and returns a
// map of all referenced secrets.
func getReferencedSecrets(un unstructured.Unstructured) map[string]bool {
var cm apiv1.ConfigMap
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &cm)
errors.CheckError(err)
referencedSecrets := make(map[string]bool)
if reposRAW, ok := cm.Data["repositories"]; ok {
repoCreds := make([]settings.RepoCredentials, 0)
err := yaml.Unmarshal([]byte(reposRAW), &repoCreds)
errors.CheckError(err)
for _, cred := range repoCreds {
if cred.PasswordSecret != nil {
referencedSecrets[cred.PasswordSecret.Name] = true
}
if cred.SSHPrivateKeySecret != nil {
referencedSecrets[cred.SSHPrivateKeySecret.Name] = true
}
if cred.UsernameSecret != nil {
referencedSecrets[cred.UsernameSecret.Name] = true
}
}
}
if helmReposRAW, ok := cm.Data["helm.repositories"]; ok {
helmRepoCreds := make([]settings.HelmRepoCredentials, 0)
err := yaml.Unmarshal([]byte(helmReposRAW), &helmRepoCreds)
errors.CheckError(err)
for _, cred := range helmRepoCreds {
if cred.CASecret != nil {
referencedSecrets[cred.CASecret.Name] = true
}
if cred.CertSecret != nil {
referencedSecrets[cred.CertSecret.Name] = true
}
if cred.KeySecret != nil {
referencedSecrets[cred.KeySecret.Name] = true
}
if cred.UsernameSecret != nil {
referencedSecrets[cred.UsernameSecret.Name] = true
}
if cred.PasswordSecret != nil {
referencedSecrets[cred.PasswordSecret.Name] = true
}
}
}
return referencedSecrets
}
// isArgoCDSecret returns whether or not the given secret is a part of Argo CD configuration
// (e.g. argocd-secret, repo credentials, or cluster credentials)
func isArgoCDSecret(repoSecretRefs map[string]bool, un unstructured.Unstructured) bool {
secretName := un.GetName()
if secretName == common.ArgoCDSecretName {
return true
}
if repoSecretRefs != nil {
if _, ok := repoSecretRefs[secretName]; ok {
return true
}
}
if labels := un.GetLabels(); labels != nil {
if _, ok := labels[common.LabelKeySecretType]; ok {
return true
}
}
if annotations := un.GetAnnotations(); annotations != nil {
if annotations[common.AnnotationKeyManagedBy] == common.AnnotationValueManagedByArgoCD {
return true
}
}
return false
}
// export writes the unstructured object and removes extraneous cruft from output before writing
func export(w io.Writer, un unstructured.Unstructured) {
name := un.GetName()
finalizers := un.GetFinalizers()
apiVersion := un.GetAPIVersion()
kind := un.GetKind()
labels := un.GetLabels()
annotations := un.GetAnnotations()
unstructured.RemoveNestedField(un.Object, "metadata")
un.SetName(name)
un.SetFinalizers(finalizers)
un.SetAPIVersion(apiVersion)
un.SetKind(kind)
un.SetLabels(labels)
un.SetAnnotations(annotations)
data, err := yaml.Marshal(un.Object)
errors.CheckError(err)
_, err = w.Write(data)
errors.CheckError(err)
_, err = w.Write([]byte(yamlSeparator))
errors.CheckError(err)
}
// NewClusterConfig returns a new instance of `argocd-util kubeconfig` command
func NewClusterConfig() *cobra.Command {
// NewSettingsCommand returns a new instance of `argocd-util settings` command
func NewSettingsCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
clientConfig clientcmd.ClientConfig
updateSuperuser bool
superuserPassword string
updateSignature bool
)
var command = &cobra.Command{
Use: "kubeconfig CLUSTER_URL OUTPUT_PATH",
Short: "Generates kubeconfig for the specified cluster",
Use: "settings",
Short: "Creates or updates ArgoCD settings",
Long: "Creates or updates ArgoCD settings",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
serverUrl := args[0]
output := args[1]
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
namespace, wasSpecified, err := clientConfig.Namespace()
errors.CheckError(err)
if !wasSpecified {
namespace = "argocd"
}
kubeclientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
settingsMgr := settings.NewSettingsManager(kubeclientset, namespace)
cluster, err := db.NewDB(namespace, settings.NewSettingsManager(context.Background(), kubeclientset, namespace), kubeclientset).GetCluster(context.Background(), serverUrl)
errors.CheckError(err)
err = kube.WriteKubeConfig(cluster.RESTConfig(), namespace, output)
errors.CheckError(err)
_ = settings.UpdateSettings(superuserPassword, settingsMgr, updateSignature, updateSuperuser, namespace)
},
}
command.Flags().BoolVar(&updateSuperuser, "update-superuser", false, "force updating the superuser password")
command.Flags().StringVar(&superuserPassword, "superuser-password", "", "password for super user")
command.Flags().BoolVar(&updateSignature, "update-signature", false, "force updating the server-side token signing signature")
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}

View File

@@ -6,15 +6,13 @@ import (
"os"
"syscall"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/server/account"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/localconfig"
"github.com/argoproj/argo-cd/util/settings"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
)
func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
@@ -52,9 +50,7 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co
fmt.Print("\n")
}
if newPassword == "" {
var err error
newPassword, err = cli.ReadAndConfirmPassword()
errors.CheckError(err)
newPassword = settings.ReadAndConfirmPassword()
}
updatePasswordRequest := account.UpdatePasswordRequest{
@@ -62,30 +58,11 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co
CurrentPassword: currentPassword,
}
acdClient := argocdclient.NewClientOrDie(clientOpts)
conn, usrIf := acdClient.NewAccountClientOrDie()
conn, usrIf := argocdclient.NewClientOrDie(clientOpts).NewAccountClientOrDie()
defer util.Close(conn)
ctx := context.Background()
_, err := usrIf.UpdatePassword(ctx, &updatePasswordRequest)
_, err := usrIf.UpdatePassword(context.Background(), &updatePasswordRequest)
errors.CheckError(err)
fmt.Printf("Password updated\n")
// Get a new JWT token after updating the password
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
errors.CheckError(err)
configCtx, err := localCfg.ResolveContext(clientOpts.Context)
errors.CheckError(err)
claims, err := configCtx.User.Claims()
errors.CheckError(err)
tokenString := passwordLogin(acdClient, claims.Subject, newPassword)
localCfg.UpsertUser(localconfig.User{
Name: localCfg.CurrentContext,
AuthToken: tokenString,
})
err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath)
errors.CheckError(err)
fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext)
},
}

File diff suppressed because it is too large

View File

@@ -9,19 +9,17 @@ import (
"strings"
"text/tabwriter"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/util"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// NewClusterCommand returns a new instance of an `argocd cluster` command
@@ -45,10 +43,8 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
// NewClusterAddCommand returns a new instance of an `argocd cluster add` command
func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
var (
inCluster bool
upsert bool
awsRoleArn string
awsClusterName string
inCluster bool
upsert bool
)
var command = &cobra.Command{
Use: "add",
@@ -66,7 +62,6 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
if clstContext == nil {
log.Fatalf("Context %s does not exist in kubeconfig", args[0])
}
overrides := clientcmd.ConfigOverrides{
Context: *clstContext,
}
@@ -74,23 +69,12 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
managerBearerToken := ""
var awsAuthConf *argoappv1.AWSAuthConfig
if awsClusterName != "" {
awsAuthConf = &argoappv1.AWSAuthConfig{
ClusterName: awsClusterName,
RoleARN: awsRoleArn,
}
} else {
// Install RBAC resources for managing the cluster
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
managerBearerToken, err = common.InstallClusterManagerRBAC(clientset)
errors.CheckError(err)
}
// Install RBAC resources for managing the cluster
managerBearerToken := common.InstallClusterManagerRBAC(conf)
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
clst := NewCluster(args[0], conf, managerBearerToken, awsAuthConf)
clst := NewCluster(args[0], conf, managerBearerToken)
if inCluster {
clst.Server = common.KubernetesInternalAPIServerAddr
}
@@ -104,10 +88,8 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
},
}
command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file")
command.Flags().BoolVar(&inCluster, "in-cluster", false, "Indicates Argo CD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
command.Flags().BoolVar(&inCluster, "in-cluster", false, "Indicates ArgoCD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing cluster with the same name even if the spec differs")
command.Flags().StringVar(&awsClusterName, "aws-cluster-name", "", "AWS cluster name. If set, aws-iam-authenticator will be used to access the cluster")
command.Flags().StringVar(&awsRoleArn, "aws-role-arn", "", "Optional AWS role ARN. If set, AWS IAM Authenticator assumes this role to perform cluster operations instead of using the default AWS credential provider chain.")
return command
}
@@ -150,7 +132,7 @@ func printKubeContexts(ca clientcmd.ConfigAccess) {
}
}
func NewCluster(name string, conf *rest.Config, managerBearerToken string, awsAuthConf *argoappv1.AWSAuthConfig) *argoappv1.Cluster {
func NewCluster(name string, conf *rest.Config, managerBearerToken string) *argoappv1.Cluster {
tlsClientConfig := argoappv1.TLSClientConfig{
Insecure: conf.TLSClientConfig.Insecure,
ServerName: conf.TLSClientConfig.ServerName,
@@ -179,7 +161,6 @@ func NewCluster(name string, conf *rest.Config, managerBearerToken string, awsAu
Config: argoappv1.ClusterConfig{
BearerToken: managerBearerToken,
TLSClientConfig: tlsClientConfig,
AWSAuthConfig: awsAuthConf,
},
}
return &clst
@@ -221,14 +202,9 @@ func NewClusterRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
}
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
// clientset, err := kubernetes.NewForConfig(conf)
// errors.CheckError(err)
for _, clusterName := range args {
// TODO(jessesuen): find the right context and remove manager RBAC artifacts
// err := common.UninstallClusterManagerRBAC(clientset)
// errors.CheckError(err)
// common.UninstallClusterManagerRBAC(conf)
_, err := clusterIf.Delete(context.Background(), &cluster.ClusterQuery{Server: clusterName})
errors.CheckError(err)
}

View File

@@ -2,8 +2,4 @@ package commands
const (
cliName = "argocd"
// DefaultSSOLocalPort is the localhost port to listen on for the temporary web server performing
// the OAuth2 login flow.
DefaultSSOLocalPort = 8085
)

View File

@@ -8,12 +8,11 @@ import (
"strings"
"text/tabwriter"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util/localconfig"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewContextCommand returns a new instance of an `argocd ctx` command

View File

@@ -2,19 +2,15 @@ package commands
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"os"
"strconv"
"time"
oidc "github.com/coreos/go-oidc"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/server/session"
@@ -23,8 +19,11 @@ import (
"github.com/argoproj/argo-cd/util/cli"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/localconfig"
oidcutil "github.com/argoproj/argo-cd/util/oidc"
"github.com/argoproj/argo-cd/util/rand"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
)
// NewLoginCommand returns a new instance of `argocd login` command
@@ -34,7 +33,6 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
username string
password string
sso bool
ssoPort int
)
var command = &cobra.Command{
Use: "login SERVER",
@@ -68,7 +66,6 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
ServerAddr: server,
Insecure: globalClientOpts.Insecure,
PlainText: globalClientOpts.PlainText,
GRPCWeb: globalClientOpts.GRPCWeb,
}
acdClient := argocdclient.NewClientOrDie(&clientOpts)
setConn, setIf := acdClient.NewSettingsClientOrDie()
@@ -84,15 +81,12 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
if !sso {
tokenString = passwordLogin(acdClient, username, password)
} else {
ctx := context.Background()
httpClient, err := acdClient.HTTPClient()
acdSet, err := setIf.Get(context.Background(), &settings.SettingsQuery{})
errors.CheckError(err)
ctx = oidc.ClientContext(ctx, httpClient)
acdSet, err := setIf.Get(ctx, &settings.SettingsQuery{})
errors.CheckError(err)
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
errors.CheckError(err)
tokenString, refreshToken = oauth2Login(ctx, ssoPort, oauth2conf, provider)
if !ssoConfigured(acdSet) {
log.Fatalf("ArgoCD instance is not configured with SSO")
}
tokenString, refreshToken = oauth2Login(server, clientOpts.PlainText)
}
parser := &jwt.Parser{
@@ -113,7 +107,6 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
Server: server,
PlainText: globalClientOpts.PlainText,
Insecure: globalClientOpts.Insecure,
GRPCWeb: globalClientOpts.GRPCWeb,
})
localCfg.UpsertUser(localconfig.User{
Name: ctxName,
@@ -137,8 +130,7 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
command.Flags().StringVar(&ctxName, "name", "", "name to use for the context")
command.Flags().StringVar(&username, "username", "", "the username of an account to authenticate")
command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate")
command.Flags().BoolVar(&sso, "sso", false, "perform SSO login")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "port to run local OAuth2 login application")
command.Flags().BoolVar(&sso, "sso", false, "Perform SSO login")
return command
}
@@ -152,107 +144,97 @@ func userDisplayName(claims jwt.MapClaims) string {
return claims["sub"].(string)
}
func ssoConfigured(set *settings.Settings) bool {
return set.DexConfig != nil && len(set.DexConfig.Connectors) > 0
}
// getFreePort asks the kernel for a free open port that is ready to use.
func getFreePort() (int, error) {
ln, err := net.Listen("tcp", "[::]:0")
if err != nil {
return 0, err
}
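// Closing the listener releases the port for the callback HTTP server started
// below; a brief window remains in which another process could claim it first.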
return ln.Addr().(*net.TCPAddr).Port, ln.Close()
}
// oauth2Login opens a browser, runs a temporary HTTP server to delegate OAuth2 login flow and
// returns the JWT token and a refresh token (if supported)
func oauth2Login(ctx context.Context, port int, oauth2conf *oauth2.Config, provider *oidc.Provider) (string, string) {
oauth2conf.RedirectURL = fmt.Sprintf("http://localhost:%d/auth/callback", port)
oidcConf, err := oidcutil.ParseConfig(provider)
func oauth2Login(host string, plaintext bool) (string, string) {
ctx := context.Background()
port, err := getFreePort()
errors.CheckError(err)
log.Debug("OIDC Configuration:")
log.Debugf(" supported_scopes: %v", oidcConf.ScopesSupported)
log.Debugf(" response_types_supported: %v", oidcConf.ResponseTypesSupported)
// handledRequests ensures we do not handle more requests than necessary
handledRequests := 0
// completionChan is to signal flow completed. Non-empty string indicates error
completionChan := make(chan string)
// stateNonce is an OAuth2 state nonce
stateNonce := rand.RandString(10)
var scheme = "https"
if plaintext {
scheme = "http"
}
conf := &oauth2.Config{
ClientID: common.ArgoCDCLIClientAppID,
Scopes: []string{"openid", "profile", "email", "groups", "offline_access"},
Endpoint: oauth2.Endpoint{
AuthURL: fmt.Sprintf("%s://%s%s/auth", scheme, host, common.DexAPIEndpoint),
TokenURL: fmt.Sprintf("%s://%s%s/token", scheme, host, common.DexAPIEndpoint),
},
RedirectURL: fmt.Sprintf("http://localhost:%d/auth/callback", port),
}
srv := &http.Server{Addr: ":" + strconv.Itoa(port)}
var tokenString string
var refreshToken string
loginCompleted := make(chan struct{})
handleErr := func(w http.ResponseWriter, errMsg string) {
http.Error(w, errMsg, http.StatusBadRequest)
completionChan <- errMsg
}
// Authorization redirect callback from OAuth2 auth flow.
// Handles both implicit and authorization code flow
callbackHandler := func(w http.ResponseWriter, r *http.Request) {
log.Debugf("Callback: %s", r.URL)
defer func() {
loginCompleted <- struct{}{}
}()
if formErr := r.FormValue("error"); formErr != "" {
handleErr(w, formErr+": "+r.FormValue("error_description"))
// Authorization redirect callback from OAuth2 auth flow.
if errMsg := r.FormValue("error"); errMsg != "" {
http.Error(w, errMsg+": "+r.FormValue("error_description"), http.StatusBadRequest)
log.Fatal(errMsg)
return
}
handledRequests++
if handledRequests > 2 {
// Since the implicit flow will redirect back to ourselves, this counter ensures we do not
// fall into a redirect loop (e.g. the user visits the page by hand)
handleErr(w, "Unable to complete login flow: too many redirects")
code := r.FormValue("code")
if code == "" {
errMsg := fmt.Sprintf("no code in request: %q", r.Form)
http.Error(w, errMsg, http.StatusBadRequest)
log.Fatal(errMsg)
return
}
tok, err := conf.Exchange(ctx, code)
errors.CheckError(err)
log.Info("Authentication successful")
if len(r.Form) == 0 {
// If we get here, no form data was set. We presume to be performing an implicit login
// flow where the id_token is contained in a URL fragment, making it inaccessible to be
// read from the request. This javascript will redirect the browser to send the
// fragments as query parameters so our callback handler can read and return token.
fmt.Fprintf(w, `<script>window.location.search = window.location.hash.substring(1)</script>`)
var ok bool
tokenString, ok = tok.Extra("id_token").(string)
if !ok {
errMsg := "no id_token in token response"
http.Error(w, errMsg, http.StatusInternalServerError)
log.Fatal(errMsg)
return
}
if state := r.FormValue("state"); state != stateNonce {
handleErr(w, "Unknown state nonce")
return
}
tokenString = r.FormValue("id_token")
if tokenString == "" {
code := r.FormValue("code")
if code == "" {
handleErr(w, fmt.Sprintf("no code in request: %q", r.Form))
return
}
tok, err := oauth2conf.Exchange(ctx, code)
if err != nil {
handleErr(w, err.Error())
return
}
var ok bool
tokenString, ok = tok.Extra("id_token").(string)
if !ok {
handleErr(w, "no id_token in token response")
return
}
refreshToken, _ = tok.Extra("refresh_token").(string)
}
refreshToken, _ = tok.Extra("refresh_token").(string)
log.Debugf("Token: %s", tokenString)
log.Debugf("Refresh Token: %s", tokenString)
successPage := `
<div style="height:100px; width:100%!; display:flex; flex-direction: column; justify-content: center; align-items:center; background-color:#2ecc71; color:white; font-size:22"><div>Authentication successful!</div></div>
<p style="margin-top:20px; font-size:18; text-align:center">Authentication was successful, you can now return to CLI. This page will close automatically</p>
<script>window.onload=function(){setTimeout(this.close, 4000)}</script>
`
fmt.Fprintf(w, successPage)
completionChan <- ""
}
srv := &http.Server{Addr: "localhost:" + strconv.Itoa(port)}
http.HandleFunc("/auth/callback", callbackHandler)
// Redirect user to login & consent page to ask for permission for the scopes specified above.
fmt.Printf("Opening browser for authentication\n")
var url string
grantType := oidcutil.InferGrantType(oauth2conf, oidcConf)
switch grantType {
case oidcutil.GrantTypeAuthorizationCode:
url = oauth2conf.AuthCodeURL(stateNonce, oauth2.AccessTypeOffline)
case oidcutil.GrantTypeImplicit:
url = oidcutil.ImplicitFlowURL(oauth2conf, stateNonce, oauth2.AccessTypeOffline)
default:
log.Fatalf("Unsupported grant type: %v", grantType)
// add transport for self-signed certificate to context
sslcli := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
fmt.Printf("Performing %s flow login: %s\n", grantType, url)
ctx = context.WithValue(ctx, oauth2.HTTPClient, sslcli)
// Redirect user to login & consent page to ask for permission for the scopes specified above.
log.Info("Opening browser for authentication")
url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
log.Infof("Authentication URL: %s", url)
time.Sleep(1 * time.Second)
err = open.Run(url)
errors.CheckError(err)
@@ -261,16 +243,8 @@ func oauth2Login(ctx context.Context, port int, oauth2conf *oauth2.Config, provi
log.Fatalf("listen: %s\n", err)
}
}()
errMsg := <-completionChan
if errMsg != "" {
log.Fatal(errMsg)
}
fmt.Printf("Authentication successful\n")
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
<-loginCompleted
_ = srv.Shutdown(ctx)
log.Debugf("Token: %s", tokenString)
log.Debugf("Refresh Token: %s", refreshToken)
return tokenString, refreshToken
}

View File

@@ -1,29 +1,26 @@
package commands
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"time"
"github.com/dustin/go-humanize"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"strings"
"context"
"fmt"
"text/tabwriter"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/project"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/git"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/apis/meta/v1"
)
type projectOpts struct {
@@ -32,12 +29,6 @@ type projectOpts struct {
sources []string
}
type policyOpts struct {
action string
permission string
object string
}
func (opts *projectOpts) GetDestinations() []v1alpha1.ApplicationDestination {
destinations := make([]v1alpha1.ApplicationDestination, 0)
for _, destStr := range opts.destinations {
@@ -64,40 +55,22 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
os.Exit(1)
},
}
command.AddCommand(NewProjectRoleCommand(clientOpts))
command.AddCommand(NewProjectCreateCommand(clientOpts))
command.AddCommand(NewProjectGetCommand(clientOpts))
command.AddCommand(NewProjectDeleteCommand(clientOpts))
command.AddCommand(NewProjectListCommand(clientOpts))
command.AddCommand(NewProjectSetCommand(clientOpts))
command.AddCommand(NewProjectEditCommand(clientOpts))
command.AddCommand(NewProjectAddDestinationCommand(clientOpts))
command.AddCommand(NewProjectRemoveDestinationCommand(clientOpts))
command.AddCommand(NewProjectAddSourceCommand(clientOpts))
command.AddCommand(NewProjectRemoveSourceCommand(clientOpts))
command.AddCommand(NewProjectAllowClusterResourceCommand(clientOpts))
command.AddCommand(NewProjectDenyClusterResourceCommand(clientOpts))
command.AddCommand(NewProjectAllowNamespaceResourceCommand(clientOpts))
command.AddCommand(NewProjectDenyNamespaceResourceCommand(clientOpts))
return command
}
func addProjFlags(command *cobra.Command, opts *projectOpts) {
command.Flags().StringVarP(&opts.description, "description", "", "", "Project description")
command.Flags().StringVarP(&opts.description, "description", "", "desc", "Project description")
command.Flags().StringArrayVarP(&opts.destinations, "dest", "d", []string{},
"Permitted destination server and namespace (e.g. https://192.168.99.100:8443,default)")
command.Flags().StringArrayVarP(&opts.sources, "src", "s", []string{}, "Permitted git source repository URL")
}
func addPolicyFlags(command *cobra.Command, opts *policyOpts) {
command.Flags().StringVarP(&opts.action, "action", "a", "", "Action to grant/deny permission on (e.g. get, create, list, update, delete)")
command.Flags().StringVarP(&opts.permission, "permission", "p", "allow", "Whether to allow or deny access to object with the action. This can only be 'allow' or 'deny'")
command.Flags().StringVarP(&opts.object, "object", "o", "", "Object within the project to grant/deny access. Use '*' for a wildcard. Will want access to '<project>/<object>'")
}
func humanizeTimestamp(epoch int64) string {
ts := time.Unix(epoch, 0)
return fmt.Sprintf("%s (%s)", ts.Format(time.RFC3339), humanize.Time(ts))
"Allowed deployment destination. Includes comma separated server url and namespace (e.g. https://192.168.99.100:8443,default")
command.Flags().StringArrayVarP(&opts.sources, "src", "s", []string{}, "Allowed deployment source repository URL.")
}
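As a quick usage note, humanizeTimestamp renders an epoch both as RFC3339 and as a relative duration; e.g. (output depends on the current clock):

fmt.Println(humanizeTimestamp(time.Now().Add(-2 * time.Hour).Unix()))
// => e.g. "2019-02-26T10:04:05Z (2 hours ago)"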
// NewProjectCreateCommand returns a new instance of an `argocd proj create` command
@@ -269,13 +242,8 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
errors.CheckError(err)
for _, item := range proj.Spec.SourceRepos {
if item == "*" && item == url {
fmt.Printf("Source repository '*' already allowed in project\n")
return
}
if git.SameURL(item, url) {
fmt.Printf("Source repository '%s' already allowed in project\n", item)
return
if item == git.NormalizeGitURL(url) {
log.Fatal("Specified source repository is already defined in project")
}
}
proj.Spec.SourceRepos = append(proj.Spec.SourceRepos, url)
@@ -286,104 +254,6 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
return command
}
func modifyProjectResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, action func(proj *v1alpha1.AppProject, group string, kind string) bool) *cobra.Command {
return &cobra.Command{
Use: cmdUse,
Short: cmdDesc,
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, group, kind := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
if action(proj, group, kind) {
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
}
}
// NewProjectAllowNamespaceResourceCommand returns a new instance of an `argocd proj allow-namespace-resource` command
func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-namespace-resource PROJECT GROUP KIND"
desc := "Removes a namespaced API resource from the blacklist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
index := -1
for i, item := range proj.Spec.NamespaceResourceBlacklist {
if item.Group == group && item.Kind == kind {
index = i
break
}
}
if index == -1 {
fmt.Printf("Group '%s' and kind '%s' not in blacklisted namespaced resources\n", group, kind)
return false
}
proj.Spec.NamespaceResourceBlacklist = append(proj.Spec.NamespaceResourceBlacklist[:index], proj.Spec.NamespaceResourceBlacklist[index+1:]...)
return true
})
}
// NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command
func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-namespace-resource PROJECT GROUP KIND"
desc := "Adds a namespaced API resource to the blacklist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
for _, item := range proj.Spec.NamespaceResourceBlacklist {
if item.Group == group && item.Kind == kind {
fmt.Printf("Group '%s' and kind '%s' already present in blacklisted namespaced resources\n", group, kind)
return false
}
}
proj.Spec.NamespaceResourceBlacklist = append(proj.Spec.NamespaceResourceBlacklist, v1.GroupKind{Group: group, Kind: kind})
return true
})
}
// NewProjectDenyClusterResourceCommand returns a new instance of an `argocd proj deny-cluster-resource` command
func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-cluster-resource PROJECT GROUP KIND"
desc := "Removes a cluster-scoped API resource from the whitelist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
index := -1
for i, item := range proj.Spec.ClusterResourceWhitelist {
if item.Group == group && item.Kind == kind {
index = i
break
}
}
if index == -1 {
fmt.Printf("Group '%s' and kind '%s' not in whitelisted cluster resources\n", group, kind)
return false
}
proj.Spec.ClusterResourceWhitelist = append(proj.Spec.ClusterResourceWhitelist[:index], proj.Spec.ClusterResourceWhitelist[index+1:]...)
return true
})
}
// NewProjectAllowClusterResourceCommand returns a new instance of an `argocd proj allow-cluster-resource` command
func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-cluster-resource PROJECT GROUP KIND"
desc := "Adds a cluster-scoped API resource to the whitelist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
for _, item := range proj.Spec.ClusterResourceWhitelist {
if item.Group == group && item.Kind == kind {
fmt.Printf("Group '%s' and kind '%s' already present in whitelisted cluster resources\n", group, kind)
return false
}
}
proj.Spec.ClusterResourceWhitelist = append(proj.Spec.ClusterResourceWhitelist, v1.GroupKind{Group: group, Kind: kind})
return true
})
}
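Together, these four commands edit two lists on the project spec: cluster-scoped kinds are permitted via a whitelist, while namespaced kinds are forbidden via a blacklist. A minimal sketch of a resulting spec, with illustrative group/kind values (this compiles within this repo's types):

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative spec: ClusterRole creation is whitelisted at cluster
	// scope, while NetworkPolicy creation is blacklisted in namespaces.
	spec := v1alpha1.AppProjectSpec{
		ClusterResourceWhitelist: []metav1.GroupKind{
			{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
		},
		NamespaceResourceBlacklist: []metav1.GroupKind{
			{Group: "networking.k8s.io", Kind: "NetworkPolicy"},
		},
	}
	fmt.Printf("%+v\n", spec)
}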
// NewProjectRemoveSourceCommand returns a new instance of an `argocd proj remove-src` command
func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
@@ -404,13 +274,13 @@ func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobr
index := -1
for i, item := range proj.Spec.SourceRepos {
if item == url {
if item == git.NormalizeGitURL(url) {
index = i
break
}
}
if index == -1 {
fmt.Printf("Source repository '%s' does not exist in project\n", url)
log.Fatal("Specified source repository does not exist in project")
} else {
proj.Spec.SourceRepos = append(proj.Spec.SourceRepos[:index], proj.Spec.SourceRepos[index+1:]...)
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
@@ -454,155 +324,12 @@ func NewProjectListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
projects, err := projIf.List(context.Background(), &project.ProjectQuery{})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\n")
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\n")
for _, p := range projects.Items {
printProjectLine(w, &p)
fmt.Fprintf(w, "%s\t%s\t%v\n", p.Name, p.Spec.Description, p.Spec.Destinations)
}
_ = w.Flush()
},
}
return command
}
func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
var destinations, sourceRepos, clusterWhitelist, namespaceBlacklist string
switch len(p.Spec.Destinations) {
case 0:
destinations = "<none>"
case 1:
destinations = fmt.Sprintf("%s,%s", p.Spec.Destinations[0].Server, p.Spec.Destinations[0].Namespace)
default:
destinations = fmt.Sprintf("%d destinations", len(p.Spec.Destinations))
}
switch len(p.Spec.SourceRepos) {
case 0:
sourceRepos = "<none>"
case 1:
sourceRepos = p.Spec.SourceRepos[0]
default:
sourceRepos = fmt.Sprintf("%d repos", len(p.Spec.SourceRepos))
}
switch len(p.Spec.ClusterResourceWhitelist) {
case 0:
clusterWhitelist = "<none>"
case 1:
clusterWhitelist = fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[0].Group, p.Spec.ClusterResourceWhitelist[0].Kind)
default:
clusterWhitelist = fmt.Sprintf("%d resources", len(p.Spec.ClusterResourceWhitelist))
}
switch len(p.Spec.NamespaceResourceBlacklist) {
case 0:
namespaceBlacklist = "<none>"
default:
namespaceBlacklist = fmt.Sprintf("%d resources", len(p.Spec.NamespaceResourceBlacklist))
}
fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, destinations, sourceRepos, clusterWhitelist, namespaceBlacklist)
}
// NewProjectGetCommand returns a new instance of an `argocd proj get` command
func NewProjectGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
const printProjFmtStr = "%-34s%s\n"
var command = &cobra.Command{
Use: "get PROJECT",
Short: "Get project details",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
p, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
fmt.Printf(printProjFmtStr, "Name:", p.Name)
fmt.Printf(printProjFmtStr, "Description:", p.Spec.Description)
// Print destinations
dest0 := "<none>"
if len(p.Spec.Destinations) > 0 {
dest0 = fmt.Sprintf("%s,%s", p.Spec.Destinations[0].Server, p.Spec.Destinations[0].Namespace)
}
fmt.Printf(printProjFmtStr, "Destinations:", dest0)
for i := 1; i < len(p.Spec.Destinations); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s,%s", p.Spec.Destinations[i].Server, p.Spec.Destinations[i].Namespace))
}
// Print sources
src0 := "<none>"
if len(p.Spec.SourceRepos) > 0 {
src0 = p.Spec.SourceRepos[0]
}
fmt.Printf(printProjFmtStr, "Repositories:", src0)
for i := 1; i < len(p.Spec.SourceRepos); i++ {
fmt.Printf(printProjFmtStr, "", p.Spec.SourceRepos[i])
}
// Print whitelisted cluster resources
cwl0 := "<none>"
if len(p.Spec.ClusterResourceWhitelist) > 0 {
cwl0 = fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[0].Group, p.Spec.ClusterResourceWhitelist[0].Kind)
}
fmt.Printf(printProjFmtStr, "Whitelisted Cluster Resources:", cwl0)
for i := 1; i < len(p.Spec.ClusterResourceWhitelist); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[i].Group, p.Spec.ClusterResourceWhitelist[i].Kind))
}
// Print blacklisted namespaced resources
rbl0 := "<none>"
if len(p.Spec.NamespaceResourceBlacklist) > 0 {
rbl0 = fmt.Sprintf("%s/%s", p.Spec.NamespaceResourceBlacklist[0].Group, p.Spec.NamespaceResourceBlacklist[0].Kind)
}
fmt.Printf(printProjFmtStr, "Blacklisted Namespaced Resources:", rbl0)
for i := 1; i < len(p.Spec.NamespaceResourceBlacklist); i++ {
fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s/%s", p.Spec.NamespaceResourceBlacklist[i].Group, p.Spec.NamespaceResourceBlacklist[i].Kind))
}
},
}
return command
}
func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "edit PROJECT",
Short: "Edit project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
projData, err := json.Marshal(proj.Spec)
errors.CheckError(err)
projData, err = yaml.JSONToYAML(projData)
errors.CheckError(err)
cli.InteractiveEdit(fmt.Sprintf("%s-*-edit.yaml", projName), projData, func(input []byte) error {
input, err = yaml.YAMLToJSON(input)
if err != nil {
return err
}
updatedSpec := v1alpha1.AppProjectSpec{}
err = json.Unmarshal(input, &updatedSpec)
if err != nil {
return err
}
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
if err != nil {
return err
}
proj.Spec = updatedSpec
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
if err != nil {
return fmt.Errorf("Failed to update project:\n%v", err)
}
return err
})
},
}
return command
}

View File

@@ -1,377 +0,0 @@
package commands
import (
"context"
"fmt"
"os"
"strconv"
"text/tabwriter"
timeutil "github.com/argoproj/pkg/time"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/project"
"github.com/argoproj/argo-cd/util"
projectutil "github.com/argoproj/argo-cd/util/project"
)
const (
policyTemplate = "p, proj:%s:%s, applications, %s, %s/%s, %s"
)
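As a worked example of policyTemplate: for project 'myproj', role 'ci', action 'sync', object '*' and permission 'allow', the rendered casbin policy line is:

policy := fmt.Sprintf(policyTemplate, "myproj", "ci", "sync", "myproj", "*", "allow")
// => "p, proj:myproj:ci, applications, sync, myproj/*, allow"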
// NewProjectRoleCommand returns a new instance of the `argocd proj role` command
func NewProjectRoleCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
roleCommand := &cobra.Command{
Use: "role",
Short: "Manage a project's roles",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
},
}
roleCommand.AddCommand(NewProjectRoleListCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleGetCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleCreateCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleDeleteCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleCreateTokenCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleDeleteTokenCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleAddPolicyCommand(clientOpts))
roleCommand.AddCommand(NewProjectRoleRemovePolicyCommand(clientOpts))
return roleCommand
}
// NewProjectRoleAddPolicyCommand returns a new instance of an `argocd proj role add-policy` command
func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts policyOpts
)
var command = &cobra.Command{
Use: "add-policy PROJECT ROLE-NAME",
Short: "Add a policy to a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
role, roleIndex, err := projectutil.GetRoleByName(proj, roleName)
errors.CheckError(err)
policy := fmt.Sprintf(policyTemplate, proj.Name, role.Name, opts.action, proj.Name, opts.object, opts.permission)
proj.Spec.Roles[roleIndex].Policies = append(role.Policies, policy)
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
addPolicyFlags(command, &opts)
return command
}
// NewProjectRoleRemovePolicyCommand returns a new instance of an `argocd proj role remove-policy` command
func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
opts policyOpts
)
var command = &cobra.Command{
Use: "remove-policy PROJECT ROLE-NAME",
Short: "Remove a policy from a role within a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
role, roleIndex, err := projectutil.GetRoleByName(proj, roleName)
errors.CheckError(err)
policyToRemove := fmt.Sprintf(policyTemplate, proj.Name, role.Name, opts.action, proj.Name, opts.object, opts.permission)
duplicateIndex := -1
for i, policy := range role.Policies {
if policy == policyToRemove {
duplicateIndex = i
break
}
}
if duplicateIndex < 0 {
return
}
role.Policies[duplicateIndex] = role.Policies[len(role.Policies)-1]
proj.Spec.Roles[roleIndex].Policies = role.Policies[:len(role.Policies)-1]
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
addPolicyFlags(command, &opts)
return command
}
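The two lines removing the policy above use Go's unordered swap-delete idiom: overwrite the target element with the last element, then truncate the slice. A standalone illustration:

s := []string{"a", "b", "c", "d"}
i := 1               // remove "b"
s[i] = s[len(s)-1]   // overwrite with last element
s = s[:len(s)-1]     // truncate
// => ["a", "d", "c"] — O(1), but element order is not preserved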
// NewProjectRoleCreateCommand returns a new instance of an `argocd proj role create` command
func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
description string
)
var command = &cobra.Command{
Use: "create PROJECT ROLE-NAME",
Short: "Create a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
_, _, err = projectutil.GetRoleByName(proj, roleName)
if err == nil {
fmt.Printf("Role '%s' already exists\n", roleName)
return
}
proj.Spec.Roles = append(proj.Spec.Roles, v1alpha1.ProjectRole{Name: roleName, Description: description})
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Role '%s' created\n", roleName)
},
}
command.Flags().StringVarP(&description, "description", "", "", "Project description")
return command
}
// NewProjectRoleDeleteCommand returns a new instance of an `argocd proj role delete` command
func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT ROLE-NAME",
Short: "Delete a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
_, index, err := projectutil.GetRoleByName(proj, roleName)
if err != nil {
fmt.Printf("Role '%s' does not exist in project\n", roleName)
return
}
proj.Spec.Roles[index] = proj.Spec.Roles[len(proj.Spec.Roles)-1]
proj.Spec.Roles = proj.Spec.Roles[:len(proj.Spec.Roles)-1]
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Role '%s' deleted\n", roleName)
},
}
return command
}
// NewProjectRoleCreateTokenCommand returns a new instance of an `argocd proj role create-token` command
func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
expiresIn string
)
var command = &cobra.Command{
Use: "create-token PROJECT ROLE-NAME",
Short: "Create a project token",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
duration, err := timeutil.ParseDuration(expiresIn)
errors.CheckError(err)
token, err := projIf.CreateToken(context.Background(), &project.ProjectTokenCreateRequest{Project: projName, Role: roleName, ExpiresIn: int64(duration.Seconds())})
errors.CheckError(err)
fmt.Println(token.Token)
},
}
command.Flags().StringVarP(&expiresIn, "expires-in", "e", "0s", "Duration before the token will expire. (Default: No expiration)")
return command
}
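For example, --expires-in 12h is parsed to a duration and transmitted as whole seconds; the default "0s" yields an ExpiresIn of 0, which the server treats as no expiration. A sketch with the standard library parser (the CLI actually uses the extended parser from argoproj/pkg/time):

d, err := time.ParseDuration("12h")
if err != nil {
	panic(err)
}
fmt.Println(int64(d.Seconds())) // 43200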
// NewProjectRoleDeleteTokenCommand returns a new instance of an `argocd proj role delete-token` command
func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
Short: "Delete a project token",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
issuedAt, err := strconv.ParseInt(args[2], 10, 64)
errors.CheckError(err)
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
_, err = projIf.DeleteToken(context.Background(), &project.ProjectTokenDeleteRequest{Project: projName, Role: roleName, Iat: issuedAt})
errors.CheckError(err)
},
}
return command
}
// NewProjectRoleListCommand returns a new instance of an `argocd proj roles list` command
func NewProjectRoleListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List all the roles in a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
project, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ROLE-NAME\tDESCRIPTION\n")
for _, role := range project.Spec.Roles {
fmt.Fprintf(w, "%s\t%s\n", role.Name, role.Description)
}
_ = w.Flush()
},
}
return command
}
// NewProjectRoleGetCommand returns a new instance of an `argocd proj roles get` command
func NewProjectRoleGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "get PROJECT ROLE-NAME",
Short: "Get the details of a specific role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
roleName := args[1]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
role, _, err := projectutil.GetRoleByName(proj, roleName)
errors.CheckError(err)
printRoleFmtStr := "%-15s%s\n"
fmt.Printf(printRoleFmtStr, "Role Name:", roleName)
fmt.Printf(printRoleFmtStr, "Description:", role.Description)
fmt.Printf("Policies:\n")
fmt.Printf("%s\n", proj.ProjectPoliciesString())
fmt.Printf("JWT Tokens:\n")
// TODO(jessesuen): print groups
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ID\tISSUED-AT\tEXPIRES-AT\n")
for _, token := range role.JWTTokens {
expiresAt := "<none>"
if token.ExpiresAt > 0 {
expiresAt = humanizeTimestamp(token.ExpiresAt)
}
fmt.Fprintf(w, "%d\t%s\t%s\n", token.IssuedAt, humanizeTimestamp(token.IssuedAt), expiresAt)
}
_ = w.Flush()
},
}
return command
}
// NewProjectRoleAddGroupCommand returns a new instance of an `argocd proj role add-group` command
func NewProjectRoleAddGroupCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "add-group PROJECT ROLE-NAME GROUP-CLAIM",
Short: "Add a policy to a project role",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, roleName, groupName := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
updated, err := projectutil.AddGroupToRole(proj, roleName, groupName)
errors.CheckError(err)
if !updated {
fmt.Printf("Group '%s' already present in role '%s'\n", groupName, roleName)
return
}
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Group '%s' added to role '%s'\n", groupName, roleName)
},
}
return command
}
// NewProjectRoleRemoveGroupCommand returns a new instance of an `argocd proj role remove-group` command
func NewProjectRoleRemoveGroupCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "remove-group PROJECT ROLE-NAME GROUP-CLAIM",
Short: "Remove a group claim from a role within a project",
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, roleName, groupName := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
updated, err := projectutil.RemoveGroupFromRole(proj, roleName, groupName)
errors.CheckError(err)
if !updated {
fmt.Printf("Group '%s' not present in role '%s'\n", groupName, roleName)
return
}
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
fmt.Printf("Group '%s' removed from role '%s'\n", groupName, roleName)
},
}
return command
}

View File

@@ -1,18 +1,15 @@
package commands
import (
"context"
"fmt"
"os"
oidc "github.com/coreos/go-oidc"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/server/settings"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/localconfig"
"github.com/argoproj/argo-cd/util/session"
)
@@ -21,7 +18,6 @@ import (
func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
password string
ssoPort int
)
var command = &cobra.Command{
Use: "relogin",
@@ -40,34 +36,28 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
configCtx, err := localCfg.ResolveContext(localCfg.CurrentContext)
errors.CheckError(err)
parser := &jwt.Parser{
SkipClaimsValidation: true,
}
claims := jwt.StandardClaims{}
_, _, err = parser.ParseUnverified(configCtx.User.AuthToken, &claims)
errors.CheckError(err)
var tokenString string
var refreshToken string
clientOpts := argocdclient.ClientOptions{
ConfigPath: "",
ServerAddr: configCtx.Server.Server,
Insecure: configCtx.Server.Insecure,
GRPCWeb: globalClientOpts.GRPCWeb,
PlainText: configCtx.Server.PlainText,
}
acdClient := argocdclient.NewClientOrDie(&clientOpts)
claims, err := configCtx.User.Claims()
errors.CheckError(err)
if claims.Issuer == session.SessionManagerClaimsIssuer {
clientOpts := argocdclient.ClientOptions{
ConfigPath: "",
ServerAddr: configCtx.Server.Server,
Insecure: configCtx.Server.Insecure,
PlainText: configCtx.Server.PlainText,
}
acdClient := argocdclient.NewClientOrDie(&clientOpts)
fmt.Printf("Relogging in as '%s'\n", claims.Subject)
tokenString = passwordLogin(acdClient, claims.Subject, password)
} else {
fmt.Println("Reinitiating SSO login")
setConn, setIf := acdClient.NewSettingsClientOrDie()
defer util.Close(setConn)
ctx := context.Background()
httpClient, err := acdClient.HTTPClient()
errors.CheckError(err)
ctx = oidc.ClientContext(ctx, httpClient)
acdSet, err := setIf.Get(ctx, &settings.SettingsQuery{})
errors.CheckError(err)
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
errors.CheckError(err)
tokenString, refreshToken = oauth2Login(ctx, ssoPort, oauth2conf, provider)
tokenString, refreshToken = oauth2Login(configCtx.Server.Server, configCtx.Server.PlainText)
}
localCfg.UpsertUser(localconfig.User{
@@ -81,6 +71,5 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
},
}
command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "port to run local OAuth2 login application")
return command
}
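The branch above hinges on the token's issuer claim: a token minted by Argo CD's own session manager means a username/password session, anything else means SSO. A minimal sketch of that unverified-claims inspection, using the same jwt-go library as this file (function name and parameters are illustrative):

import (
	jwt "github.com/dgrijalva/jwt-go"
)

// isLocalSession reports whether the token was issued by the local
// session manager rather than an external SSO provider.
// NOTE: claims are deliberately parsed without signature verification;
// the token is only inspected here, never trusted.
func isLocalSession(authToken, localIssuer string) (bool, error) {
	parser := &jwt.Parser{SkipClaimsValidation: true}
	claims := jwt.StandardClaims{}
	if _, _, err := parser.ParseUnverified(authToken, &claims); err != nil {
		return false, err
	}
	return claims.Issuer == localIssuer, nil
}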

View File

@@ -39,10 +39,9 @@ func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
// NewRepoAddCommand returns a new instance of an `argocd repo add` command
func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
repo appsv1.Repository
upsert bool
sshPrivateKeyPath string
insecureIgnoreHostKey bool
repo appsv1.Repository
upsert bool
sshPrivateKeyPath string
)
var command = &cobra.Command{
Use: "add REPO",
@@ -60,13 +59,12 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
}
repo.SSHPrivateKey = string(keyData)
}
repo.InsecureIgnoreHostKey = insecureIgnoreHostKey
// First test the repo *without* username/password. This gives us a hint on whether this
// is a private repo.
// NOTE: it is important not to run git commands to test git credentials on the user's
// system since it may mess with their git credential store (e.g. osx keychain).
// See issue #315
err := git.TestRepo(repo.Repo, "", "", repo.SSHPrivateKey, repo.InsecureIgnoreHostKey)
err := git.TestRepo(repo.Repo, "", "", repo.SSHPrivateKey)
if err != nil {
if git.IsSSHURL(repo.Repo) {
// If we failed using git SSH credentials, then the repo is automatically bad
@@ -89,8 +87,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
}
command.Flags().StringVar(&repo.Username, "username", "", "username to the repository")
command.Flags().StringVar(&repo.Password, "password", "", "password to the repository")
command.Flags().StringVar(&sshPrivateKeyPath, "ssh-private-key-path", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().BoolVar(&insecureIgnoreHostKey, "insecure-ignore-host-key", false, "disables SSH strict host key checking")
command.Flags().StringVar(&sshPrivateKeyPath, "sshPrivateKeyPath", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
return command
}
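The probe-then-prompt flow described in the comment can be summarized as below; addRepo and testRepo are hypothetical stand-ins (git.TestRepo's exact signature differs between the two sides of this diff):

// addRepo sketches the credential probing flow: try anonymous access
// first, and only fall back to prompting for credentials when the URL
// is HTTPS (an SSH failure cannot be fixed by a username/password).
func addRepo(url string, isSSH func(string) bool, testRepo func(url, user, pass string) error, prompt func() (string, string)) error {
	if err := testRepo(url, "", ""); err != nil {
		if isSSH(url) {
			return err // bad SSH credentials are not recoverable here
		}
		user, pass := prompt()
		return testRepo(url, user, pass)
	}
	return nil
}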

View File

@@ -1,26 +1,13 @@
package commands
import (
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/config"
"github.com/argoproj/argo-cd/util/localconfig"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
)
func init() {
cobra.OnInitialize(initConfig)
}
var logLevel string
func initConfig() {
cli.SetLogLevel(logLevel)
}
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
@@ -30,7 +17,7 @@ func NewCommand() *cobra.Command {
var command = &cobra.Command{
Use: cliName,
Short: "argocd controls a Argo CD server",
Short: "argocd controls a ArgoCD server",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
@@ -48,13 +35,12 @@ func NewCommand() *cobra.Command {
defaultLocalConfigPath, err := localconfig.DefaultLocalConfigPath()
errors.CheckError(err)
command.PersistentFlags().StringVar(&clientOpts.ConfigPath, "config", config.GetFlag("config", defaultLocalConfigPath), "Path to Argo CD config")
command.PersistentFlags().StringVar(&clientOpts.ServerAddr, "server", config.GetFlag("server", ""), "Argo CD server address")
command.PersistentFlags().BoolVar(&clientOpts.PlainText, "plaintext", config.GetBoolFlag("plaintext"), "Disable TLS")
command.PersistentFlags().BoolVar(&clientOpts.Insecure, "insecure", config.GetBoolFlag("insecure"), "Skip server certificate and domain verification")
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", config.GetFlag("server-crt", ""), "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", ""), "Authentication token")
command.PersistentFlags().BoolVar(&clientOpts.GRPCWeb, "grpc-web", config.GetBoolFlag("grpc-web"), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2.")
command.PersistentFlags().StringVar(&logLevel, "loglevel", config.GetFlag("loglevel", "info"), "Set the logging level. One of: debug|info|warn|error")
command.PersistentFlags().StringVar(&clientOpts.ConfigPath, "config", defaultLocalConfigPath, "Path to ArgoCD config")
command.PersistentFlags().StringVar(&clientOpts.ServerAddr, "server", "", "ArgoCD server address")
command.PersistentFlags().BoolVar(&clientOpts.PlainText, "plaintext", false, "Disable TLS")
command.PersistentFlags().BoolVar(&clientOpts.Insecure, "insecure", false, "Skip server certificate and domain verification")
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", "", "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", "", "Authentication token")
return command
}

View File

@@ -4,13 +4,12 @@ import (
"context"
"fmt"
"github.com/golang/protobuf/ptypes/empty"
"github.com/spf13/cobra"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/util"
"github.com/golang/protobuf/ptypes/empty"
"github.com/spf13/cobra"
)
// NewVersionCmd returns a new `version` command to be used as a sub-command to root

View File

@@ -1,116 +1,105 @@
package common
// Default service addresses and URLs of Argo CD internal services
const (
// DefaultRepoServerAddr is the gRPC address of the Argo CD repo server
DefaultRepoServerAddr = "argocd-repo-server:8081"
// DefaultDexServerAddr is the HTTP address of the Dex OIDC server, which we run a reverse proxy against
DefaultDexServerAddr = "http://argocd-dex-server:5556"
// DefaultRedisAddr is the default redis address
DefaultRedisAddr = "argocd-redis:6379"
import (
rbacv1 "k8s.io/api/rbac/v1"
"github.com/argoproj/argo-cd/pkg/apis/application"
)
// Kubernetes ConfigMap and Secret resource names which hold Argo CD settings
const (
ArgoCDConfigMapName = "argocd-cm"
// MetadataPrefix is the prefix used for our labels and annotations
MetadataPrefix = "argocd.argoproj.io"
// SecretTypeRepository indicates a secret type of repository
SecretTypeRepository = "repository"
// SecretTypeCluster indicates a secret type of cluster
SecretTypeCluster = "cluster"
// AuthCookieName is the HTTP cookie name where we store our auth token
AuthCookieName = "argocd.token"
// ResourcesFinalizerName is the name of the application CRD finalizer
ResourcesFinalizerName = "resources-finalizer." + MetadataPrefix
// KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster
KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc"
)
const (
ArgoCDAdminUsername = "admin"
ArgoCDSecretName = "argocd-secret"
ArgoCDConfigMapName = "argocd-cm"
ArgoCDRBACConfigMapName = "argocd-rbac-cm"
)
const (
PortAPIServer = 8080
PortRepoServer = 8081
PortArgoCDMetrics = 8082
PortArgoCDAPIServerMetrics = 8083
PortRepoServerMetrics = 8084
)
// Argo CD application related constants
const (
// KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster
KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc"
// DefaultAppProjectName contains the name of the 'default' app project, which is available in every Argo CD installation
DefaultAppProjectName = "default"
// ArgoCDAdminUsername is the username of the 'admin' user
ArgoCDAdminUsername = "admin"
// ArgoCDUserAgentName is the default user-agent name used by the gRPC API client library and grpc-gateway
ArgoCDUserAgentName = "argocd-client"
// AuthCookieName is the HTTP cookie name where we store our auth token
AuthCookieName = "argocd.token"
// RevisionHistoryLimit is the maximum number of successful syncs to keep in history
RevisionHistoryLimit = 10
// K8sClientConfigQPS controls the QPS to be used in K8s REST client configs
K8sClientConfigQPS = 25
// K8sClientConfigBurst controls the burst to be used in K8s REST client configs
K8sClientConfigBurst = 50
)
// Dex related constants
const (
// DexAPIEndpoint is the endpoint where we serve the Dex API server
DexAPIEndpoint = "/api/dex"
// LoginEndpoint is Argo CD's shorthand login endpoint which redirects to dex's OAuth 2.0 provider's consent page
// LoginEndpoint is ArgoCD's shorthand login endpoint which redirects to dex's OAuth 2.0 provider's consent page
LoginEndpoint = "/auth/login"
// CallbackEndpoint is Argo CD's final callback endpoint we reach after OAuth 2.0 login flow has been completed
// CallbackEndpoint is ArgoCD's final callback endpoint we reach after OAuth 2.0 login flow has been completed
CallbackEndpoint = "/auth/callback"
// ArgoCDClientAppName is name of the Oauth client app used when registering our web app to dex
ArgoCDClientAppName = "Argo CD"
ArgoCDClientAppName = "ArgoCD"
// ArgoCDClientAppID is the Oauth client ID we will use when registering our app to dex
ArgoCDClientAppID = "argo-cd"
// ArgoCDCLIClientAppName is name of the Oauth client app used when registering our CLI to dex
ArgoCDCLIClientAppName = "Argo CD CLI"
ArgoCDCLIClientAppName = "ArgoCD CLI"
// ArgoCDCLIClientAppID is the Oauth client ID we will use when registering our CLI to dex
ArgoCDCLIClientAppID = "argo-cd-cli"
)
// Resource metadata labels and annotations (keys and values) used by Argo CD components
const (
// LabelKeyAppInstance is the label key to use to uniquely identify the instance of an application
// The Argo CD application name is used as the instance name
LabelKeyAppInstance = "app.kubernetes.io/instance"
// LabelKeyLegacyApplicationName is the legacy label (v0.10 and below) and is superseded by 'app.kubernetes.io/instance'
LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name"
// LabelKeySecretType contains the type of argocd secret (currently: 'cluster')
LabelKeySecretType = "argocd.argoproj.io/secret-type"
// LabelValueSecretTypeCluster indicates a secret type of cluster
LabelValueSecretTypeCluster = "cluster"
// AnnotationKeyHook contains the hook type of a resource
AnnotationKeyHook = "argocd.argoproj.io/hook"
// AnnotationKeyHookDeletePolicy is the policy of deleting a hook
AnnotationKeyHookDeletePolicy = "argocd.argoproj.io/hook-delete-policy"
// AnnotationKeyRefresh is the annotation key which indicates that app needs to be refreshed. Removed by application controller after app is refreshed.
// Might take values 'normal'/'hard'. Value 'hard' means manifest cache and target cluster state cache should be invalidated before refresh.
AnnotationKeyRefresh = "argocd.argoproj.io/refresh"
// AnnotationKeyManagedBy is the annotation name which indicates that a k8s resource is managed by an application.
AnnotationKeyManagedBy = "managed-by"
// AnnotationValueManagedByArgoCD is a 'managed-by' annotation value for resources managed by Argo CD
AnnotationValueManagedByArgoCD = "argocd.argoproj.io"
// AnnotationKeyHelmHook is the helm hook annotation
AnnotationKeyHelmHook = "helm.sh/hook"
// AnnotationValueHelmHookCRDInstall is the 'crd-install' value of the helm hook annotation
AnnotationValueHelmHookCRDInstall = "crd-install"
// ResourcesFinalizerName the finalizer value which we inject to finalize deletion of an application
ResourcesFinalizerName = "resources-finalizer.argocd.argoproj.io"
)
// Environment variables for tuning and debugging Argo CD
const (
// EnvVarSSODebug is an environment variable to enable additional OAuth debugging in the API server
EnvVarSSODebug = "ARGOCD_SSO_DEBUG"
// EnvVarRBACDebug is an environment variable to enable additional RBAC debugging in the API server
EnvVarRBACDebug = "ARGOCD_RBAC_DEBUG"
// EnvVarFakeInClusterConfig is an environment variable to fake an in-cluster RESTConfig using
// the current kubectl context (for development purposes)
EnvVarFakeInClusterConfig = "ARGOCD_FAKE_IN_CLUSTER"
// DefaultAppProjectName contains the name of the default app project, which allows deploying applications to any cluster.
DefaultAppProjectName = "default"
)
const (
// MinClientVersion is the minimum client version that can interface with this API server.
// When introducing breaking changes to the API or datastructures, this number should be bumped.
// The value here may be lower than the current value in VERSION
MinClientVersion = "0.12.0"
// CacheVersion is the version of objects cached using util/cache/cache.go.
// Number should be bumped in case of backward incompatible change to make sure cache is invalidated after upgrade.
CacheVersion = "0.12.0"
var (
// LabelKeyAppInstance refers to the application instance resource name
LabelKeyAppInstance = MetadataPrefix + "/app-instance"
// LabelKeySecretType contains the type of argocd secret (either 'cluster' or 'repo')
LabelKeySecretType = MetadataPrefix + "/secret-type"
// AnnotationConnectionStatus contains connection state status
AnnotationConnectionStatus = MetadataPrefix + "/connection-status"
// AnnotationConnectionMessage contains additional information about connection status
AnnotationConnectionMessage = MetadataPrefix + "/connection-message"
// AnnotationConnectionModifiedAt contains timestamp when connection state had been modified
AnnotationConnectionModifiedAt = MetadataPrefix + "/connection-modified-at"
// AnnotationHook contains the hook type of a resource
AnnotationHook = MetadataPrefix + "/hook"
// AnnotationHookDeletePolicy is the policy of deleting a hook
AnnotationHookDeletePolicy = MetadataPrefix + "/hook-delete-policy"
// AnnotationHelmHook is the helm hook annotation
AnnotationHelmHook = "helm.sh/hook"
// LabelKeyApplicationControllerInstanceID is the label which allows separating applications among multiple running application controllers.
LabelKeyApplicationControllerInstanceID = application.ApplicationFullName + "/controller-instanceid"
// LabelApplicationName is the label which indicates that a resource belongs to the application with the specified name
LabelApplicationName = application.ApplicationFullName + "/app-name"
// AnnotationKeyRefresh is the annotation key in the application which is updated with an
// arbitrary value (i.e. timestamp) on a git event, to force the controller to wake up and
// re-evaluate the application
AnnotationKeyRefresh = application.ApplicationFullName + "/refresh"
)
// ArgoCDManagerServiceAccount is the name of the service account for managing a cluster
const (
ArgoCDManagerServiceAccount = "argocd-manager"
ArgoCDManagerClusterRole = "argocd-manager-role"
ArgoCDManagerClusterRoleBinding = "argocd-manager-role-binding"
)
// ArgoCDManagerPolicyRules are the policies to give argocd-manager
var ArgoCDManagerPolicyRules = []rbacv1.PolicyRule{
{
APIGroups: []string{"*"},
Resources: []string{"*"},
Verbs: []string{"*"},
},
}

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"time"
"github.com/argoproj/argo-cd/errors"
log "github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -11,34 +12,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// ArgoCDManagerServiceAccount is the name of the service account for managing a cluster
const (
ArgoCDManagerServiceAccount = "argocd-manager"
ArgoCDManagerClusterRole = "argocd-manager-role"
ArgoCDManagerClusterRoleBinding = "argocd-manager-role-binding"
)
// ArgoCDManagerPolicyRules are the policies to give argocd-manager
var ArgoCDManagerPolicyRules = []rbacv1.PolicyRule{
{
APIGroups: []string{"*"},
Resources: []string{"*"},
Verbs: []string{"*"},
},
{
NonResourceURLs: []string{"*"},
Verbs: []string{"*"},
},
}
// CreateServiceAccount creates a service account
func CreateServiceAccount(
clientset kubernetes.Interface,
serviceAccountName string,
namespace string,
) error {
) {
serviceAccount := apiv1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
@@ -52,13 +34,12 @@ func CreateServiceAccount(
_, err := clientset.CoreV1().ServiceAccounts(namespace).Create(&serviceAccount)
if err != nil {
if !apierr.IsAlreadyExists(err) {
return fmt.Errorf("Failed to create service account %q: %v", serviceAccountName, err)
log.Fatalf("Failed to create service account '%s': %v\n", serviceAccountName, err)
}
log.Infof("ServiceAccount %q already exists", serviceAccountName)
return nil
fmt.Printf("ServiceAccount '%s' already exists\n", serviceAccountName)
return
}
log.Infof("ServiceAccount %q created", serviceAccountName)
return nil
fmt.Printf("ServiceAccount '%s' created\n", serviceAccountName)
}
// CreateClusterRole creates a cluster role
@@ -66,7 +47,7 @@ func CreateClusterRole(
clientset kubernetes.Interface,
clusterRoleName string,
rules []rbacv1.PolicyRule,
) error {
) {
clusterRole := rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
@@ -81,17 +62,16 @@ func CreateClusterRole(
_, err := crclient.Create(&clusterRole)
if err != nil {
if !apierr.IsAlreadyExists(err) {
return fmt.Errorf("Failed to create ClusterRole %q: %v", clusterRoleName, err)
log.Fatalf("Failed to create ClusterRole '%s': %v\n", clusterRoleName, err)
}
_, err = crclient.Update(&clusterRole)
if err != nil {
return fmt.Errorf("Failed to update ClusterRole %q: %v", clusterRoleName, err)
log.Fatalf("Failed to update ClusterRole '%s': %v\n", clusterRoleName, err)
}
log.Infof("ClusterRole %q updated", clusterRoleName)
fmt.Printf("ClusterRole '%s' updated\n", clusterRoleName)
} else {
log.Infof("ClusterRole %q created", clusterRoleName)
fmt.Printf("ClusterRole '%s' created\n", clusterRoleName)
}
return nil
}
// CreateClusterRoleBinding create a ClusterRoleBinding
@@ -101,7 +81,7 @@ func CreateClusterRoleBinding(
serviceAccountName,
clusterRoleName string,
namespace string,
) error {
) {
roleBinding := rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
@@ -126,34 +106,22 @@ func CreateClusterRoleBinding(
_, err := clientset.RbacV1().ClusterRoleBindings().Create(&roleBinding)
if err != nil {
if !apierr.IsAlreadyExists(err) {
return fmt.Errorf("Failed to create ClusterRoleBinding %s: %v", clusterBindingRoleName, err)
log.Fatalf("Failed to create ClusterRoleBinding %s: %v\n", clusterBindingRoleName, err)
}
log.Infof("ClusterRoleBinding %q already exists", clusterBindingRoleName)
return nil
fmt.Printf("ClusterRoleBinding '%s' already exists\n", clusterBindingRoleName)
return
}
log.Infof("ClusterRoleBinding %q created, bound %q to %q", clusterBindingRoleName, serviceAccountName, clusterRoleName)
return nil
fmt.Printf("ClusterRoleBinding '%s' created, bound '%s' to '%s'\n", clusterBindingRoleName, serviceAccountName, clusterRoleName)
}
// InstallClusterManagerRBAC installs RBAC resources for a cluster manager to operate a cluster. Returns a token
func InstallClusterManagerRBAC(clientset kubernetes.Interface) (string, error) {
func InstallClusterManagerRBAC(conf *rest.Config) string {
const ns = "kube-system"
var err error
err = CreateServiceAccount(clientset, ArgoCDManagerServiceAccount, ns)
if err != nil {
return "", err
}
err = CreateClusterRole(clientset, ArgoCDManagerClusterRole, ArgoCDManagerPolicyRules)
if err != nil {
return "", err
}
err = CreateClusterRoleBinding(clientset, ArgoCDManagerClusterRoleBinding, ArgoCDManagerServiceAccount, ArgoCDManagerClusterRole, ns)
if err != nil {
return "", err
}
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
CreateServiceAccount(clientset, ArgoCDManagerServiceAccount, ns)
CreateClusterRole(clientset, ArgoCDManagerClusterRole, ArgoCDManagerPolicyRules)
CreateClusterRoleBinding(clientset, ArgoCDManagerClusterRoleBinding, ArgoCDManagerServiceAccount, ArgoCDManagerClusterRole, ns)
var serviceAccount *apiv1.ServiceAccount
var secretName string
@@ -169,51 +137,52 @@ func InstallClusterManagerRBAC(clientset kubernetes.Interface) (string, error) {
return true, nil
})
if err != nil {
return "", fmt.Errorf("Failed to wait for service account secret: %v", err)
log.Fatalf("Failed to wait for service account secret: %v", err)
}
secret, err := clientset.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("Failed to retrieve secret %q: %v", secretName, err)
log.Fatalf("Failed to retrieve secret '%s': %v", secretName, err)
}
token, ok := secret.Data["token"]
if !ok {
return "", fmt.Errorf("Secret %q for service account %q did not have a token", secretName, serviceAccount)
log.Fatalf("Secret '%s' for service account '%s' did not have a token", secretName, serviceAccount)
}
return string(token), nil
return string(token)
}
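The returned bearer token is what Argo CD ultimately persists for the managed cluster; a hedged sketch of turning it into a client-go REST config (helper name and fields are illustrative):

import "k8s.io/client-go/rest"

// managedClusterConfig builds a client config for the newly managed
// cluster using the argocd-manager service account token.
func managedClusterConfig(host, bearerToken string, caData []byte) *rest.Config {
	return &rest.Config{
		Host:        host,
		BearerToken: bearerToken,
		TLSClientConfig: rest.TLSClientConfig{
			CAData: caData,
		},
	}
}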
// UninstallClusterManagerRBAC removes RBAC resources for a cluster manager to operate a cluster
func UninstallClusterManagerRBAC(clientset kubernetes.Interface) error {
return UninstallRBAC(clientset, "kube-system", ArgoCDManagerClusterRoleBinding, ArgoCDManagerClusterRole, ArgoCDManagerServiceAccount)
func UninstallClusterManagerRBAC(conf *rest.Config) {
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
UninstallRBAC(clientset, "kube-system", ArgoCDManagerClusterRoleBinding, ArgoCDManagerClusterRole, ArgoCDManagerServiceAccount)
}
// UninstallRBAC uninstalls RBAC related resources for a binding, role, and service account
func UninstallRBAC(clientset kubernetes.Interface, namespace, bindingName, roleName, serviceAccount string) error {
func UninstallRBAC(clientset kubernetes.Interface, namespace, bindingName, roleName, serviceAccount string) {
if err := clientset.RbacV1().ClusterRoleBindings().Delete(bindingName, &metav1.DeleteOptions{}); err != nil {
if !apierr.IsNotFound(err) {
return fmt.Errorf("Failed to delete ClusterRoleBinding: %v", err)
log.Fatalf("Failed to delete ClusterRoleBinding: %v\n", err)
}
log.Infof("ClusterRoleBinding %q not found", bindingName)
fmt.Printf("ClusterRoleBinding '%s' not found\n", bindingName)
} else {
log.Infof("ClusterRoleBinding %q deleted", bindingName)
fmt.Printf("ClusterRoleBinding '%s' deleted\n", bindingName)
}
if err := clientset.RbacV1().ClusterRoles().Delete(roleName, &metav1.DeleteOptions{}); err != nil {
if !apierr.IsNotFound(err) {
return fmt.Errorf("Failed to delete ClusterRole: %v", err)
log.Fatalf("Failed to delete ClusterRole: %v\n", err)
}
log.Infof("ClusterRole %q not found", roleName)
fmt.Printf("ClusterRole '%s' not found\n", roleName)
} else {
log.Infof("ClusterRole %q deleted", roleName)
fmt.Printf("ClusterRole '%s' deleted\n", roleName)
}
if err := clientset.CoreV1().ServiceAccounts(namespace).Delete(serviceAccount, &metav1.DeleteOptions{}); err != nil {
if !apierr.IsNotFound(err) {
return fmt.Errorf("Failed to delete ServiceAccount: %v", err)
log.Fatalf("Failed to delete ServiceAccount: %v\n", err)
}
log.Infof("ServiceAccount %q in namespace %q not found", serviceAccount, namespace)
fmt.Printf("ServiceAccount '%s' in namespace '%s' not found\n", serviceAccount, namespace)
} else {
log.Infof("ServiceAccount %q deleted", serviceAccount)
fmt.Printf("ServiceAccount '%s' deleted\n", serviceAccount)
}
return nil
}

File diff suppressed because it is too large

View File

@@ -1,444 +0,0 @@
package controller
import (
"context"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
kubetesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
mockstatecache "github.com/argoproj/argo-cd/controller/cache/mocks"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
mockreposerver "github.com/argoproj/argo-cd/reposerver/mocks"
"github.com/argoproj/argo-cd/reposerver/repository"
mockrepoclient "github.com/argoproj/argo-cd/reposerver/repository/mocks"
"github.com/argoproj/argo-cd/test"
utilcache "github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
)
type fakeData struct {
apps []runtime.Object
manifestResponse *repository.ManifestResponse
managedLiveObjs map[kube.ResourceKey]*unstructured.Unstructured
}
func newFakeController(data *fakeData) *ApplicationController {
var clust corev1.Secret
err := yaml.Unmarshal([]byte(fakeCluster), &clust)
if err != nil {
panic(err)
}
// Mock out call to GenerateManifest
mockRepoClient := mockrepoclient.RepoServerServiceClient{}
mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, nil)
mockRepoClientset := mockreposerver.Clientset{}
mockRepoClientset.On("NewRepoServerClient").Return(&fakeCloser{}, &mockRepoClient, nil)
secret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-secret",
Namespace: test.FakeArgoCDNamespace,
},
Data: map[string][]byte{
"admin.password": []byte("test"),
"server.secretkey": []byte("test"),
},
}
cm := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-cm",
Namespace: test.FakeArgoCDNamespace,
},
Data: nil,
}
kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
ctrl, err := NewApplicationController(
test.FakeArgoCDNamespace,
settingsMgr,
kubeClient,
appclientset.NewSimpleClientset(data.apps...),
&mockRepoClientset,
utilcache.NewCache(utilcache.NewInMemoryCache(1*time.Hour)),
time.Minute,
)
if err != nil {
panic(err)
}
cancelProj := test.StartInformer(ctrl.projInformer)
defer cancelProj()
cancelApp := test.StartInformer(ctrl.appInformer)
defer cancelApp()
// Mock out call to GetManagedLiveObjs if fake data supplied
if data.managedLiveObjs != nil {
mockStateCache := mockstatecache.LiveStateCache{}
mockStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(data.managedLiveObjs, nil)
mockStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil)
ctrl.stateCache = &mockStateCache
ctrl.appStateManager.(*appStateManager).liveStateCache = &mockStateCache
}
return ctrl
}
type fakeCloser struct{}
func (f *fakeCloser) Close() error { return nil }
var fakeCluster = `
apiVersion: v1
data:
# {"bearerToken":"fake","tlsClientConfig":{"insecure":true},"awsAuthConfig":null}
config: eyJiZWFyZXJUb2tlbiI6ImZha2UiLCJ0bHNDbGllbnRDb25maWciOnsiaW5zZWN1cmUiOnRydWV9LCJhd3NBdXRoQ29uZmlnIjpudWxsfQ==
# minikube
name: bWluaWt1YmU=
# https://localhost:6443
server: aHR0cHM6Ly9sb2NhbGhvc3Q6NjQ0Mw==
kind: Secret
metadata:
labels:
argocd.argoproj.io/secret-type: cluster
name: some-secret
namespace: ` + test.FakeArgoCDNamespace + `
type: Opaque
`
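The commented plaintext above each data field can be reproduced by base64-decoding the value (uses encoding/base64 and fmt), e.g. for the config entry:

raw, err := base64.StdEncoding.DecodeString(
	"eyJiZWFyZXJUb2tlbiI6ImZha2UiLCJ0bHNDbGllbnRDb25maWciOnsiaW5zZWN1cmUiOnRydWV9LCJhd3NBdXRoQ29uZmlnIjpudWxsfQ==")
if err != nil {
	panic(err)
}
fmt.Println(string(raw))
// => {"bearerToken":"fake","tlsClientConfig":{"insecure":true},"awsAuthConfig":null}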
var fakeApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: my-app
namespace: ` + test.FakeArgoCDNamespace + `
spec:
destination:
namespace: ` + test.FakeDestNamespace + `
server: https://localhost:6443
project: default
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
syncPolicy:
automated: {}
status:
operationState:
finishedAt: 2018-09-21T23:50:29Z
message: successfully synced
operation:
sync:
revision: HEAD
phase: Succeeded
startedAt: 2018-09-21T23:50:25Z
syncResult:
resources:
- kind: RoleBinding
message: |-
rolebinding.rbac.authorization.k8s.io/always-outofsync reconciled
rolebinding.rbac.authorization.k8s.io/always-outofsync configured
name: always-outofsync
namespace: default
status: Synced
revision: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
`
func newFakeApp() *argoappv1.Application {
var app argoappv1.Application
err := yaml.Unmarshal([]byte(fakeApp), &app)
if err != nil {
panic(err)
}
return &app
}
func TestAutoSync(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.NotNil(t, app.Operation)
assert.NotNil(t, app.Operation.Sync)
assert.False(t, app.Operation.Sync.Prune)
}
func TestSkipAutoSync(t *testing.T) {
// Verify we skip when we already synced to the requested revision in our most recent history
// Set both the current and desired revisions to 'aaaa...' and mark the app OutOfSync
{
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
cond := ctrl.autoSync(app, &syncStatus)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// Verify we skip when we are already Synced (even if revision is different)
{
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// Verify we skip when auto-sync is disabled
{
app := newFakeApp()
app.Spec.SyncPolicy = nil
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// Verify we skip when application is marked for deletion
{
app := newFakeApp()
now := metav1.Now()
app.DeletionTimestamp = &now
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// Verify we skip when previous sync attempt failed and return error condition
// Set current to 'aaaaa', desired to 'bbbbb' and add 'bbbbb' to failure history
{
app := newFakeApp()
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
Source: *app.Spec.Source.DeepCopy(),
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &syncStatus)
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
}
// TestAutoSyncIndicateError verifies we skip auto-sync and return error condition if previous sync failed
func TestAutoSyncIndicateError(t *testing.T) {
app := newFakeApp()
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{
{
Name: "a",
Value: "1",
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{
Source: app.Spec.Source.DeepCopy(),
},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
Source: *app.Spec.Source.DeepCopy(),
},
}
cond := ctrl.autoSync(app, &syncStatus)
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// TestAutoSyncParameterOverrides verifies we auto-sync if revision is same but parameter overrides are different
func TestAutoSyncParameterOverrides(t *testing.T) {
app := newFakeApp()
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{
{
Name: "a",
Value: "1",
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{
Source: &argoappv1.ApplicationSource{
Helm: &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{
{
Name: "a",
Value: "2", // this value changed
},
},
},
},
},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
}
cond := ctrl.autoSync(app, &syncStatus)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.NotNil(t, app.Operation)
}
// TestFinalizeAppDeletion verifies application deletion
func TestFinalizeAppDeletion(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
patched := false
fakeAppCs.ReactionChain = nil
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
return true, nil, nil
})
err := ctrl.finalizeApplicationDeletion(app)
// TODO: use an interface to fake out the calls to GetResourcesWithLabel and DeleteResourceWithLabel
// For now just ensure we have an expected error condition
assert.Error(t, err) // Change this to assert.Nil when we stub out GetResourcesWithLabel/DeleteResourceWithLabel
assert.False(t, patched) // Change this to assert.True when we stub out GetResourcesWithLabel/DeleteResourceWithLabel
}
// TestNormalizeApplication verifies we normalize an application during reconciliation
func TestNormalizeApplication(t *testing.T) {
defaultProj := argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: test.FakeArgoCDNamespace,
},
Spec: argoappv1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []argoappv1.ApplicationDestination{
{
Server: "*",
Namespace: "*",
},
},
},
}
app := newFakeApp()
app.Spec.Project = ""
app.Spec.Source.Kustomize = &argoappv1.ApplicationSourceKustomize{NamePrefix: "foo-"}
data := fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponse: &repository.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
{
// Verify we normalize the app because project is missing
ctrl := newFakeController(&data)
key, _ := cache.MetaNamespaceKeyFunc(app)
ctrl.appRefreshQueue.Add(key)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
normalized := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
if string(patchAction.GetPatch()) == `{"spec":{"project":"default"}}` {
normalized = true
}
}
return true, nil, nil
})
ctrl.processAppRefreshQueueItem()
assert.True(t, normalized)
}
{
// Verify we don't unnecessarily normalize app when project is set
app.Spec.Project = "default"
data.apps[0] = app
ctrl := newFakeController(&data)
key, _ := cache.MetaNamespaceKeyFunc(app)
ctrl.appRefreshQueue.Add(key)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
normalized := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
if string(patchAction.GetPatch()) == `{"spec":{"project":"default"}}` {
normalized = true
}
}
return true, nil, nil
})
ctrl.processAppRefreshQueueItem()
assert.False(t, normalized)
}
}

View File

@@ -1,195 +0,0 @@
package cache
import (
"context"
"sync"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
)
type LiveStateCache interface {
IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error)
// Returns child nodes for a given k8s resource
GetChildren(server string, obj *unstructured.Unstructured) ([]appv1.ResourceNode, error)
// Returns the state of live nodes which correspond to the target nodes of the specified application.
GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error)
// Starts watching resources of each controlled cluster.
Run(ctx context.Context)
// Deletes specified resource from cluster.
Delete(server string, obj *unstructured.Unstructured) error
// Invalidate invalidates the entire cluster state cache
Invalidate()
}
func GetTargetObjKey(a *appv1.Application, un *unstructured.Unstructured, isNamespaced bool) kube.ResourceKey {
key := kube.GetResourceKey(un)
if !isNamespaced {
key.Namespace = ""
} else if key.Namespace == "" {
key.Namespace = a.Spec.Destination.Namespace
}
return key
}
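// A sketch of how GetTargetObjKey normalizes namespaces (not part of the
// original file): a cluster-scoped object gets an empty namespace in its key,
// while a namespaced object without an explicit namespace inherits the
// application's destination namespace:
//
//	app := &appv1.Application{Spec: appv1.ApplicationSpec{
//		Destination: appv1.ApplicationDestination{Namespace: "default"},
//	}}
//	key := GetTargetObjKey(app, un, true) // un has no metadata.namespace set
//	// key.Namespace == "default"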
func NewLiveStateCache(db db.ArgoDB, appInformer cache.SharedIndexInformer, settings *settings.ArgoCDSettings, kubectl kube.Kubectl, onAppUpdated func(appName string, fullRefresh bool)) LiveStateCache {
return &liveStateCache{
appInformer: appInformer,
db: db,
clusters: make(map[string]*clusterInfo),
lock: &sync.Mutex{},
onAppUpdated: onAppUpdated,
kubectl: kubectl,
settings: settings,
}
}
type liveStateCache struct {
db db.ArgoDB
clusters map[string]*clusterInfo
lock *sync.Mutex
appInformer cache.SharedIndexInformer
onAppUpdated func(appName string, fullRefresh bool)
kubectl kube.Kubectl
settings *settings.ArgoCDSettings
}
func (c *liveStateCache) processEvent(event watch.EventType, obj *unstructured.Unstructured, url string) error {
info, err := c.getSyncedCluster(url)
if err != nil {
return err
}
return info.processEvent(event, obj)
}
func (c *liveStateCache) getCluster(server string) (*clusterInfo, error) {
c.lock.Lock()
defer c.lock.Unlock()
info, ok := c.clusters[server]
if !ok {
cluster, err := c.db.GetCluster(context.Background(), server)
if err != nil {
return nil, err
}
info = &clusterInfo{
apisMeta: make(map[schema.GroupKind]*apiMeta),
lock: &sync.Mutex{},
nodes: make(map[kube.ResourceKey]*node),
nsIndex: make(map[string]map[kube.ResourceKey]*node),
onAppUpdated: c.onAppUpdated,
kubectl: c.kubectl,
cluster: cluster,
syncTime: nil,
syncLock: &sync.Mutex{},
log: log.WithField("server", cluster.Server),
settings: c.settings,
}
c.clusters[cluster.Server] = info
}
return info, nil
}
func (c *liveStateCache) getSyncedCluster(server string) (*clusterInfo, error) {
info, err := c.getCluster(server)
if err != nil {
return nil, err
}
err = info.ensureSynced()
if err != nil {
return nil, err
}
return info, nil
}
func (c *liveStateCache) Invalidate() {
log.Info("invalidating live state cache")
c.lock.Lock()
defer c.lock.Unlock()
for _, clust := range c.clusters {
clust.lock.Lock()
clust.invalidate()
clust.lock.Unlock()
}
log.Info("live state cache invalidated")
}
func (c *liveStateCache) Delete(server string, obj *unstructured.Unstructured) error {
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return err
}
return clusterInfo.delete(obj)
}
func (c *liveStateCache) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return false, err
}
return clusterInfo.isNamespaced(obj), nil
}
func (c *liveStateCache) GetChildren(server string, obj *unstructured.Unstructured) ([]appv1.ResourceNode, error) {
clusterInfo, err := c.getSyncedCluster(server)
if err != nil {
return nil, err
}
return clusterInfo.getChildren(obj), nil
}
func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
clusterInfo, err := c.getSyncedCluster(a.Spec.Destination.Server)
if err != nil {
return nil, err
}
return clusterInfo.getManagedLiveObjs(a, targetObjs)
}
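// isClusterHasApps reports whether at least one application in the informer
// store targets the given cluster; Run uses it below to decide whether a
// newly added cluster's cache is worth warming up.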
func isClusterHasApps(apps []interface{}, cluster *appv1.Cluster) bool {
for _, obj := range apps {
if app, ok := obj.(*appv1.Application); ok && app.Spec.Destination.Server == cluster.Server {
return true
}
}
return false
}
// Run watches for resource changes annotated with the application label on all registered clusters and schedules the corresponding app refreshes.
func (c *liveStateCache) Run(ctx context.Context) {
util.RetryUntilSucceed(func() error {
clusterEventCallback := func(event *db.ClusterEvent) {
c.lock.Lock()
defer c.lock.Unlock()
if cluster, ok := c.clusters[event.Cluster.Server]; ok {
if event.Type == watch.Deleted {
cluster.invalidate()
delete(c.clusters, event.Cluster.Server)
} else if event.Type == watch.Modified {
cluster.cluster = event.Cluster
cluster.invalidate()
}
} else if event.Type == watch.Added && isClusterHasApps(c.appInformer.GetStore().List(), event.Cluster) {
go func() {
// warm up cache for cluster with apps
_, _ = c.getSyncedCluster(event.Cluster.Server)
}()
}
}
return c.db.WatchClusters(ctx, clusterEventCallback)
}, "watch clusters", ctx, clusterRetryTimeout)
<-ctx.Done()
}

View File

@@ -1,474 +0,0 @@
package cache
import (
"context"
"fmt"
"runtime/debug"
"sync"
"time"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
)
const (
clusterSyncTimeout = 24 * time.Hour
clusterRetryTimeout = 10 * time.Second
watchResourcesRetryTimeout = 1 * time.Second
)
type apiMeta struct {
namespaced bool
resourceVersion string
watchCancel context.CancelFunc
}
type clusterInfo struct {
syncLock *sync.Mutex
syncTime *time.Time
syncError error
apisMeta map[schema.GroupKind]*apiMeta
lock *sync.Mutex
nodes map[kube.ResourceKey]*node
nsIndex map[string]map[kube.ResourceKey]*node
onAppUpdated func(appName string, fullRefresh bool)
kubectl kube.Kubectl
cluster *appv1.Cluster
log *log.Entry
settings *settings.ArgoCDSettings
}
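// replaceResourceCache reconciles the cached nodes of a tracked group/kind
// against a freshly listed set of objects: every listed object is upserted,
// and cached nodes of that group/kind missing from the list are removed.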
func (c *clusterInfo) replaceResourceCache(gk schema.GroupKind, resourceVersion string, objs []unstructured.Unstructured) {
c.lock.Lock()
defer c.lock.Unlock()
info, ok := c.apisMeta[gk]
if ok {
objByKey := make(map[kube.ResourceKey]*unstructured.Unstructured)
for i := range objs {
objByKey[kube.GetResourceKey(&objs[i])] = &objs[i]
}
for i := range objs {
obj := &objs[i]
key := kube.GetResourceKey(&objs[i])
existingNode, exists := c.nodes[key]
c.onNodeUpdated(exists, existingNode, obj, key)
}
for key, existingNode := range c.nodes {
if key.Kind != gk.Kind || key.Group != gk.Group {
continue
}
if _, ok := objByKey[key]; !ok {
c.onNodeRemoved(key, existingNode)
}
}
info.resourceVersion = resourceVersion
}
}
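// createObjInfo builds the cached node for an object: its reference, owner
// references (with a synthesized Service owner for Endpoints), derived info
// items, and, for root objects labeled with an app, the app name and the
// resource itself.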
func createObjInfo(un *unstructured.Unstructured, appInstanceLabel string) *node {
ownerRefs := un.GetOwnerReferences()
// Special case for endpoint. Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed
if un.GroupVersionKind().Group == "" && un.GetKind() == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0 {
ownerRefs = append(ownerRefs, metav1.OwnerReference{
Name: un.GetName(),
Kind: kube.ServiceKind,
APIVersion: "",
})
}
info := &node{
resourceVersion: un.GetResourceVersion(),
ref: v1.ObjectReference{
APIVersion: un.GetAPIVersion(),
Kind: un.GetKind(),
Name: un.GetName(),
Namespace: un.GetNamespace(),
},
ownerRefs: ownerRefs,
info: getNodeInfo(un),
}
appName := kube.GetAppInstanceLabel(un, appInstanceLabel)
if len(ownerRefs) == 0 && appName != "" {
info.appName = appName
info.resource = un
}
return info
}
func (c *clusterInfo) setNode(n *node) {
key := n.resourceKey()
c.nodes[key] = n
ns, ok := c.nsIndex[key.Namespace]
if !ok {
ns = make(map[kube.ResourceKey]*node)
c.nsIndex[key.Namespace] = ns
}
ns[key] = n
}
func (c *clusterInfo) removeNode(key kube.ResourceKey) {
delete(c.nodes, key)
if ns, ok := c.nsIndex[key.Namespace]; ok {
delete(ns, key)
if len(ns) == 0 {
delete(c.nsIndex, key.Namespace)
}
}
}
func (c *clusterInfo) invalidate() {
c.syncLock.Lock()
defer c.syncLock.Unlock()
c.syncTime = nil
for i := range c.apisMeta {
c.apisMeta[i].watchCancel()
}
c.apisMeta = nil
}
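// synced reports whether the last sync result is still fresh: a failed sync
// counts as synced only for clusterRetryTimeout (so it is retried soon after),
// while a successful one is reused until clusterSyncTimeout (24h) expires.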
func (c *clusterInfo) synced() bool {
if c.syncTime == nil {
return false
}
if c.syncError != nil {
return time.Now().Before(c.syncTime.Add(clusterRetryTimeout))
}
return time.Now().Before(c.syncTime.Add(clusterSyncTimeout))
}
func (c *clusterInfo) stopWatching(gk schema.GroupKind) {
c.syncLock.Lock()
defer c.syncLock.Unlock()
if info, ok := c.apisMeta[gk]; ok {
info.watchCancel()
delete(c.apisMeta, gk)
c.replaceResourceCache(gk, "", []unstructured.Unstructured{})
log.Warnf("Stop watching %s not found on %s.", gk, c.cluster.Server)
}
}
// startMissingWatches lists the supported cluster resources and starts watching for changes, unless a watch is already running
func (c *clusterInfo) startMissingWatches() error {
apis, err := c.kubectl.GetAPIResources(c.cluster.RESTConfig(), c.settings)
if err != nil {
return err
}
for i := range apis {
api := apis[i]
if _, ok := c.apisMeta[api.GroupKind]; !ok {
ctx, cancel := context.WithCancel(context.Background())
info := &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel}
c.apisMeta[api.GroupKind] = info
go c.watchEvents(ctx, api, info)
}
}
return nil
}
func runSynced(lock *sync.Mutex, action func() error) error {
lock.Lock()
defer lock.Unlock()
return action()
}
func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo, info *apiMeta) {
util.RetryUntilSucceed(func() (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
}
}()
err = runSynced(c.syncLock, func() error {
if info.resourceVersion == "" {
list, err := api.Interface.List(metav1.ListOptions{})
if err != nil {
return err
}
c.replaceResourceCache(api.GroupKind, list.GetResourceVersion(), list.Items)
}
return nil
})
if err != nil {
return err
}
w, err := api.Interface.Watch(metav1.ListOptions{ResourceVersion: info.resourceVersion})
if errors.IsNotFound(err) {
c.stopWatching(api.GroupKind)
return nil
}
err = runSynced(c.syncLock, func() error {
if errors.IsGone(err) {
info.resourceVersion = ""
log.Warnf("Resource version of %s on %s is too old.", api.GroupKind, c.cluster.Server)
}
return err
})
if err != nil {
return err
}
defer w.Stop()
for {
select {
case <-ctx.Done():
return nil
case event, ok := <-w.ResultChan():
if ok {
obj := event.Object.(*unstructured.Unstructured)
info.resourceVersion = obj.GetResourceVersion()
err = c.processEvent(event.Type, obj)
if err != nil {
log.Warnf("Failed to process event %s %s/%s/%s: %v", event.Type, obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err)
continue
}
if kube.IsCRD(obj) {
if event.Type == watch.Deleted {
group, groupOk, groupErr := unstructured.NestedString(obj.Object, "spec", "group")
kind, kindOk, kindErr := unstructured.NestedString(obj.Object, "spec", "names", "kind")
if groupOk && groupErr == nil && kindOk && kindErr == nil {
gk := schema.GroupKind{Group: group, Kind: kind}
c.stopWatching(gk)
}
} else {
err = runSynced(c.syncLock, func() error {
return c.startMissingWatches()
})
}
}
if err != nil {
log.Warnf("Failed to start missing watch: %v", err)
}
} else {
return fmt.Errorf("Watch %s on %s has closed", api.GroupKind, c.cluster.Server)
}
}
}
}, fmt.Sprintf("watch %s on %s", api.GroupKind, c.cluster.Server), ctx, watchResourcesRetryTimeout)
}
func (c *clusterInfo) sync() (err error) {
c.log.Info("Start syncing cluster")
for i := range c.apisMeta {
c.apisMeta[i].watchCancel()
}
c.apisMeta = make(map[schema.GroupKind]*apiMeta)
c.nodes = make(map[kube.ResourceKey]*node)
apis, err := c.kubectl.GetAPIResources(c.cluster.RESTConfig(), c.settings)
if err != nil {
return err
}
lock := sync.Mutex{}
err = util.RunAllAsync(len(apis), func(i int) error {
api := apis[i]
list, err := api.Interface.List(metav1.ListOptions{})
if err != nil {
return err
}
lock.Lock()
for i := range list.Items {
c.setNode(createObjInfo(&list.Items[i], c.settings.GetAppInstanceLabelKey()))
}
lock.Unlock()
return nil
})
if err == nil {
err = c.startMissingWatches()
}
if err != nil {
log.Errorf("Failed to sync cluster %s: %v", c.cluster.Server, err)
return err
}
c.log.Info("Cluster successfully synced")
return nil
}
func (c *clusterInfo) ensureSynced() error {
c.syncLock.Lock()
defer c.syncLock.Unlock()
if c.synced() {
return c.syncError
}
err := c.sync()
syncTime := time.Now()
c.syncTime = &syncTime
c.syncError = err
return c.syncError
}
func (c *clusterInfo) getChildren(obj *unstructured.Unstructured) []appv1.ResourceNode {
c.lock.Lock()
defer c.lock.Unlock()
children := make([]appv1.ResourceNode, 0)
if objInfo, ok := c.nodes[kube.GetResourceKey(obj)]; ok {
nsNodes := c.nsIndex[obj.GetNamespace()]
for _, child := range nsNodes {
if objInfo.isParentOf(child) {
children = append(children, child.childResourceNodes(nsNodes, map[kube.ResourceKey]bool{objInfo.resourceKey(): true}))
}
}
}
return children
}
func (c *clusterInfo) isNamespaced(obj *unstructured.Unstructured) bool {
if api, ok := c.apisMeta[kube.GetResourceKey(obj).GroupKind()]; ok && !api.namespaced {
return false
}
return true
}
func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
c.lock.Lock()
defer c.lock.Unlock()
managedObjs := make(map[kube.ResourceKey]*unstructured.Unstructured)
// iterate all objects in live state cache to find ones associated with app
for key, o := range c.nodes {
if o.appName == a.Name && o.resource != nil && len(o.ownerRefs) == 0 {
managedObjs[key] = o.resource
}
}
// iterate target objects and identify ones that already exist in the cluster,
// but are simply missing our label
lock := &sync.Mutex{}
err := util.RunAllAsync(len(targetObjs), func(i int) error {
targetObj := targetObjs[i]
key := GetTargetObjKey(a, targetObj, c.isNamespaced(targetObj))
lock.Lock()
managedObj := managedObjs[key]
lock.Unlock()
if managedObj == nil {
if existingObj, exists := c.nodes[key]; exists {
if existingObj.resource != nil {
managedObj = existingObj.resource
} else {
var err error
managedObj, err = c.kubectl.GetResource(c.cluster.RESTConfig(), targetObj.GroupVersionKind(), existingObj.ref.Name, existingObj.ref.Namespace)
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
}
}
}
if managedObj != nil {
managedObj, err := c.kubectl.ConvertToVersion(managedObj, targetObj.GroupVersionKind().Group, targetObj.GroupVersionKind().Version)
if err != nil {
return err
}
lock.Lock()
managedObjs[key] = managedObj
lock.Unlock()
}
return nil
})
if err != nil {
return nil, err
}
return managedObjs, nil
}
func (c *clusterInfo) delete(obj *unstructured.Unstructured) error {
return c.kubectl.DeleteResource(c.cluster.RESTConfig(), obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), false)
}
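// processEvent applies a single watch event to the node cache: deletions
// remove the node (notifying the owning application), all other event types
// upsert it.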
func (c *clusterInfo) processEvent(event watch.EventType, un *unstructured.Unstructured) error {
c.lock.Lock()
defer c.lock.Unlock()
key := kube.GetResourceKey(un)
existingNode, exists := c.nodes[key]
if event == watch.Deleted {
if exists {
c.onNodeRemoved(key, existingNode)
}
} else {
c.onNodeUpdated(exists, existingNode, un, key)
}
return nil
}
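// onNodeUpdated replaces the cached node and schedules a refresh for every
// application owning either the old or the new version of the resource; a
// full refresh is requested only when one of those nodes is a root app node.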
func (c *clusterInfo) onNodeUpdated(exists bool, existingNode *node, un *unstructured.Unstructured, key kube.ResourceKey) {
nodes := make([]*node, 0)
if exists {
nodes = append(nodes, existingNode)
}
newObj := createObjInfo(un, c.settings.GetAppInstanceLabelKey())
c.setNode(newObj)
nodes = append(nodes, newObj)
toNotify := make(map[string]bool)
for i := range nodes {
n := nodes[i]
if ns, ok := c.nsIndex[n.ref.Namespace]; ok {
app := n.getApp(ns)
if app == "" || skipAppRequeing(key) {
continue
}
toNotify[app] = n.isRootAppNode() || toNotify[app]
}
}
for name, full := range toNotify {
c.onAppUpdated(name, full)
}
}
func (c *clusterInfo) onNodeRemoved(key kube.ResourceKey, n *node) {
appName := n.appName
if ns, ok := c.nsIndex[key.Namespace]; ok {
appName = n.getApp(ns)
}
c.removeNode(key)
if appName != "" {
c.onAppUpdated(appName, n.isRootAppNode())
}
}
var (
ignoredRefreshResources = map[string]bool{
"/" + kube.EndpointsKind: true,
}
)
// skipAppRequeuing checks if the object is an API type which we want to skip requeuing against.
// We ignore API types which have a high churn rate, and/or whose updates are irrelevant to the app
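// For example, v1 Endpoints objects (core group, hence the "" prefix) churn
// constantly as pod IPs change, so their key "/Endpoints" is listed in
// ignoredRefreshResources.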
func skipAppRequeuing(key kube.ResourceKey) bool {
return ignoredRefreshResources[key.Group+"/"+key.Kind]
}

View File

@@ -1,338 +0,0 @@
package cache
import (
"fmt"
"sort"
"strings"
"sync"
"testing"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic/fake"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"github.com/argoproj/argo-cd/errors"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/kube/kubetest"
"github.com/argoproj/argo-cd/util/settings"
)
func strToUnstructured(jsonStr string) *unstructured.Unstructured {
obj := make(map[string]interface{})
err := yaml.Unmarshal([]byte(jsonStr), &obj)
errors.CheckError(err)
return &unstructured.Unstructured{Object: obj}
}
func mustToUnstructured(obj interface{}) *unstructured.Unstructured {
un, err := kube.ToUnstructured(obj)
errors.CheckError(err)
return un
}
var (
testPod = strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: helm-guestbook-pod
namespace: default
ownerReferences:
- apiVersion: extensions/v1beta1
kind: ReplicaSet
name: helm-guestbook-rs
resourceVersion: "123"`)
testRS = strToUnstructured(`
apiVersion: apps/v1
kind: ReplicaSet
metadata:
name: helm-guestbook-rs
namespace: default
ownerReferences:
- apiVersion: extensions/v1beta1
kind: Deployment
name: helm-guestbook
resourceVersion: "123"`)
testDeploy = strToUnstructured(`
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: helm-guestbook
name: helm-guestbook
namespace: default
resourceVersion: "123"`)
)
func newCluster(objs ...*unstructured.Unstructured) *clusterInfo {
runtimeObjs := make([]runtime.Object, len(objs))
for i := range objs {
runtimeObjs[i] = objs[i]
}
scheme := runtime.NewScheme()
client := fake.NewSimpleDynamicClient(scheme, runtimeObjs...)
apiResources := []kube.APIResourceInfo{{
GroupKind: schema.GroupKind{Group: "", Kind: "Pod"},
Interface: client.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}),
Meta: metav1.APIResource{Namespaced: true},
}, {
GroupKind: schema.GroupKind{Group: "apps", Kind: "ReplicaSet"},
Interface: client.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}),
Meta: metav1.APIResource{Namespaced: true},
}, {
GroupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"},
Interface: client.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}),
Meta: metav1.APIResource{Namespaced: true},
}}
return newClusterExt(kubetest.MockKubectlCmd{APIResources: apiResources})
}
func newClusterExt(kubectl kube.Kubectl) *clusterInfo {
return &clusterInfo{
lock: &sync.Mutex{},
nodes: make(map[kube.ResourceKey]*node),
onAppUpdated: func(appName string, fullRefresh bool) {},
kubectl: kubectl,
nsIndex: make(map[string]map[kube.ResourceKey]*node),
cluster: &appv1.Cluster{},
syncTime: nil,
syncLock: &sync.Mutex{},
apisMeta: make(map[schema.GroupKind]*apiMeta),
log: log.WithField("cluster", "test"),
settings: &settings.ArgoCDSettings{},
}
}
func TestGetChildren(t *testing.T) {
cluster := newCluster(testPod, testRS, testDeploy)
err := cluster.ensureSynced()
assert.Nil(t, err)
rsChildren := cluster.getChildren(testRS)
assert.Equal(t, []appv1.ResourceNode{{
Kind: "Pod",
Namespace: "default",
Name: "helm-guestbook-pod",
Group: "",
Version: "v1",
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
Children: make([]appv1.ResourceNode, 0),
ResourceVersion: "123",
}}, rsChildren)
deployChildren := cluster.getChildren(testDeploy)
assert.Equal(t, []appv1.ResourceNode{{
Kind: "ReplicaSet",
Namespace: "default",
Name: "helm-guestbook-rs",
Group: "apps",
Version: "v1",
ResourceVersion: "123",
Children: rsChildren,
Info: []appv1.InfoItem{},
}}, deployChildren)
}
func TestGetManagedLiveObjs(t *testing.T) {
cluster := newCluster(testPod, testRS, testDeploy)
err := cluster.ensureSynced()
assert.Nil(t, err)
targetDeploy := strToUnstructured(`
apiVersion: apps/v1
kind: Deployment
metadata:
name: helm-guestbook
labels:
app: helm-guestbook`)
managedObjs, err := cluster.getManagedLiveObjs(&appv1.Application{
ObjectMeta: metav1.ObjectMeta{Name: "helm-guestbook"},
Spec: appv1.ApplicationSpec{
Destination: appv1.ApplicationDestination{
Namespace: "default",
},
},
}, []*unstructured.Unstructured{targetDeploy})
assert.Nil(t, err)
assert.Equal(t, managedObjs, map[kube.ResourceKey]*unstructured.Unstructured{
kube.NewResourceKey("apps", "Deployment", "default", "helm-guestbook"): testDeploy,
})
}
func TestChildDeletedEvent(t *testing.T) {
cluster := newCluster(testPod, testRS, testDeploy)
err := cluster.ensureSynced()
assert.Nil(t, err)
err = cluster.processEvent(watch.Deleted, testPod)
assert.Nil(t, err)
rsChildren := cluster.getChildren(testRS)
assert.Equal(t, []appv1.ResourceNode{}, rsChildren)
}
func TestProcessNewChildEvent(t *testing.T) {
cluster := newCluster(testPod, testRS, testDeploy)
err := cluster.ensureSynced()
assert.Nil(t, err)
newPod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: helm-guestbook-pod2
namespace: default
ownerReferences:
- apiVersion: extensions/v1beta1
kind: ReplicaSet
name: helm-guestbook-rs
resourceVersion: "123"`)
err = cluster.processEvent(watch.Added, newPod)
assert.Nil(t, err)
rsChildren := cluster.getChildren(testRS)
sort.Slice(rsChildren, func(i, j int) bool {
return strings.Compare(rsChildren[i].Name, rsChildren[j].Name) < 0
})
assert.Equal(t, []appv1.ResourceNode{{
Kind: "Pod",
Namespace: "default",
Name: "helm-guestbook-pod",
Group: "",
Version: "v1",
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
Children: make([]appv1.ResourceNode, 0),
ResourceVersion: "123",
}, {
Kind: "Pod",
Namespace: "default",
Name: "helm-guestbook-pod2",
Group: "",
Version: "v1",
Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
Children: make([]appv1.ResourceNode, 0),
ResourceVersion: "123",
}}, rsChildren)
}
func TestUpdateResourceTags(t *testing.T) {
pod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{Name: "testPod", Namespace: "default"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "test",
Image: "test",
}},
},
}
cluster := newCluster(mustToUnstructured(pod))
err := cluster.ensureSynced()
assert.Nil(t, err)
podNode := cluster.nodes[kube.GetResourceKey(mustToUnstructured(pod))]
assert.NotNil(t, podNode)
assert.Equal(t, []appv1.InfoItem{{Name: "Containers", Value: "0/1"}}, podNode.info)
pod.Status = corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: -1,
},
},
}},
}
err = cluster.processEvent(watch.Modified, mustToUnstructured(pod))
assert.Nil(t, err)
podNode = cluster.nodes[kube.GetResourceKey(mustToUnstructured(pod))]
assert.NotNil(t, podNode)
assert.Equal(t, []appv1.InfoItem{{Name: "Status Reason", Value: "ExitCode:-1"}, {Name: "Containers", Value: "0/1"}}, podNode.info)
}
func TestUpdateAppResource(t *testing.T) {
updatesReceived := make([]string, 0)
cluster := newCluster(testPod, testRS, testDeploy)
cluster.onAppUpdated = func(appName string, fullRefresh bool) {
updatesReceived = append(updatesReceived, fmt.Sprintf("%s: %v", appName, fullRefresh))
}
err := cluster.ensureSynced()
assert.Nil(t, err)
err = cluster.processEvent(watch.Modified, mustToUnstructured(testPod))
assert.Nil(t, err)
assert.Contains(t, updatesReceived, "helm-guestbook: false")
}
func TestCircularReference(t *testing.T) {
dep := testDeploy.DeepCopy()
dep.SetOwnerReferences([]metav1.OwnerReference{{
Name: testPod.GetName(),
Kind: testPod.GetKind(),
APIVersion: testPod.GetAPIVersion(),
}})
cluster := newCluster(testPod, testRS, dep)
err := cluster.ensureSynced()
assert.Nil(t, err)
children := cluster.getChildren(dep)
assert.Len(t, children, 1)
node := cluster.nodes[kube.GetResourceKey(dep)]
assert.NotNil(t, node)
app := node.getApp(cluster.nodes)
assert.Equal(t, "", app)
}
func TestWatchCacheUpdated(t *testing.T) {
removed := testPod.DeepCopy()
removed.SetName(testPod.GetName() + "-removed-pod")
updated := testPod.DeepCopy()
updated.SetName(testPod.GetName() + "-updated-pod")
updated.SetResourceVersion("updated-pod-version")
cluster := newCluster(removed, updated)
err := cluster.ensureSynced()
assert.Nil(t, err)
added := testPod.DeepCopy()
added.SetName(testPod.GetName() + "-new-pod")
podGroupKind := testPod.GroupVersionKind().GroupKind()
cluster.replaceResourceCache(podGroupKind, "updated-list-version", []unstructured.Unstructured{*updated, *added})
_, ok := cluster.nodes[kube.GetResourceKey(removed)]
assert.False(t, ok)
updatedNode, ok := cluster.nodes[kube.GetResourceKey(updated)]
assert.True(t, ok)
assert.Equal(t, updatedNode.resourceVersion, "updated-pod-version")
_, ok = cluster.nodes[kube.GetResourceKey(added)]
assert.True(t, ok)
}

View File

@@ -1,107 +0,0 @@
package cache
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
k8snode "k8s.io/kubernetes/pkg/util/node"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
)
func getNodeInfo(un *unstructured.Unstructured) []v1alpha1.InfoItem {
gvk := un.GroupVersionKind()
if gvk.Kind == kube.PodKind && gvk.Group == "" {
return getPodInfo(un)
}
return []v1alpha1.InfoItem{}
}
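// getPodInfo derives kubectl-style status columns for a pod: the effective
// status reason (including init-container failures and termination states)
// and a ready/total container count.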
func getPodInfo(un *unstructured.Unstructured) []v1alpha1.InfoItem {
pod := v1.Pod{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
if err != nil {
return []v1alpha1.InfoItem{}
}
restarts := 0
totalContainers := len(pod.Spec.Containers)
readyContainers := 0
reason := string(pod.Status.Phase)
if pod.Status.Reason != "" {
reason = pod.Status.Reason
}
initializing := false
for i := range pod.Status.InitContainerStatuses {
container := pod.Status.InitContainerStatuses[i]
restarts += int(container.RestartCount)
switch {
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
continue
case container.State.Terminated != nil:
// initialization failed
if len(container.State.Terminated.Reason) == 0 {
if container.State.Terminated.Signal != 0 {
reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
} else {
reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
}
} else {
reason = "Init:" + container.State.Terminated.Reason
}
initializing = true
case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
reason = "Init:" + container.State.Waiting.Reason
initializing = true
default:
reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
initializing = true
}
break
}
if !initializing {
restarts = 0
hasRunning := false
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
container := pod.Status.ContainerStatuses[i]
restarts += int(container.RestartCount)
if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
reason = container.State.Waiting.Reason
} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
reason = container.State.Terminated.Reason
} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
if container.State.Terminated.Signal != 0 {
reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
} else {
reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
}
} else if container.Ready && container.State.Running != nil {
hasRunning = true
readyContainers++
}
}
// change pod status back to "Running" if at least one container is still reporting a "Running" state
if reason == "Completed" && hasRunning {
reason = "Running"
}
}
if pod.DeletionTimestamp != nil && pod.Status.Reason == k8snode.NodeUnreachablePodReason {
reason = "Unknown"
} else if pod.DeletionTimestamp != nil {
reason = "Terminating"
}
info := make([]v1alpha1.InfoItem, 0)
if reason != "" {
info = append(info, v1alpha1.InfoItem{Name: "Status Reason", Value: reason})
}
return append(info, v1alpha1.InfoItem{Name: "Containers", Value: fmt.Sprintf("%d/%d", readyContainers, totalContainers)})
}

View File

@@ -1,105 +0,0 @@
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import context "context"
import kube "github.com/argoproj/argo-cd/util/kube"
import mock "github.com/stretchr/testify/mock"
import unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
import v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
// LiveStateCache is an autogenerated mock type for the LiveStateCache type
type LiveStateCache struct {
mock.Mock
}
// Delete provides a mock function with given fields: server, obj
func (_m *LiveStateCache) Delete(server string, obj *unstructured.Unstructured) error {
ret := _m.Called(server, obj)
var r0 error
if rf, ok := ret.Get(0).(func(string, *unstructured.Unstructured) error); ok {
r0 = rf(server, obj)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetChildren provides a mock function with given fields: server, obj
func (_m *LiveStateCache) GetChildren(server string, obj *unstructured.Unstructured) ([]v1alpha1.ResourceNode, error) {
ret := _m.Called(server, obj)
var r0 []v1alpha1.ResourceNode
if rf, ok := ret.Get(0).(func(string, *unstructured.Unstructured) []v1alpha1.ResourceNode); ok {
r0 = rf(server, obj)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]v1alpha1.ResourceNode)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, *unstructured.Unstructured) error); ok {
r1 = rf(server, obj)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetManagedLiveObjs provides a mock function with given fields: a, targetObjs
func (_m *LiveStateCache) GetManagedLiveObjs(a *v1alpha1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
ret := _m.Called(a, targetObjs)
var r0 map[kube.ResourceKey]*unstructured.Unstructured
if rf, ok := ret.Get(0).(func(*v1alpha1.Application, []*unstructured.Unstructured) map[kube.ResourceKey]*unstructured.Unstructured); ok {
r0 = rf(a, targetObjs)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[kube.ResourceKey]*unstructured.Unstructured)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*v1alpha1.Application, []*unstructured.Unstructured) error); ok {
r1 = rf(a, targetObjs)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Invalidate provides a mock function with given fields:
func (_m *LiveStateCache) Invalidate() {
_m.Called()
}
// IsNamespaced provides a mock function with given fields: server, obj
func (_m *LiveStateCache) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
ret := _m.Called(server, obj)
var r0 bool
if rf, ok := ret.Get(0).(func(string, *unstructured.Unstructured) bool); ok {
r0 = rf(server, obj)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, *unstructured.Unstructured) error); ok {
r1 = rf(server, obj)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Run provides a mock function with given fields: ctx
func (_m *LiveStateCache) Run(ctx context.Context) {
_m.Called(ctx)
}

View File

@@ -1,115 +0,0 @@
package cache
import (
log "github.com/sirupsen/logrus"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type node struct {
resourceVersion string
ref v1.ObjectReference
ownerRefs []metav1.OwnerReference
info []appv1.InfoItem
appName string
resource *unstructured.Unstructured
}
func (n *node) isRootAppNode() bool {
return n.appName != "" && len(n.ownerRefs) == 0
}
func (n *node) resourceKey() kube.ResourceKey {
return kube.NewResourceKey(n.ref.GroupVersionKind().Group, n.ref.Kind, n.ref.Namespace, n.ref.Name)
}
func (n *node) isParentOf(child *node) bool {
for _, ownerRef := range child.ownerRefs {
ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind)
if kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name) == n.resourceKey() {
return true
}
}
return false
}
func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
gv, err := schema.ParseGroupVersion(ownerRef.APIVersion)
if err != nil {
gv = schema.GroupVersion{}
}
return gv
}
func (n *node) getApp(ns map[kube.ResourceKey]*node) string {
return n.getAppRecursive(ns, map[kube.ResourceKey]bool{})
}
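// getAppRecursive walks owner references upward until it finds a node with a
// known app name, tracking visited keys to break circular ownership chains.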
func (n *node) getAppRecursive(ns map[kube.ResourceKey]*node, visited map[kube.ResourceKey]bool) string {
if !visited[n.resourceKey()] {
visited[n.resourceKey()] = true
} else {
log.Warnf("Circular dependency detected: %v.", visited)
return n.appName
}
if n.appName != "" {
return n.appName
}
for _, ownerRef := range n.ownerRefs {
gv := ownerRefGV(ownerRef)
if parent, ok := ns[kube.NewResourceKey(gv.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name)]; ok {
app := parent.getAppRecursive(ns, visited)
if app != "" {
return app
}
}
}
return ""
}
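// newResourceKeySet returns a copy of the set with the extra keys added;
// copying keeps the parents set of one recursion branch in childResourceNodes
// from leaking into sibling branches.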
func newResourceKeySet(set map[kube.ResourceKey]bool, keys ...kube.ResourceKey) map[kube.ResourceKey]bool {
newSet := make(map[kube.ResourceKey]bool)
for k, v := range set {
newSet[k] = v
}
for i := range keys {
newSet[keys[i]] = true
}
return newSet
}
func (n *node) childResourceNodes(ns map[kube.ResourceKey]*node, parents map[kube.ResourceKey]bool) appv1.ResourceNode {
children := make([]appv1.ResourceNode, 0)
for childKey := range ns {
if n.isParentOf(ns[childKey]) {
if parents[childKey] {
key := n.resourceKey()
log.Warnf("Circular dependency detected. %s is child and parent of %s", childKey.String(), key.String())
} else {
children = append(children, ns[childKey].childResourceNodes(ns, newResourceKeySet(parents, n.resourceKey())))
}
}
}
gv, err := schema.ParseGroupVersion(n.ref.APIVersion)
if err != nil {
gv = schema.GroupVersion{}
}
return appv1.ResourceNode{
Name: n.ref.Name,
Group: gv.Group,
Version: gv.Version,
Kind: n.ref.Kind,
Namespace: n.ref.Namespace,
Info: n.info,
Children: children,
ResourceVersion: n.resourceVersion,
}
}

View File

@@ -1,25 +0,0 @@
package cache
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestIsParentOf(t *testing.T) {
child := createObjInfo(testPod, "")
parent := createObjInfo(testRS, "")
grandParent := createObjInfo(testDeploy, "")
assert.True(t, parent.isParentOf(child))
assert.False(t, grandParent.isParentOf(child))
}
func TestIsParentOfSameKindDifferentGroup(t *testing.T) {
rs := testRS.DeepCopy()
rs.SetAPIVersion("somecrd.io/v1")
child := createObjInfo(testPod, "")
invalidParent := createObjInfo(rs, "")
assert.False(t, invalidParent.isParentOf(child))
}

View File

@@ -1,182 +0,0 @@
package metrics
import (
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/labels"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/util/git"
)
type MetricsServer struct {
*http.Server
syncCounter *prometheus.CounterVec
reconcileHistogram *prometheus.HistogramVec
}
const (
// MetricsPath is the endpoint to collect application metrics
MetricsPath = "/metrics"
)
// Follow Prometheus naming practices
// https://prometheus.io/docs/practices/naming/
var (
descAppDefaultLabels = []string{"namespace", "name", "project"}
descAppInfo = prometheus.NewDesc(
"argocd_app_info",
"Information about application.",
append(descAppDefaultLabels, "repo", "dest_server", "dest_namespace"),
nil,
)
descAppCreated = prometheus.NewDesc(
"argocd_app_created_time",
"Creation time in unix timestamp for an application.",
descAppDefaultLabels,
nil,
)
descAppSyncStatusCode = prometheus.NewDesc(
"argocd_app_sync_status",
"The application current sync status.",
append(descAppDefaultLabels, "sync_status"),
nil,
)
descAppHealthStatus = prometheus.NewDesc(
"argocd_app_health_status",
"The application current health status.",
append(descAppDefaultLabels, "health_status"),
nil,
)
)
// NewMetricsServer returns a new prometheus server which collects application metrics
func NewMetricsServer(addr string, appLister applister.ApplicationLister) *MetricsServer {
mux := http.NewServeMux()
appRegistry := NewAppRegistry(appLister)
appRegistry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
appRegistry.MustRegister(prometheus.NewGoCollector())
mux.Handle(MetricsPath, promhttp.HandlerFor(appRegistry, promhttp.HandlerOpts{}))
syncCounter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "argocd_app_sync_total",
Help: "Number of application syncs.",
},
append(descAppDefaultLabels, "phase"),
)
appRegistry.MustRegister(syncCounter)
reconcileHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "argocd_app_reconcile",
Help: "Application reconciliation performance.",
// Buckets chosen after observing a ~2100ms mean reconcile time
Buckets: []float64{0.25, .5, 1, 2, 4, 8, 16},
},
descAppDefaultLabels,
)
appRegistry.MustRegister(reconcileHistogram)
return &MetricsServer{
Server: &http.Server{
Addr: addr,
Handler: mux,
},
syncCounter: syncCounter,
reconcileHistogram: reconcileHistogram,
}
}
// IncSync increments the sync counter for an application
func (m *MetricsServer) IncSync(app *argoappv1.Application, state *argoappv1.OperationState) {
if !state.Phase.Completed() {
return
}
m.syncCounter.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), string(state.Phase)).Inc()
}
// IncReconcile records the duration of an application reconciliation in the histogram
func (m *MetricsServer) IncReconcile(app *argoappv1.Application, duration time.Duration) {
m.reconcileHistogram.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject()).Observe(duration.Seconds())
}
type appCollector struct {
store applister.ApplicationLister
}
// NewAppCollector returns a prometheus collector for application metrics
func NewAppCollector(appLister applister.ApplicationLister) prometheus.Collector {
return &appCollector{
store: appLister,
}
}
// NewAppRegistry creates a new prometheus registry that collects application metrics
func NewAppRegistry(appLister applister.ApplicationLister) *prometheus.Registry {
registry := prometheus.NewRegistry()
registry.MustRegister(NewAppCollector(appLister))
return registry
}
// Describe implements the prometheus.Collector interface
func (c *appCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- descAppInfo
ch <- descAppCreated
ch <- descAppSyncStatusCode
ch <- descAppHealthStatus
}
// Collect implements the prometheus.Collector interface
func (c *appCollector) Collect(ch chan<- prometheus.Metric) {
apps, err := c.store.List(labels.NewSelector())
if err != nil {
log.Warnf("Failed to collect applications: %v", err)
return
}
for _, app := range apps {
collectApps(ch, app)
}
}
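// boolFloat64 maps a bool onto the 0/1 gauge values Prometheus expects for
// state-style metrics.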
func boolFloat64(b bool) float64 {
if b {
return 1
}
return 0
}
func collectApps(ch chan<- prometheus.Metric, app *argoappv1.Application) {
addConstMetric := func(desc *prometheus.Desc, t prometheus.ValueType, v float64, lv ...string) {
project := app.Spec.GetProject()
lv = append([]string{app.Namespace, app.Name, project}, lv...)
ch <- prometheus.MustNewConstMetric(desc, t, v, lv...)
}
addGauge := func(desc *prometheus.Desc, v float64, lv ...string) {
addConstMetric(desc, prometheus.GaugeValue, v, lv...)
}
addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.Source.RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace)
addGauge(descAppCreated, float64(app.CreationTimestamp.Unix()))
syncStatus := app.Status.Sync.Status
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeSynced), string(argoappv1.SyncStatusCodeSynced))
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeOutOfSync), string(argoappv1.SyncStatusCodeOutOfSync))
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeUnknown || syncStatus == ""), string(argoappv1.SyncStatusCodeUnknown))
healthStatus := app.Status.Health.Status
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusUnknown || healthStatus == ""), argoappv1.HealthStatusUnknown)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusProgressing), argoappv1.HealthStatusProgressing)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusSuspended), argoappv1.HealthStatusSuspended)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusHealthy), argoappv1.HealthStatusHealthy)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusDegraded), argoappv1.HealthStatusDegraded)
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusMissing), argoappv1.HealthStatusMissing)
}

View File

@@ -1,233 +0,0 @@
package metrics
import (
"context"
"log"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
appinformer "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
)
const fakeApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: my-app
namespace: argocd
spec:
destination:
namespace: dummy-namespace
server: https://localhost:6443
project: important-project
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
status:
sync:
status: Synced
health:
status: Healthy
`
const expectedResponse = `# HELP argocd_app_created_time Creation time in unix timestamp for an application.
# TYPE argocd_app_created_time gauge
argocd_app_created_time{name="my-app",namespace="argocd",project="important-project"} -6.21355968e+10
# HELP argocd_app_health_status The application current health status.
# TYPE argocd_app_health_status gauge
argocd_app_health_status{health_status="Degraded",name="my-app",namespace="argocd",project="important-project"} 0
argocd_app_health_status{health_status="Healthy",name="my-app",namespace="argocd",project="important-project"} 1
argocd_app_health_status{health_status="Missing",name="my-app",namespace="argocd",project="important-project"} 0
argocd_app_health_status{health_status="Progressing",name="my-app",namespace="argocd",project="important-project"} 0
argocd_app_health_status{health_status="Suspended",name="my-app",namespace="argocd",project="important-project"} 0
argocd_app_health_status{health_status="Unknown",name="my-app",namespace="argocd",project="important-project"} 0
# HELP argocd_app_info Information about application.
# TYPE argocd_app_info gauge
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",repo="https://github.com/argoproj/argocd-example-apps"} 1
# HELP argocd_app_sync_status The application current sync status.
# TYPE argocd_app_sync_status gauge
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="OutOfSync"} 0
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="Synced"} 1
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="Unknown"} 0
`
const fakeDefaultApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: my-app
namespace: argocd
spec:
destination:
namespace: dummy-namespace
server: https://localhost:6443
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
status:
sync:
status: Synced
health:
status: Healthy
`
const expectedDefaultResponse = `# HELP argocd_app_created_time Creation time in unix timestamp for an application.
# TYPE argocd_app_created_time gauge
argocd_app_created_time{name="my-app",namespace="argocd",project="default"} -6.21355968e+10
# HELP argocd_app_health_status The application current health status.
# TYPE argocd_app_health_status gauge
argocd_app_health_status{health_status="Degraded",name="my-app",namespace="argocd",project="default"} 0
argocd_app_health_status{health_status="Healthy",name="my-app",namespace="argocd",project="default"} 1
argocd_app_health_status{health_status="Missing",name="my-app",namespace="argocd",project="default"} 0
argocd_app_health_status{health_status="Progressing",name="my-app",namespace="argocd",project="default"} 0
argocd_app_health_status{health_status="Suspended",name="my-app",namespace="argocd",project="default"} 0
argocd_app_health_status{health_status="Unknown",name="my-app",namespace="argocd",project="default"} 0
# HELP argocd_app_info Information about application.
# TYPE argocd_app_info gauge
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="default",repo="https://github.com/argoproj/argocd-example-apps"} 1
# HELP argocd_app_sync_status The application current sync status.
# TYPE argocd_app_sync_status gauge
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="OutOfSync"} 0
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="Synced"} 1
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="Unknown"} 0
`
func newFakeApp(fakeApp string) *argoappv1.Application {
var app argoappv1.Application
err := yaml.Unmarshal([]byte(fakeApp), &app)
if err != nil {
panic(err)
}
return &app
}
func newFakeLister(fakeApp ...string) (context.CancelFunc, applister.ApplicationLister) {
// note: do not defer cancel() here; the informer must keep running after this
// helper returns, and the caller stops it via the returned cancel
ctx, cancel := context.WithCancel(context.Background())
var fakeApps []runtime.Object
for _, name := range fakeApp {
fakeApps = append(fakeApps, newFakeApp(name))
}
appClientset := appclientset.NewSimpleClientset(fakeApps...)
factory := appinformer.NewFilteredSharedInformerFactory(appClientset, 0, "argocd", func(options *metav1.ListOptions) {})
appInformer := factory.Argoproj().V1alpha1().Applications().Informer()
go appInformer.Run(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), appInformer.HasSynced) {
log.Fatal("Timed out waiting for caches to sync")
}
return cancel, factory.Argoproj().V1alpha1().Applications().Lister()
}
func testApp(t *testing.T, fakeApp string, expectedResponse string) {
cancel, appLister := newFakeLister(fakeApp)
defer cancel()
metricsServ := NewMetricsServer("localhost:8082", appLister)
req, err := http.NewRequest("GET", "/metrics", nil)
assert.NoError(t, err)
rr := httptest.NewRecorder()
metricsServ.Handler.ServeHTTP(rr, req)
assert.Equal(t, http.StatusOK, rr.Code)
body := rr.Body.String()
log.Println(body)
assertMetricsPrinted(t, expectedResponse, body)
}
type testCombination struct {
application string
expectedResponse string
}
func TestMetrics(t *testing.T) {
combinations := []testCombination{
{
application: fakeApp,
expectedResponse: expectedResponse,
},
{
application: fakeDefaultApp,
expectedResponse: expectedDefaultResponse,
},
}
for _, combination := range combinations {
testApp(t, combination.application, combination.expectedResponse)
}
}
const appSyncTotal = `# HELP argocd_app_sync_total Number of application syncs.
# TYPE argocd_app_sync_total counter
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Error",project="important-project"} 1
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Failed",project="important-project"} 1
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Succeeded",project="important-project"} 2
`
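// Note: the test below also calls IncSync once with OperationRunning, but only the
// terminal phases are asserted. Since assertMetricsPrinted only checks line
// containment, whether a Running series is recorded is deliberately left untested.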
func TestMetricsSyncCounter(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ := NewMetricsServer("localhost:8082", appLister)
app := newFakeApp(fakeApp)
metricsServ.IncSync(app, &argoappv1.OperationState{Phase: argoappv1.OperationRunning})
metricsServ.IncSync(app, &argoappv1.OperationState{Phase: argoappv1.OperationFailed})
metricsServ.IncSync(app, &argoappv1.OperationState{Phase: argoappv1.OperationError})
metricsServ.IncSync(app, &argoappv1.OperationState{Phase: argoappv1.OperationSucceeded})
metricsServ.IncSync(app, &argoappv1.OperationState{Phase: argoappv1.OperationSucceeded})
req, err := http.NewRequest("GET", "/metrics", nil)
assert.NoError(t, err)
rr := httptest.NewRecorder()
metricsServ.Handler.ServeHTTP(rr, req)
assert.Equal(t, http.StatusOK, rr.Code)
body := rr.Body.String()
log.Println(body)
assertMetricsPrinted(t, appSyncTotal, body)
}
// assertMetricsPrinted asserts that every non-empty expected line appears in the body
func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
for _, line := range strings.Split(expectedLines, "\n") {
if line == "" {
continue
}
assert.Contains(t, body, line)
}
}
const appReconcileMetrics = `argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="0.25"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="0.5"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="1"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="2"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="4"} 0
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="8"} 1
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="16"} 1
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="+Inf"} 1
argocd_app_reconcile_sum{name="my-app",namespace="argocd",project="important-project"} 5
argocd_app_reconcile_count{name="my-app",namespace="argocd",project="important-project"} 1
`
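// Prometheus histogram buckets are cumulative, so a single 5-second observation
// shows up in every bucket with le >= 8 (le="8", le="16" and le="+Inf" all read 1)
// while the smaller buckets stay 0; the sum is 5 and the count is 1.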
func TestReconcileMetrics(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ := NewMetricsServer("localhost:8082", appLister)
app := newFakeApp(fakeApp)
metricsServ.IncReconcile(app, 5*time.Second)
req, err := http.NewRequest("GET", "/metrics", nil)
assert.NoError(t, err)
rr := httptest.NewRecorder()
metricsServ.Handler.ServeHTTP(rr, req)
assert.Equal(t, http.StatusOK, rr.Code)
body := rr.Body.String()
log.Println(body)
assertMetricsPrinted(t, appReconcileMetrics, body)
}


@@ -0,0 +1,205 @@
package controller
import (
"context"
"encoding/json"
"time"
"runtime/debug"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
type SecretController struct {
kubeClient kubernetes.Interface
secretQueue workqueue.RateLimitingInterface
secretInformer cache.SharedIndexInformer
repoClientset reposerver.Clientset
namespace string
}
func (ctrl *SecretController) Run(ctx context.Context) {
go ctrl.secretInformer.Run(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), ctrl.secretInformer.HasSynced) {
log.Error("Timed out waiting for caches to sync")
return
}
go wait.Until(func() {
for ctrl.processSecret() {
}
}, time.Second, ctx.Done())
}
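// processSecret drains one item from the work queue. Returning false (queue
// shutdown) ends the inner loop in Run; wait.Until then re-enters the loop every
// second until the context is cancelled.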
func (ctrl *SecretController) processSecret() (processNext bool) {
secretKey, shutdown := ctrl.secretQueue.Get()
if shutdown {
return false
}
processNext = true
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
}
ctrl.secretQueue.Done(secretKey)
}()
obj, exists, err := ctrl.secretInformer.GetIndexer().GetByKey(secretKey.(string))
if err != nil {
log.Errorf("Failed to get secret '%s' from informer index: %+v", secretKey, err)
return
}
if !exists {
// This happens after the secret was deleted but the work queue still had an entry for it.
return
}
secret, ok := obj.(*corev1.Secret)
if !ok {
log.Warnf("Key '%s' in index is not an secret", secretKey)
return
}
if secret.Labels[common.LabelKeySecretType] == common.SecretTypeCluster {
cluster := db.SecretToCluster(secret)
ctrl.updateState(secret, ctrl.getClusterState(cluster))
} else if secret.Labels[common.LabelKeySecretType] == common.SecretTypeRepository {
repo := db.SecretToRepo(secret)
ctrl.updateState(secret, ctrl.getRepoConnectionState(repo))
}
return
}
func (ctrl *SecretController) getRepoConnectionState(repo *v1alpha1.Repository) v1alpha1.ConnectionState {
state := v1alpha1.ConnectionState{
ModifiedAt: repo.ConnectionState.ModifiedAt,
Status: v1alpha1.ConnectionStatusUnknown,
}
closer, client, err := ctrl.repoClientset.NewRepositoryClient()
if err != nil {
log.Errorf("Unable to create repository client: %v", err)
return state
}
defer util.Close(closer)
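// Listing a single well-known path serves as a cheap connectivity probe: any
// successful round-trip to the repo server marks the repository Successful.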
_, err = client.ListDir(context.Background(), &repository.ListDirRequest{Repo: repo, Path: ".gitignore"})
if err == nil {
state.Status = v1alpha1.ConnectionStatusSuccessful
} else {
state.Status = v1alpha1.ConnectionStatusFailed
state.Message = err.Error()
}
return state
}
func (ctrl *SecretController) getClusterState(cluster *v1alpha1.Cluster) v1alpha1.ConnectionState {
state := v1alpha1.ConnectionState{
ModifiedAt: cluster.ConnectionState.ModifiedAt,
Status: v1alpha1.ConnectionStatusUnknown,
}
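// Building a client and requesting the server version doubles as the cluster
// connectivity probe; a failure at either step marks the connection Failed.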
kubeClientset, err := kubernetes.NewForConfig(cluster.RESTConfig())
if err == nil {
_, err = kubeClientset.Discovery().ServerVersion()
}
if err == nil {
state.Status = v1alpha1.ConnectionStatusSuccessful
} else {
state.Status = v1alpha1.ConnectionStatusFailed
state.Message = err.Error()
}
return state
}
func (ctrl *SecretController) updateState(secret *corev1.Secret, state v1alpha1.ConnectionState) {
annotationsPatch := make(map[string]string)
for key, value := range db.AnnotationsFromConnectionState(&state) {
if secret.Annotations[key] != value {
annotationsPatch[key] = value
}
}
if len(annotationsPatch) > 0 {
annotationsPatch[common.AnnotationConnectionModifiedAt] = metav1.Now().Format(time.RFC3339)
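// Illustrative shape of the resulting merge patch (the annotation keys come from
// db.AnnotationsFromConnectionState and are not spelled out here):
//   {"metadata":{"annotations":{"<state-key>":"<value>", ...}}}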
patchData, err := json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": annotationsPatch,
},
})
if err != nil {
log.Warnf("Unable to prepare secret state annotation patch: %v", err)
} else {
_, err := ctrl.kubeClient.CoreV1().Secrets(secret.Namespace).Patch(secret.Name, types.MergePatchType, patchData)
if err != nil {
log.Warnf("Unable to patch secret state annotation: %v", err)
}
}
}
}
func newSecretInformer(client kubernetes.Interface, resyncPeriod time.Duration, namespace string, secretQueue workqueue.RateLimitingInterface) cache.SharedIndexInformer {
informerFactory := informers.NewFilteredSharedInformerFactory(
client,
resyncPeriod,
namespace,
func(options *metav1.ListOptions) {
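// Watch only secrets labeled as cluster or repository credentials; all other
// secrets in the namespace are ignored by this informer.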
req, err := labels.NewRequirement(common.LabelKeySecretType, selection.In, []string{common.SecretTypeCluster, common.SecretTypeRepository})
if err != nil {
panic(err)
}
options.FieldSelector = fields.Everything().String()
labelSelector := labels.NewSelector().Add(*req)
options.LabelSelector = labelSelector.String()
},
)
informer := informerFactory.Core().V1().Secrets().Informer()
informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
secretQueue.Add(key)
}
},
UpdateFunc: func(old, new interface{}) {
key, err := cache.MetaNamespaceKeyFunc(new)
if err == nil {
secretQueue.Add(key)
}
},
},
)
return informer
}
func NewSecretController(kubeClient kubernetes.Interface, repoClientset reposerver.Clientset, resyncPeriod time.Duration, namespace string) *SecretController {
secretQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
return &SecretController{
kubeClient: kubeClient,
secretQueue: secretQueue,
secretInformer: newSecretInformer(kubeClient, resyncPeriod, namespace, secretQueue),
namespace: namespace,
repoClientset: repoClientset,
}
}
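// Minimal wiring sketch (assumed caller code; the resync period and namespace are
// illustrative, not taken from this file):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	ctrl := NewSecretController(kubeClient, repoClientset, 10*time.Minute, "argocd")
//	ctrl.Run(ctx) // non-blocking: starts the informer and the worker goroutine
//	<-ctx.Done()  // block for as long as the controller should keep running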


@@ -10,330 +10,358 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"github.com/argoproj/argo-cd/common"
statecache "github.com/argoproj/argo-cd/controller/cache"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/argo"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/diff"
"github.com/argoproj/argo-cd/util/health"
hookutil "github.com/argoproj/argo-cd/util/hook"
kubeutil "github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
)
type managedResource struct {
Target *unstructured.Unstructured
Live *unstructured.Unstructured
Diff diff.DiffResult
Group string
Version string
Kind string
Namespace string
Name string
Hook bool
}
func GetLiveObjs(res []managedResource) []*unstructured.Unstructured {
objs := make([]*unstructured.Unstructured, len(res))
for i := range res {
objs[i] = res[i].Live
}
return objs
}
type ResourceInfoProvider interface {
IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error)
}
const (
maxHistoryCnt = 5
)
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool) (*comparisonResult, error)
CompareAppState(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) (
*v1alpha1.ComparisonResult, *repository.ManifestResponse, []v1alpha1.ApplicationCondition, error)
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
}
type comparisonResult struct {
reconciledAt metav1.Time
syncStatus *v1alpha1.SyncStatus
healthStatus *v1alpha1.HealthStatus
resources []v1alpha1.ResourceStatus
managedResources []managedResource
conditions []v1alpha1.ApplicationCondition
hooks []*unstructured.Unstructured
diffNormalizer diff.Normalizer
appSourceType v1alpha1.ApplicationSourceType
// ksonnetAppStateManager compares application state using the Ksonnet CLI
type ksonnetAppStateManager struct {
db db.ArgoDB
appclientset appclientset.Interface
repoClientset reposerver.Clientset
namespace string
}
// appStateManager compares applications against git
type appStateManager struct {
db db.ArgoDB
settings *settings.ArgoCDSettings
appclientset appclientset.Interface
projInformer cache.SharedIndexInformer
kubectl kubeutil.Kubectl
repoClientset reposerver.Clientset
liveStateCache statecache.LiveStateCache
namespace string
}
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1.ApplicationSource, appLabelKey, revision string, noCache bool) ([]*unstructured.Unstructured, []*unstructured.Unstructured, *repository.ManifestResponse, error) {
helmRepos, err := m.db.ListHelmRepos(context.Background())
if err != nil {
return nil, nil, nil, err
// groupLiveObjects deduplicates the list of Kubernetes resources and chooses the correct version of each resource: if a resource has a corresponding expected
// application resource, the method picks the Kubernetes resource with the matching version; otherwise it chooses a single Kubernetes resource with any version.
func groupLiveObjects(liveObjs []*unstructured.Unstructured, targetObjs []*unstructured.Unstructured) map[string]*unstructured.Unstructured {
targetByFullName := make(map[string]*unstructured.Unstructured)
for _, obj := range targetObjs {
targetByFullName[getResourceFullName(obj)] = obj
}
repo := m.getRepo(source.RepoURL)
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
liveListByFullName := make(map[string][]*unstructured.Unstructured)
for _, obj := range liveObjs {
list := liveListByFullName[getResourceFullName(obj)]
if list == nil {
list = make([]*unstructured.Unstructured, 0)
}
list = append(list, obj)
liveListByFullName[getResourceFullName(obj)] = list
}
liveByFullName := make(map[string]*unstructured.Unstructured)
for fullName, list := range liveListByFullName {
targetObj := targetByFullName[fullName]
var liveObj *unstructured.Unstructured
if targetObj != nil {
for i := range list {
if list[i].GetAPIVersion() == targetObj.GetAPIVersion() {
liveObj = list[i]
break
}
}
} else {
liveObj = list[0]
}
if liveObj != nil {
liveByFullName[getResourceFullName(liveObj)] = liveObj
}
}
return liveByFullName
}
func (s *ksonnetAppStateManager) getTargetObjs(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, *repository.ManifestResponse, error) {
repo := s.getRepo(app.Spec.Source.RepoURL)
conn, repoClient, err := s.repoClientset.NewRepositoryClient()
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
defer util.Close(conn)
if revision == "" {
revision = source.TargetRevision
revision = app.Spec.Source.TargetRevision
}
tools := make([]*appv1.ConfigManagementPlugin, len(m.settings.ConfigManagementPlugins))
for i := range m.settings.ConfigManagementPlugins {
tools[i] = &m.settings.ConfigManagementPlugins[i]
// Decide what overrides to compare with.
var mfReqOverrides []*v1alpha1.ComponentParameter
if overrides != nil {
// If overrides is supplied, use that
mfReqOverrides = make([]*v1alpha1.ComponentParameter, len(overrides))
for i := range overrides {
item := overrides[i]
mfReqOverrides[i] = &item
}
} else {
// Otherwise, use the overrides in the app spec
mfReqOverrides = make([]*v1alpha1.ComponentParameter, len(app.Spec.Source.ComponentParameterOverrides))
for i := range app.Spec.Source.ComponentParameterOverrides {
item := app.Spec.Source.ComponentParameterOverrides[i]
mfReqOverrides[i] = &item
}
}
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &repository.ManifestRequest{
Repo: repo,
HelmRepos: helmRepos,
Revision: revision,
NoCache: noCache,
AppLabelKey: appLabelKey,
AppLabelValue: app.Name,
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &source,
Plugins: tools,
Repo: repo,
Environment: app.Spec.Source.Environment,
Path: app.Spec.Source.Path,
Revision: revision,
ComponentParameterOverrides: mfReqOverrides,
AppLabel: app.Name,
ValueFiles: app.Spec.Source.ValuesFiles,
})
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
targetObjs := make([]*unstructured.Unstructured, 0)
hooks := make([]*unstructured.Unstructured, 0)
for _, manifest := range manifestInfo.Manifests {
obj, err := v1alpha1.UnmarshalToUnstructured(manifest)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if hookutil.IsHook(obj) {
hooks = append(hooks, obj)
} else {
targetObjs = append(targetObjs, obj)
if isHook(obj) {
continue
}
targetObjs = append(targetObjs, obj)
}
return targetObjs, hooks, manifestInfo, nil
return targetObjs, manifestInfo, nil
}
func DeduplicateTargetObjects(
server string,
namespace string,
objs []*unstructured.Unstructured,
infoProvider ResourceInfoProvider,
) ([]*unstructured.Unstructured, []v1alpha1.ApplicationCondition, error) {
func (s *ksonnetAppStateManager) getLiveObjs(app *v1alpha1.Application, targetObjs []*unstructured.Unstructured) (
[]*unstructured.Unstructured, map[string]*unstructured.Unstructured, error) {
targetByKey := make(map[kubeutil.ResourceKey][]*unstructured.Unstructured)
for i := range objs {
obj := objs[i]
isNamespaced, err := infoProvider.IsNamespaced(server, obj)
if err != nil {
return objs, nil, err
}
if !isNamespaced {
obj.SetNamespace("")
} else if obj.GetNamespace() == "" {
obj.SetNamespace(namespace)
}
key := kubeutil.GetResourceKey(obj)
targetByKey[key] = append(targetByKey[key], obj)
// Get the REST config for the cluster corresponding to the environment
clst, err := s.db.GetCluster(context.Background(), app.Spec.Destination.Server)
if err != nil {
return nil, nil, err
}
conditions := make([]v1alpha1.ApplicationCondition, 0)
result := make([]*unstructured.Unstructured, 0)
for key, targets := range targetByKey {
if len(targets) > 1 {
conditions = append(conditions, appv1.ApplicationCondition{
Type: appv1.ApplicationConditionRepeatedResourceWarning,
Message: fmt.Sprintf("Resource %s appeared %d times among application resources.", key.String(), len(targets)),
})
restConfig := clst.RESTConfig()
// Retrieve the live versions of the objects, excluding any hook objects
labeledObjs, err := kubeutil.GetResourcesWithLabel(restConfig, app.Spec.Destination.Namespace, common.LabelApplicationName, app.Name)
if err != nil {
return nil, nil, err
}
liveObjs := make([]*unstructured.Unstructured, 0)
for _, obj := range labeledObjs {
if isHook(obj) {
continue
}
result = append(result, targets[len(targets)-1])
liveObjs = append(liveObjs, obj)
}
return result, conditions, nil
liveObjByFullName := groupLiveObjects(liveObjs, targetObjs)
controlledLiveObj := make([]*unstructured.Unstructured, len(targetObjs))
// Move live resources which have a corresponding target object to controlledLiveObj
dynClientPool := dynamic.NewDynamicClientPool(restConfig)
disco, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
return nil, nil, err
}
for i, targetObj := range targetObjs {
fullName := getResourceFullName(targetObj)
liveObj := liveObjByFullName[fullName]
if liveObj == nil && targetObj.GetName() != "" {
// If we get here, it indicates we did not find the live resource when querying using
// our app label. However, it is possible that the resource was created/modified outside
// of ArgoCD. In order to determine that it is truly missing, we fall back to perform a
// direct lookup of the resource by name. See issue #141
gvk := targetObj.GroupVersionKind()
dclient, err := dynClientPool.ClientForGroupVersionKind(gvk)
if err != nil {
return nil, nil, err
}
apiResource, err := kubeutil.ServerResourceForGroupVersionKind(disco, gvk)
if err != nil {
return nil, nil, err
}
liveObj, err = kubeutil.GetLiveResource(dclient, targetObj, apiResource, app.Spec.Destination.Namespace)
if err != nil {
return nil, nil, err
}
}
controlledLiveObj[i] = liveObj
delete(liveObjByFullName, fullName)
}
return controlledLiveObj, liveObjByFullName, nil
}
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied source. If revision or overrides are empty, then compares against
// revision and supplied overrides. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool) (*comparisonResult, error) {
diffNormalizer, err := argo.NewDiffNormalizer(app.Spec.IgnoreDifferences, m.settings.ResourceOverrides)
if err != nil {
return nil, err
}
logCtx := log.WithField("application", app.Name)
logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)
observedAt := metav1.Now()
func (s *ksonnetAppStateManager) CompareAppState(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) (
*v1alpha1.ComparisonResult, *repository.ManifestResponse, []v1alpha1.ApplicationCondition, error) {
failedToLoadObjs := false
conditions := make([]v1alpha1.ApplicationCondition, 0)
appLabelKey := m.settings.GetAppInstanceLabelKey()
targetObjs, hooks, manifestInfo, err := m.getRepoObjs(app, source, appLabelKey, revision, noCache)
targetObjs, manifestInfo, err := s.getTargetObjs(app, revision, overrides)
if err != nil {
targetObjs = make([]*unstructured.Unstructured, 0)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
failedToLoadObjs = true
}
targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Server, app.Spec.Destination.Namespace, targetObjs, m.liveStateCache)
if err != nil {
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
}
conditions = append(conditions, dedupConditions...)
logCtx.Debugf("Generated config manifests")
liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs)
controlledLiveObj, liveObjByFullName, err := s.getLiveObjs(app, targetObjs)
if err != nil {
liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
controlledLiveObj = make([]*unstructured.Unstructured, len(targetObjs))
liveObjByFullName = make(map[string]*unstructured.Unstructured)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
failedToLoadObjs = true
}
logCtx.Debugf("Retrieved lived manifests")
for _, liveObj := range liveObjByKey {
if liveObj != nil {
appInstanceName := kubeutil.GetAppInstanceLabel(liveObj, appLabelKey)
if appInstanceName != "" && appInstanceName != app.Name {
for _, liveObj := range controlledLiveObj {
if liveObj != nil && liveObj.GetLabels() != nil {
if appLabelVal, ok := liveObj.GetLabels()[common.LabelApplicationName]; ok && appLabelVal != "" && appLabelVal != app.Name {
conditions = append(conditions, v1alpha1.ApplicationCondition{
Type: v1alpha1.ApplicationConditionSharedResourceWarning,
Message: fmt.Sprintf("%s/%s is part of a different application: %s", liveObj.GetKind(), liveObj.GetName(), appInstanceName),
Message: fmt.Sprintf("Resource %s/%s is controller by applications '%s' and '%s'", liveObj.GetKind(), liveObj.GetName(), app.Name, appLabelVal),
})
}
}
}
managedLiveObj := make([]*unstructured.Unstructured, len(targetObjs))
for i, obj := range targetObjs {
gvk := obj.GroupVersionKind()
ns := util.FirstNonEmpty(obj.GetNamespace(), app.Spec.Destination.Namespace)
if namespaced, err := m.liveStateCache.IsNamespaced(app.Spec.Destination.Server, obj); err == nil && !namespaced {
ns = ""
}
key := kubeutil.NewResourceKey(gvk.Group, gvk.Kind, ns, obj.GetName())
if liveObj, ok := liveObjByKey[key]; ok {
managedLiveObj[i] = liveObj
delete(liveObjByKey, key)
} else {
managedLiveObj[i] = nil
// Move root-level live resources to controlledLiveObj and add nil to targetObjs to indicate that the target object is missing
for fullName := range liveObjByFullName {
liveObj := liveObjByFullName[fullName]
if !hasParent(liveObj) {
targetObjs = append(targetObjs, nil)
controlledLiveObj = append(controlledLiveObj, liveObj)
}
}
logCtx.Debugf("built managed objects list")
// Everything remaining in liveObjByKey is an "extra" resource that isn't tracked in git.
// The following adds all the extras to the managedLiveObj list and backfills the targetObj
// list with nils, so that the lists are of equal lengths for comparison purposes.
for _, obj := range liveObjByKey {
targetObjs = append(targetObjs, nil)
managedLiveObj = append(managedLiveObj, obj)
}
log.Infof("Comparing app %s state in cluster %s (namespace: %s)", app.ObjectMeta.Name, app.Spec.Destination.Server, app.Spec.Destination.Namespace)
// Do the actual comparison
diffResults, err := diff.DiffArray(targetObjs, managedLiveObj, diffNormalizer)
diffResults, err := diff.DiffArray(targetObjs, controlledLiveObj)
if err != nil {
return nil, err
return nil, nil, nil, err
}
syncCode := v1alpha1.SyncStatusCodeSynced
managedResources := make([]managedResource, len(targetObjs))
resourceSummaries := make([]v1alpha1.ResourceStatus, len(targetObjs))
comparisonStatus := v1alpha1.ComparisonStatusSynced
resources := make([]v1alpha1.ResourceState, len(targetObjs))
for i := 0; i < len(targetObjs); i++ {
obj := managedLiveObj[i]
if obj == nil {
obj = targetObjs[i]
resState := v1alpha1.ResourceState{
ChildLiveResources: make([]v1alpha1.ResourceNode, 0),
}
if obj == nil {
continue
}
gvk := obj.GroupVersionKind()
resState := v1alpha1.ResourceStatus{
Namespace: obj.GetNamespace(),
Name: obj.GetName(),
Kind: gvk.Kind,
Version: gvk.Version,
Group: gvk.Group,
Hook: hookutil.IsHook(obj),
}
diffResult := diffResults.Diffs[i]
if resState.Hook {
// For resource hooks, don't store sync status, and do not affect overall sync status
} else if diffResult.Modified || targetObjs[i] == nil || managedLiveObj[i] == nil {
// Set resource state to OutOfSync since one of the following is true:
// * target and live resource are different
// * target resource not defined and live resource is extra
// * target resource present but live resource is missing
resState.Status = v1alpha1.SyncStatusCodeOutOfSync
syncCode = v1alpha1.SyncStatusCodeOutOfSync
if diffResult.Modified {
// Set resource state to 'OutOfSync' since target and corresponding live resource are different
resState.Status = v1alpha1.ComparisonStatusOutOfSync
comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
} else {
resState.Status = v1alpha1.SyncStatusCodeSynced
resState.Status = v1alpha1.ComparisonStatusSynced
}
managedResources[i] = managedResource{
Name: resState.Name,
Namespace: resState.Namespace,
Group: resState.Group,
Kind: resState.Kind,
Version: resState.Version,
Live: managedLiveObj[i],
Target: targetObjs[i],
Diff: diffResult,
Hook: resState.Hook,
if targetObjs[i] == nil {
resState.TargetState = "null"
// Set resource state to 'OutOfSync' since target resource is missing and live resource is unexpected
resState.Status = v1alpha1.ComparisonStatusOutOfSync
comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
} else {
targetObjBytes, err := json.Marshal(targetObjs[i].Object)
if err != nil {
return nil, nil, nil, err
}
resState.TargetState = string(targetObjBytes)
}
resourceSummaries[i] = resState
if controlledLiveObj[i] == nil {
resState.LiveState = "null"
// Set resource state to 'OutOfSync' since target resource present but corresponding live resource is missing
resState.Status = v1alpha1.ComparisonStatusOutOfSync
comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
} else {
liveObjBytes, err := json.Marshal(controlledLiveObj[i].Object)
if err != nil {
return nil, nil, nil, err
}
resState.LiveState = string(liveObjBytes)
}
resources[i] = resState
}
for i, resource := range resources {
liveResource := controlledLiveObj[i]
if liveResource != nil {
childResources, err := getChildren(liveResource, liveObjByFullName)
if err != nil {
return nil, nil, nil, err
}
resource.ChildLiveResources = childResources
resources[i] = resource
}
}
if failedToLoadObjs {
syncCode = v1alpha1.SyncStatusCodeUnknown
comparisonStatus = v1alpha1.ComparisonStatusUnknown
}
syncStatus := v1alpha1.SyncStatus{
ComparedTo: appv1.ComparedTo{
Source: source,
Destination: app.Spec.Destination,
},
Status: syncCode,
compResult := v1alpha1.ComparisonResult{
ComparedTo: app.Spec.Source,
ComparedAt: metav1.Time{Time: time.Now().UTC()},
Resources: resources,
Status: comparisonStatus,
}
if manifestInfo != nil {
syncStatus.Revision = manifestInfo.Revision
}
healthStatus, err := health.SetApplicationHealth(resourceSummaries, GetLiveObjs(managedResources), m.settings.ResourceOverrides)
if err != nil {
conditions = append(conditions, appv1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
}
compRes := comparisonResult{
reconciledAt: observedAt,
syncStatus: &syncStatus,
healthStatus: healthStatus,
resources: resourceSummaries,
managedResources: managedResources,
conditions: conditions,
hooks: hooks,
diffNormalizer: diffNormalizer,
}
if manifestInfo != nil {
compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
}
return &compRes, nil
return &compResult, manifestInfo, conditions, nil
}
func (m *appStateManager) getRepo(repoURL string) *v1alpha1.Repository {
repo, err := m.db.GetRepository(context.Background(), repoURL)
func hasParent(obj *unstructured.Unstructured) bool {
// TODO: remove this special case after Service and Endpoints get an explicit relationship (https://github.com/kubernetes/kubernetes/issues/28483)
return obj.GetKind() == kubeutil.EndpointsKind || metav1.GetControllerOf(obj) != nil
}
func isControlledBy(obj *unstructured.Unstructured, parent *unstructured.Unstructured) bool {
// TODO: remove this special case after Service and Endpoints get an explicit relationship (https://github.com/kubernetes/kubernetes/issues/28483)
if obj.GetKind() == kubeutil.EndpointsKind && parent.GetKind() == kubeutil.ServiceKind {
return obj.GetName() == parent.GetName()
}
return metav1.IsControlledBy(obj, parent)
}
func getChildren(parent *unstructured.Unstructured, liveObjByFullName map[string]*unstructured.Unstructured) ([]v1alpha1.ResourceNode, error) {
children := make([]v1alpha1.ResourceNode, 0)
for fullName, obj := range liveObjByFullName {
if isControlledBy(obj, parent) {
delete(liveObjByFullName, fullName)
childResource := v1alpha1.ResourceNode{}
json, err := json.Marshal(obj)
if err != nil {
return nil, err
}
childResource.State = string(json)
childResourceChildren, err := getChildren(obj, liveObjByFullName)
if err != nil {
return nil, err
}
childResource.Children = childResourceChildren
children = append(children, childResource)
}
}
return children, nil
}
func getResourceFullName(obj *unstructured.Unstructured) string {
return fmt.Sprintf("%s:%s", obj.GetKind(), obj.GetName())
}
func (s *ksonnetAppStateManager) getRepo(repoURL string) *v1alpha1.Repository {
repo, err := s.db.GetRepository(context.Background(), repoURL)
if err != nil {
// If we couldn't retrieve from the repo service, assume public repositories
repo = &v1alpha1.Repository{Repo: repoURL}
@@ -341,23 +369,31 @@ func (m *appStateManager) getRepo(repoURL string) *v1alpha1.Repository {
return repo
}
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource) error {
var nextID int64
func (s *ksonnetAppStateManager) persistDeploymentInfo(
app *v1alpha1.Application, revision string, envParams []*v1alpha1.ComponentParameter, overrides *[]v1alpha1.ComponentParameter) error {
params := make([]v1alpha1.ComponentParameter, len(envParams))
for i := range envParams {
param := *envParams[i]
params[i] = param
}
var nextID int64 = 0
if len(app.Status.History) > 0 {
nextID = app.Status.History[len(app.Status.History)-1].ID + 1
}
history := append(app.Status.History, v1alpha1.RevisionHistory{
Revision: revision,
DeployedAt: metav1.NewTime(time.Now().UTC()),
ID: nextID,
Source: source,
history := append(app.Status.History, v1alpha1.DeploymentInfo{
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
Revision: revision,
Params: params,
DeployedAt: metav1.NewTime(time.Now().UTC()),
ID: nextID,
})
if len(history) > common.RevisionHistoryLimit {
history = history[1 : common.RevisionHistoryLimit+1]
if len(history) > maxHistoryCnt {
history = history[1 : maxHistoryCnt+1]
}
patch, err := json.Marshal(map[string]map[string][]v1alpha1.RevisionHistory{
patch, err := json.Marshal(map[string]map[string][]v1alpha1.DeploymentInfo{
"status": {
"history": history,
},
@@ -365,7 +401,7 @@ func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revi
if err != nil {
return err
}
_, err = m.appclientset.ArgoprojV1alpha1().Applications(m.namespace).Patch(app.Name, types.MergePatchType, patch)
_, err = s.appclientset.ArgoprojV1alpha1().Applications(s.namespace).Patch(app.Name, types.MergePatchType, patch)
return err
}
@@ -375,19 +411,11 @@ func NewAppStateManager(
appclientset appclientset.Interface,
repoClientset reposerver.Clientset,
namespace string,
kubectl kubeutil.Kubectl,
settings *settings.ArgoCDSettings,
liveStateCache statecache.LiveStateCache,
projInformer cache.SharedIndexInformer,
) AppStateManager {
return &appStateManager{
liveStateCache: liveStateCache,
db: db,
appclientset: appclientset,
kubectl: kubectl,
repoClientset: repoClientset,
namespace: namespace,
settings: settings,
projInformer: projInformer,
return &ksonnetAppStateManager{
db: db,
appclientset: appclientset,
repoClientset: repoClientset,
namespace: namespace,
}
}


@@ -1,180 +0,0 @@
package controller
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/argoproj/argo-cd/common"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util/kube"
)
// TestCompareAppStateEmpty tests comparison when both git and live have no objects
func TestCompareAppStateEmpty(t *testing.T) {
app := newFakeApp()
data := fakeData{
manifestResponse: &repository.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 0, len(compRes.resources))
assert.Equal(t, 0, len(compRes.managedResources))
assert.Equal(t, 0, len(compRes.conditions))
}
// TestCompareAppStateMissing tests when there is a manifest defined in git which doesn't exist in live
func TestCompareAppStateMissing(t *testing.T) {
app := newFakeApp()
data := fakeData{
apps: []runtime.Object{app},
manifestResponse: &repository.ManifestResponse{
Manifests: []string{string(test.PodManifest)},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
assert.Equal(t, 1, len(compRes.managedResources))
assert.Equal(t, 0, len(compRes.conditions))
}
// TestCompareAppStateExtra tests when there is an extra object in live but not defined in git
func TestCompareAppStateExtra(t *testing.T) {
pod := test.NewPod()
pod.SetNamespace(test.FakeDestNamespace)
app := newFakeApp()
key := kube.ResourceKey{Group: "", Kind: "Pod", Namespace: test.FakeDestNamespace, Name: app.Name}
data := fakeData{
manifestResponse: &repository.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
key: pod,
},
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
assert.Equal(t, 1, len(compRes.managedResources))
assert.Equal(t, 0, len(compRes.conditions))
}
// TestCompareAppStateHook checks that hooks are detected during manifest generation, and not
// considered as part of resources when assessing Synced status
func TestCompareAppStateHook(t *testing.T) {
pod := test.NewPod()
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
podBytes, _ := json.Marshal(pod)
app := newFakeApp()
data := fakeData{
apps: []runtime.Object{app},
manifestResponse: &repository.ManifestResponse{
Manifests: []string{string(podBytes)},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 0, len(compRes.resources))
assert.Equal(t, 0, len(compRes.managedResources))
assert.Equal(t, 0, len(compRes.conditions))
}
// TestCompareAppStateExtraHook tests when there is an extra _hook_ object in live but not defined in git
func TestCompareAppStateExtraHook(t *testing.T) {
pod := test.NewPod()
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
pod.SetNamespace(test.FakeDestNamespace)
app := newFakeApp()
key := kube.ResourceKey{Group: "", Kind: "Pod", Namespace: test.FakeDestNamespace, Name: app.Name}
data := fakeData{
manifestResponse: &repository.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
key: pod,
},
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
assert.Equal(t, 1, len(compRes.managedResources))
assert.Equal(t, 0, len(compRes.conditions))
}
func toJSON(t *testing.T, obj *unstructured.Unstructured) string {
data, err := json.Marshal(obj)
assert.NoError(t, err)
return string(data)
}
func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
obj1 := test.NewPod()
obj1.SetNamespace(test.FakeDestNamespace)
obj2 := test.NewPod()
obj3 := test.NewPod()
obj3.SetNamespace("kube-system")
app := newFakeApp()
data := fakeData{
manifestResponse: &repository.ManifestResponse{
Manifests: []string{toJSON(t, obj1), toJSON(t, obj2), toJSON(t, obj3)},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(obj1): obj1,
kube.GetResourceKey(obj3): obj3,
},
}
ctrl := newFakeController(&data)
compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
assert.NoError(t, err)
assert.NotNil(t, compRes)
assert.Contains(t, compRes.conditions, argoappv1.ApplicationCondition{
Message: "Resource /Pod/fake-dest-ns/my-pod appeared 2 times among application resources.",
Type: argoappv1.ApplicationConditionRepeatedResourceWarning,
})
assert.Equal(t, 2, len(compRes.resources))
}

File diff suppressed because it is too large


@@ -1,66 +0,0 @@
package controller
import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util/kube/kubetest"
)
var clusterRoleHook = `
{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "ClusterRole",
"metadata": {
"name": "cluster-role-hook",
"annotations": {
"argocd.argoproj.io/hook": "PostSync"
}
}
}`
func TestSyncHookProjectPermissions(t *testing.T) {
syncCtx := newTestSyncCtx(&v1.APIResourceList{
GroupVersion: "v1",
APIResources: []v1.APIResource{
{Name: "pod", Namespaced: true, Kind: "Pod", Group: "v1"},
},
}, &v1.APIResourceList{
GroupVersion: "rbac.authorization.k8s.io/v1",
APIResources: []v1.APIResource{
{Name: "clusterroles", Namespaced: false, Kind: "ClusterRole", Group: "rbac.authorization.k8s.io"},
},
})
syncCtx.kubectl = kubetest.MockKubectlCmd{}
crHook, _ := v1alpha1.UnmarshalToUnstructured(clusterRoleHook)
syncCtx.compareResult = &comparisonResult{
hooks: []*unstructured.Unstructured{
crHook,
},
managedResources: []managedResource{{
Target: test.NewPod(),
}},
}
syncCtx.proj.Spec.ClusterResourceWhitelist = []v1.GroupKind{}
syncCtx.syncOp.SyncStrategy = nil
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationFailed, syncCtx.opState.Phase)
assert.Len(t, syncCtx.syncRes.Resources, 0)
assert.Contains(t, syncCtx.opState.Message, "not permitted in project")
// Now add the resource to the whitelist and try again. Resource should be created
syncCtx.proj.Spec.ClusterResourceWhitelist = []v1.GroupKind{
{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
}
syncCtx.syncOp.SyncStrategy = nil
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResultCodeSynced, syncCtx.syncRes.Resources[0].Status)
}


@@ -1,560 +0,0 @@
package controller
import (
"fmt"
"reflect"
"strings"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
apiv1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/batch"
"github.com/argoproj/argo-cd/common"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
hookutil "github.com/argoproj/argo-cd/util/hook"
"github.com/argoproj/argo-cd/util/kube"
)
// doHookSync initiates (or continues) a hook-based sync. This method will be invoked when there may
// already be in-flight (potentially incomplete) jobs/workflows, and should be idempotent.
func (sc *syncContext) doHookSync(syncTasks []syncTask, hooks []*unstructured.Unstructured) {
if !sc.startedPreSyncPhase() {
if !sc.verifyPermittedHooks(hooks) {
return
}
}
// 1. Run PreSync hooks
if !sc.runHooks(hooks, appv1.HookTypePreSync) {
return
}
// 2. Run Sync hooks (e.g. blue-green sync workflow)
// Before performing Sync hooks, apply any normal manifests which aren't annotated with a hook.
// We only want to do this once per operation.
shouldContinue := true
if !sc.startedSyncPhase() {
if !sc.syncNonHookTasks(syncTasks) {
sc.setOperationPhase(appv1.OperationFailed, "one or more objects failed to apply")
return
}
shouldContinue = false
}
if !sc.runHooks(hooks, appv1.HookTypeSync) {
shouldContinue = false
}
if !shouldContinue {
return
}
// 3. Run PostSync hooks
// Before running PostSync hooks, we want to make sure the rollout is complete (app is healthy). If we
// already started the post-sync phase, then we do not need to perform the health check.
postSyncHooks, _ := sc.getHooks(appv1.HookTypePostSync)
if len(postSyncHooks) > 0 && !sc.startedPostSyncPhase() {
sc.log.Infof("PostSync application health check: %s", sc.compareResult.healthStatus.Status)
if sc.compareResult.healthStatus.Status != appv1.HealthStatusHealthy {
sc.setOperationPhase(appv1.OperationRunning, fmt.Sprintf("waiting for %s state to run %s hooks (current health: %s)",
appv1.HealthStatusHealthy, appv1.HookTypePostSync, sc.compareResult.healthStatus.Status))
return
}
}
if !sc.runHooks(hooks, appv1.HookTypePostSync) {
return
}
// if we get here, all hooks successfully completed
sc.setOperationPhase(appv1.OperationSucceeded, "successfully synced")
}
// verifyPermittedHooks verifies all hooks are permitted in the project
func (sc *syncContext) verifyPermittedHooks(hooks []*unstructured.Unstructured) bool {
for _, hook := range hooks {
gvk := hook.GroupVersionKind()
serverRes, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
if err != nil {
sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("unable to identify api resource type: %v", gvk))
return false
}
if !sc.proj.IsResourcePermitted(metav1.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, serverRes.Namespaced) {
sc.setOperationPhase(appv1.OperationFailed, fmt.Sprintf("Hook resource %s:%s is not permitted in project %s", gvk.Group, gvk.Kind, sc.proj.Name))
return false
}
if serverRes.Namespaced && !sc.proj.IsDestinationPermitted(appv1.ApplicationDestination{Namespace: hook.GetNamespace(), Server: sc.server}) {
gvk := hook.GroupVersionKind()
sc.setResourceDetails(&appv1.ResourceResult{
Name: hook.GetName(),
Group: gvk.Group,
Version: gvk.Version,
Kind: hook.GetKind(),
Namespace: hook.GetNamespace(),
Message: fmt.Sprintf("namespace %v is not permitted in project '%s'", hook.GetNamespace(), sc.proj.Name),
Status: appv1.ResultCodeSyncFailed,
})
return false
}
}
return true
}
// getHooks returns all Argo CD hooks, optionally filtered by ones of the specific type(s)
func (sc *syncContext) getHooks(hookTypes ...appv1.HookType) ([]*unstructured.Unstructured, error) {
var hooks []*unstructured.Unstructured
for _, hook := range sc.compareResult.hooks {
if hook.GetNamespace() == "" {
hook.SetNamespace(sc.namespace)
}
if !hookutil.IsArgoHook(hook) {
// TODO: in the future, if we want to map helm hooks to Argo CD lifecycles, we should
// include helm hooks in the returned list
continue
}
if len(hookTypes) > 0 {
match := false
for _, desiredType := range hookTypes {
if isHookType(hook, desiredType) {
match = true
break
}
}
if !match {
continue
}
}
hooks = append(hooks, hook)
}
return hooks, nil
}
// runHooks iterates & filters the target manifests for resources of the specified hook type, then
// creates the resource. Updates sc.syncRes.Resources with the current status. Returns whether or not
// we should continue to the next hook phase.
func (sc *syncContext) runHooks(hooks []*unstructured.Unstructured, hookType appv1.HookType) bool {
shouldContinue := true
for _, hook := range hooks {
if hookType == appv1.HookTypeSync && isHookType(hook, appv1.HookTypeSkip) {
// If we get here, we are invoking all sync hooks and reached a resource that is
// annotated with the Skip hook. This will update the resource details to indicate it
// was skipped due to annotation
gvk := hook.GroupVersionKind()
sc.setResourceDetails(&appv1.ResourceResult{
Name: hook.GetName(),
Group: gvk.Group,
Version: gvk.Version,
Kind: hook.GetKind(),
Namespace: hook.GetNamespace(),
Message: "Skipped",
})
continue
}
if !isHookType(hook, hookType) {
continue
}
updated, err := sc.runHook(hook, hookType)
if err != nil {
sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("%s hook error: %v", hookType, err))
return false
}
if updated {
// If the result of running a hook, caused us to modify hook resource state, we should
// not proceed to the next hook phase. This is because before proceeding to the next
// phase, we want a full health assessment to happen. By returning early, we allow
// the application to get requeued into the controller workqueue, and on the next
// process iteration, a new CompareAppState() will be performed to get the most
// up-to-date live state. This enables us to accurately wait for an application to
// become Healthy before proceeding to run PostSync tasks.
shouldContinue = false
}
}
if !shouldContinue {
sc.log.Infof("Stopping after %s phase due to modifications to hook resource state", hookType)
return false
}
completed, successful := areHooksCompletedSuccessful(hookType, sc.syncRes.Resources)
if !completed {
return false
}
if !successful {
sc.setOperationPhase(appv1.OperationFailed, fmt.Sprintf("%s hook failed", hookType))
return false
}
return true
}
// syncNonHookTasks uses an apply sync to sync or prune the objects that are not handled by hooks.
// Returns true if the sync was successful.
func (sc *syncContext) syncNonHookTasks(syncTasks []syncTask) bool {
var nonHookTasks []syncTask
for _, task := range syncTasks {
if task.targetObj == nil {
nonHookTasks = append(nonHookTasks, task)
} else {
annotations := task.targetObj.GetAnnotations()
if annotations != nil && annotations[common.AnnotationKeyHook] != "" {
// we are doing a hook sync and this resource is annotated with a hook annotation
continue
}
// if we get here, this resource does not have any hook annotation so we
// should perform a `kubectl apply`
nonHookTasks = append(nonHookTasks, task)
}
}
return sc.doApplySync(nonHookTasks, false, sc.syncOp.SyncStrategy.Hook.Force, true)
}
// runHook runs the supplied hook and updates the hook status. Returns true if the result of
// invoking this method resulted in changes to any hook status
func (sc *syncContext) runHook(hook *unstructured.Unstructured, hookType appv1.HookType) (bool, error) {
// Hook resource names are deterministic, whether they are defined by the user (metadata.name),
// or formulated at the time of the operation (metadata.generateName). If user specifies
// metadata.generateName, then we will generate a formulated metadata.name before submission.
if hook.GetName() == "" {
postfix := strings.ToLower(fmt.Sprintf("%s-%s-%d", sc.syncRes.Revision[0:7], hookType, sc.opState.StartedAt.UTC().Unix()))
generatedName := hook.GetGenerateName()
hook = hook.DeepCopy()
hook.SetName(fmt.Sprintf("%s%s", generatedName, postfix))
}
// Check our hook statuses to see if we already completed this hook.
// If so, this method is a noop
prevStatus := sc.getHookStatus(hook, hookType)
if prevStatus != nil && prevStatus.HookPhase.Completed() {
return false, nil
}
gvk := hook.GroupVersionKind()
apiResource, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
if err != nil {
return false, err
}
resource := kube.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
resIf := kube.ToResourceInterface(sc.dynamicIf, apiResource, resource, hook.GetNamespace())
var liveObj *unstructured.Unstructured
existing, err := resIf.Get(hook.GetName(), metav1.GetOptions{})
if err != nil {
if !apierr.IsNotFound(err) {
return false, fmt.Errorf("Failed to get status of %s hook %s '%s': %v", hookType, gvk, hook.GetName(), err)
}
_, err := sc.kubectl.ApplyResource(sc.config, hook, hook.GetNamespace(), false, false)
if err != nil {
return false, fmt.Errorf("Failed to create %s hook %s '%s': %v", hookType, gvk, hook.GetName(), err)
}
created, err := resIf.Get(hook.GetName(), metav1.GetOptions{})
if err != nil {
return true, fmt.Errorf("Failed to get status of %s hook %s '%s': %v", hookType, gvk, hook.GetName(), err)
}
sc.log.Infof("%s hook %s '%s' created", hookType, gvk, created.GetName())
sc.setOperationPhase(appv1.OperationRunning, fmt.Sprintf("running %s hooks", hookType))
liveObj = created
} else {
liveObj = existing
}
hookStatus := newHookStatus(liveObj, hookType)
if hookStatus.HookPhase.Completed() {
if enforceHookDeletePolicy(hook, hookStatus.HookPhase) {
err = sc.deleteHook(hook.GetName(), hook.GetNamespace(), hook.GroupVersionKind())
if err != nil {
hookStatus.HookPhase = appv1.OperationFailed
hookStatus.Message = fmt.Sprintf("failed to delete %s hook: %v", hookStatus.HookPhase, err)
}
}
}
return sc.updateHookStatus(hookStatus), nil
}
// enforceHookDeletePolicy examines the hook deletion policy of an object and deletes it based on the status
func enforceHookDeletePolicy(hook *unstructured.Unstructured, phase appv1.OperationPhase) bool {
annotations := hook.GetAnnotations()
if annotations == nil {
return false
}
deletePolicies := strings.Split(annotations[common.AnnotationKeyHookDeletePolicy], ",")
for _, dp := range deletePolicies {
policy := appv1.HookDeletePolicy(strings.TrimSpace(dp))
if policy == appv1.HookDeletePolicyHookSucceeded && phase == appv1.OperationSucceeded {
return true
}
if policy == appv1.HookDeletePolicyHookFailed && phase == appv1.OperationFailed {
return true
}
}
return false
}
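// Illustrative manifest snippet ("argocd.argoproj.io/hook" is confirmed by the
// clusterRoleHook fixture above; the delete-policy key is assumed to be the value
// of common.AnnotationKeyHookDeletePolicy):
//
//	metadata:
//	  annotations:
//	    argocd.argoproj.io/hook: PostSync
//	    argocd.argoproj.io/hook-delete-policy: HookSucceeded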
// isHookType tells whether or not the supplied object is a hook of the specified type
func isHookType(hook *unstructured.Unstructured, hookType appv1.HookType) bool {
annotations := hook.GetAnnotations()
if annotations == nil {
return false
}
resHookTypes := strings.Split(annotations[common.AnnotationKeyHook], ",")
for _, ht := range resHookTypes {
if string(hookType) == strings.TrimSpace(ht) {
return true
}
}
return false
}
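// Because the annotation value is split on commas, one resource can act as several
// hook types at once, e.g. argocd.argoproj.io/hook: "PreSync,PostSync".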
// newHookStatus returns a hook status from a _live_ unstructured object
func newHookStatus(hook *unstructured.Unstructured, hookType appv1.HookType) appv1.ResourceResult {
gvk := hook.GroupVersionKind()
hookStatus := appv1.ResourceResult{
Name: hook.GetName(),
Kind: hook.GetKind(),
Group: gvk.Group,
Version: gvk.Version,
HookType: hookType,
HookPhase: appv1.OperationRunning,
Namespace: hook.GetNamespace(),
}
if isBatchJob(gvk) {
updateStatusFromBatchJob(hook, &hookStatus)
} else if isArgoWorkflow(gvk) {
updateStatusFromArgoWorkflow(hook, &hookStatus)
} else if isPod(gvk) {
updateStatusFromPod(hook, &hookStatus)
} else {
hookStatus.HookPhase = appv1.OperationSucceeded
hookStatus.Message = fmt.Sprintf("%s created", hook.GetName())
}
return hookStatus
}
// isRunnable returns whether the resource object is a runnable type that needs to be terminated
func isRunnable(res *appv1.ResourceResult) bool {
gvk := res.GroupVersionKind()
return isBatchJob(gvk) || isArgoWorkflow(gvk) || isPod(gvk)
}
func isBatchJob(gvk schema.GroupVersionKind) bool {
return gvk.Group == "batch" && gvk.Kind == "Job"
}
func updateStatusFromBatchJob(hook *unstructured.Unstructured, hookStatus *appv1.ResourceResult) {
var job batch.Job
err := runtime.DefaultUnstructuredConverter.FromUnstructured(hook.Object, &job)
if err != nil {
hookStatus.HookPhase = appv1.OperationError
hookStatus.Message = err.Error()
return
}
failed := false
var failMsg string
complete := false
var message string
for _, condition := range job.Status.Conditions {
switch condition.Type {
case batch.JobFailed:
failed = true
complete = true
failMsg = condition.Message
case batch.JobComplete:
complete = true
message = condition.Message
}
}
if !complete {
hookStatus.HookPhase = appv1.OperationRunning
hookStatus.Message = message
} else if failed {
hookStatus.HookPhase = appv1.OperationFailed
hookStatus.Message = failMsg
} else {
hookStatus.HookPhase = appv1.OperationSucceeded
hookStatus.Message = message
}
}
func isArgoWorkflow(gvk schema.GroupVersionKind) bool {
return gvk.Group == "argoproj.io" && gvk.Kind == "Workflow"
}
func updateStatusFromArgoWorkflow(hook *unstructured.Unstructured, hookStatus *appv1.ResourceResult) {
var wf wfv1.Workflow
err := runtime.DefaultUnstructuredConverter.FromUnstructured(hook.Object, &wf)
if err != nil {
hookStatus.HookPhase = appv1.OperationError
hookStatus.Message = err.Error()
return
}
switch wf.Status.Phase {
case wfv1.NodePending, wfv1.NodeRunning:
hookStatus.HookPhase = appv1.OperationRunning
case wfv1.NodeSucceeded:
hookStatus.HookPhase = appv1.OperationSucceeded
case wfv1.NodeFailed:
hookStatus.HookPhase = appv1.OperationFailed
case wfv1.NodeError:
hookStatus.HookPhase = appv1.OperationError
}
hookStatus.Message = wf.Status.Message
}
func isPod(gvk schema.GroupVersionKind) bool {
return gvk.Group == "" && gvk.Kind == "Pod"
}
func updateStatusFromPod(hook *unstructured.Unstructured, hookStatus *appv1.ResourceResult) {
var pod apiv1.Pod
err := runtime.DefaultUnstructuredConverter.FromUnstructured(hook.Object, &pod)
if err != nil {
hookStatus.HookPhase = appv1.OperationError
hookStatus.Message = err.Error()
return
}
getFailMessage := func(ctr *apiv1.ContainerStatus) string {
if ctr.State.Terminated != nil {
if ctr.State.Terminated.Message != "" {
return ctr.State.Terminated.Message
}
if ctr.State.Terminated.Reason == "OOMKilled" {
return ctr.State.Terminated.Reason
}
if ctr.State.Terminated.ExitCode != 0 {
return fmt.Sprintf("container %q failed with exit code %d", ctr.Name, ctr.State.Terminated.ExitCode)
}
}
return ""
}
switch pod.Status.Phase {
case apiv1.PodPending, apiv1.PodRunning:
hookStatus.HookPhase = appv1.OperationRunning
case apiv1.PodSucceeded:
hookStatus.HookPhase = appv1.OperationSucceeded
case apiv1.PodFailed:
hookStatus.HookPhase = appv1.OperationFailed
if pod.Status.Message != "" {
// Pod has a nice error message. Use that.
hookStatus.Message = pod.Status.Message
return
}
for _, ctr := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) {
if msg := getFailMessage(&ctr); msg != "" {
hookStatus.Message = msg
return
}
}
case apiv1.PodUnknown:
hookStatus.HookPhase = appv1.OperationError
}
}
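// getHookStatus returns the previously recorded result for the given hook object and hook type, or nil if none exists.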
func (sc *syncContext) getHookStatus(hookObj *unstructured.Unstructured, hookType appv1.HookType) *appv1.ResourceResult {
for _, hr := range sc.syncRes.Resources {
if !hr.IsHook() {
continue
}
ns := util.FirstNonEmpty(hookObj.GetNamespace(), sc.namespace)
if hookEqual(hr, hookObj.GroupVersionKind().Group, hookObj.GetKind(), ns, hookObj.GetName(), hookType) {
return hr
}
}
return nil
}
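// hookEqual reports whether a recorded resource result matches the given hook identity.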
func hookEqual(hr *appv1.ResourceResult, group, kind, namespace, name string, hookType appv1.HookType) bool {
return bool(
hr.Group == group &&
hr.Kind == kind &&
hr.Namespace == namespace &&
hr.Name == name &&
hr.HookType == hookType)
}
// updateHookStatus updates the status of a hook. Returns true if the hook was modified
func (sc *syncContext) updateHookStatus(hookStatus appv1.ResourceResult) bool {
sc.lock.Lock()
defer sc.lock.Unlock()
for i, prev := range sc.syncRes.Resources {
if !prev.IsHook() {
continue
}
if hookEqual(prev, hookStatus.Group, hookStatus.Kind, hookStatus.Namespace, hookStatus.Name, hookStatus.HookType) {
if reflect.DeepEqual(prev, hookStatus) {
return false
}
if prev.HookPhase != hookStatus.HookPhase {
sc.log.Infof("Hook %s %s/%s hookPhase: %s -> %s", hookStatus.HookType, prev.Kind, prev.Name, prev.HookPhase, hookStatus.HookPhase)
}
if prev.Status != hookStatus.Status {
sc.log.Infof("Hook %s %s/%s status: %s -> %s", hookStatus.HookType, prev.Kind, prev.Name, prev.Status, hookStatus.Status)
}
if prev.Message != hookStatus.Message {
sc.log.Infof("Hook %s %s/%s message: '%s' -> '%s'", hookStatus.HookType, prev.Kind, prev.Name, prev.Message, hookStatus.Message)
}
sc.syncRes.Resources[i] = &hookStatus
return true
}
}
sc.syncRes.Resources = append(sc.syncRes.Resources, &hookStatus)
sc.log.Infof("Set new hook %s %s/%s. phase: %s, message: %s", hookStatus.HookType, hookStatus.Kind, hookStatus.Name, hookStatus.HookPhase, hookStatus.Message)
return true
}
// areHooksCompletedSuccessful reports whether all hooks of the specified type have completed, and whether they all completed successfully
func areHooksCompletedSuccessful(hookType appv1.HookType, hookStatuses []*appv1.ResourceResult) (bool, bool) {
isSuccessful := true
for _, hookStatus := range hookStatuses {
if !hookStatus.IsHook() {
continue
}
if hookStatus.HookType != hookType {
continue
}
if !hookStatus.HookPhase.Completed() {
return false, false
}
if !hookStatus.HookPhase.Successful() {
isSuccessful = false
}
}
return true, isSuccessful
}
// terminate looks for any running jobs/workflow hooks and deletes the resource
func (sc *syncContext) terminate() {
terminateSuccessful := true
for _, hookStatus := range sc.syncRes.Resources {
if !hookStatus.IsHook() {
continue
}
if hookStatus.HookPhase.Completed() {
continue
}
if isRunnable(hookStatus) {
hookStatus.HookPhase = appv1.OperationFailed
err := sc.deleteHook(hookStatus.Name, hookStatus.Namespace, hookStatus.GroupVersionKind())
if err != nil {
hookStatus.Message = fmt.Sprintf("Failed to delete %s hook %s/%s: %v", hookStatus.HookType, hookStatus.Kind, hookStatus.Name, err)
terminateSuccessful = false
} else {
hookStatus.Message = fmt.Sprintf("Deleted %s hook %s/%s", hookStatus.HookType, hookStatus.Kind, hookStatus.Name)
}
sc.updateHookStatus(*hookStatus)
}
}
if terminateSuccessful {
sc.setOperationPhase(appv1.OperationFailed, "Operation terminated")
} else {
sc.setOperationPhase(appv1.OperationError, "Operation termination had errors")
}
}
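// deleteHook deletes the named hook resource using foreground cascading deletion.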
func (sc *syncContext) deleteHook(name, namespace string, gvk schema.GroupVersionKind) error {
apiResource, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
if err != nil {
return err
}
resource := kube.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
resIf := kube.ToResourceInterface(sc.dynamicIf, apiResource, resource, namespace)
propagationPolicy := metav1.DeletePropagationForeground
return resIf.Delete(name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
}

View File

@@ -1,528 +1,26 @@
package controller
import (
"fmt"
"sort"
"testing"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
fakedisco "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/rest"
testcore "k8s.io/client-go/testing"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/kube/kubetest"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/rest"
)
func newTestSyncCtx(resources ...*v1.APIResourceList) *syncContext {
fakeDisco := &fakedisco.FakeDiscovery{Fake: &testcore.Fake{}}
fakeDisco.Resources = append(resources,
&v1.APIResourceList{
GroupVersion: "v1",
APIResources: []v1.APIResource{
{Kind: "Pod", Group: "", Version: "v1", Namespaced: true},
{Kind: "Service", Group: "", Version: "v1", Namespaced: true},
},
},
&v1.APIResourceList{
GroupVersion: "apps/v1",
APIResources: []v1.APIResource{
{Kind: "Deployment", Group: "apps", Version: "v1", Namespaced: true},
},
})
sc := syncContext{
config: &rest.Config{},
namespace: test.FakeArgoCDNamespace,
server: test.FakeClusterURL,
syncRes: &v1alpha1.SyncOperationResult{},
syncOp: &v1alpha1.SyncOperation{
Prune: true,
SyncStrategy: &v1alpha1.SyncStrategy{
Apply: &v1alpha1.SyncStrategyApply{},
},
},
proj: &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: v1alpha1.AppProjectSpec{
Destinations: []v1alpha1.ApplicationDestination{{
Server: test.FakeClusterURL,
Namespace: test.FakeArgoCDNamespace,
}},
ClusterResourceWhitelist: []v1.GroupKind{
{Group: "*", Kind: "*"},
},
},
},
opState: &v1alpha1.OperationState{},
disco: fakeDisco,
log: log.WithFields(log.Fields{"application": "fake-app"}),
}
sc.kubectl = kubetest.MockKubectlCmd{}
return &sc
}
func TestSyncNotPermittedNamespace(t *testing.T) {
syncCtx := newTestSyncCtx()
targetPod := test.NewPod()
targetPod.SetNamespace("kube-system")
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: targetPod,
}, {
Live: nil,
Target: test.NewService(),
}},
}
syncCtx.sync()
assert.Equal(t, v1alpha1.OperationFailed, syncCtx.opState.Phase)
assert.Contains(t, syncCtx.syncRes.Resources[0].Message, "not permitted in project")
}
func TestSyncCreateInSortedOrder(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: test.NewPod(),
}, {
Live: nil,
Target: test.NewService(),
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 2)
for i := range syncCtx.syncRes.Resources {
if syncCtx.syncRes.Resources[i].Kind == "Pod" {
assert.Equal(t, v1alpha1.ResultCodeSynced, syncCtx.syncRes.Resources[i].Status)
} else if syncCtx.syncRes.Resources[i].Kind == "Service" {
assert.Equal(t, v1alpha1.ResultCodeSynced, syncCtx.syncRes.Resources[i].Status)
} else {
t.Error("Resource isn't a pod or a service")
}
}
syncCtx.sync()
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
}
func TestSyncCreateNotWhitelistedClusterResources(t *testing.T) {
syncCtx := newTestSyncCtx(&v1.APIResourceList{
GroupVersion: v1alpha1.SchemeGroupVersion.String(),
APIResources: []v1.APIResource{
{Name: "workflows", Namespaced: false, Kind: "Workflow", Group: "argoproj.io"},
{Name: "application", Namespaced: false, Kind: "Application", Group: "argoproj.io"},
},
}, &v1.APIResourceList{
GroupVersion: "rbac.authorization.k8s.io/v1",
APIResources: []v1.APIResource{
{Name: "clusterroles", Namespaced: false, Kind: "ClusterRole", Group: "rbac.authorization.k8s.io"},
},
})
syncCtx.proj.Spec.ClusterResourceWhitelist = []v1.GroupKind{
{Group: "argoproj.io", Kind: "*"},
}
syncCtx.kubectl = kubetest.MockKubectlCmd{}
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: kube.MustToUnstructured(&rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{Name: "argo-ui-cluster-role"}}),
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, syncCtx.syncRes.Resources[0].Status)
assert.Contains(t, syncCtx.syncRes.Resources[0].Message, "not permitted in project")
}
func TestSyncBlacklistedNamespacedResources(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.proj.Spec.NamespaceResourceBlacklist = []v1.GroupKind{
{Group: "*", Kind: "Deployment"},
}
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: test.NewDeployment(),
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, syncCtx.syncRes.Resources[0].Status)
assert.Contains(t, syncCtx.syncRes.Resources[0].Message, "not permitted in project")
}
func TestSyncSuccessfully(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: test.NewService(),
}, {
Live: test.NewPod(),
Target: nil,
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 2)
for i := range syncCtx.syncRes.Resources {
if syncCtx.syncRes.Resources[i].Kind == "Pod" {
assert.Equal(t, v1alpha1.ResultCodePruned, syncCtx.syncRes.Resources[i].Status)
} else if syncCtx.syncRes.Resources[i].Kind == "Service" {
assert.Equal(t, v1alpha1.ResultCodeSynced, syncCtx.syncRes.Resources[i].Status)
} else {
t.Error("Resource isn't a pod or a service")
}
}
syncCtx.sync()
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
}
func TestSyncDeleteSuccessfully(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: test.NewService(),
Target: nil,
}, {
Live: test.NewPod(),
Target: nil,
}},
}
syncCtx.sync()
for i := range syncCtx.syncRes.Resources {
if syncCtx.syncRes.Resources[i].Kind == "Pod" {
assert.Equal(t, v1alpha1.ResultCodePruned, syncCtx.syncRes.Resources[i].Status)
} else if syncCtx.syncRes.Resources[i].Kind == "Service" {
assert.Equal(t, v1alpha1.ResultCodePruned, syncCtx.syncRes.Resources[i].Status)
} else {
t.Error("Resource isn't a pod or a service")
}
}
syncCtx.sync()
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
}
func TestSyncCreateFailure(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.kubectl = kubetest.MockKubectlCmd{
Commands: map[string]kubetest.KubectlOutput{
"test-service": {
Output: "",
Err: fmt.Errorf("error: error validating \"test.yaml\": error validating data: apiVersion not set; if you choose to ignore these errors, turn validation off with --validate=false"),
},
},
}
testSvc := test.NewService()
testSvc.SetAPIVersion("")
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: testSvc,
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, syncCtx.syncRes.Resources[0].Status)
}
func TestSyncPruneFailure(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.kubectl = kubetest.MockKubectlCmd{
Commands: map[string]kubetest.KubectlOutput{
"test-service": {
Output: "",
Err: fmt.Errorf(" error: timed out waiting for \"test-service\" to be synced"),
},
},
}
testSvc := test.NewService()
testSvc.SetName("test-service")
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: testSvc,
Target: nil,
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, syncCtx.syncRes.Resources[0].Status)
}
func unsortedManifest() []syncTask {
return []syncTask{
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Pod",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "PersistentVolume",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "ConfigMap",
},
},
},
func newTestSyncCtx() *syncContext {
return &syncContext{
comparison: &v1alpha1.ComparisonResult{},
config: &rest.Config{},
namespace: "test-namespace",
syncOp: &v1alpha1.SyncOperation{},
opState: &v1alpha1.OperationState{},
log: log.WithFields(log.Fields{"application": "fake-app"}),
}
}
func sortedManifest() []syncTask {
return []syncTask{
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "ConfigMap",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "PersistentVolume",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Pod",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
},
},
},
}
}
func TestSortKubernetesResourcesSuccessfully(t *testing.T) {
unsorted := unsortedManifest()
ks := newKindSorter(unsorted, resourceOrder)
sort.Sort(ks)
expectedOrder := sortedManifest()
assert.Equal(t, len(unsorted), len(expectedOrder))
for i, sorted := range unsorted {
assert.Equal(t, expectedOrder[i], sorted)
}
func TestRunWorkflows(t *testing.T) {
// syncCtx := newTestSyncCtx()
// syncCtx.doWorkflowSync(nil, nil)
}
func TestSortManifestHandleNil(t *testing.T) {
task := syncTask{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
}
manifest := []syncTask{
{},
task,
}
ks := newKindSorter(manifest, resourceOrder)
sort.Sort(ks)
assert.Equal(t, task, manifest[0])
assert.Nil(t, manifest[1].targetObj)
}
func TestSyncNamespaceAgainstCRD(t *testing.T) {
crd := syncTask{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": "argoproj.io/alpha1",
"kind": "Workflow",
},
}}
namespace := syncTask{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Namespace",
},
},
}
unsorted := []syncTask{crd, namespace}
ks := newKindSorter(unsorted, resourceOrder)
sort.Sort(ks)
expectedOrder := []syncTask{namespace, crd}
assert.Equal(t, len(unsorted), len(expectedOrder))
for i, sorted := range unsorted {
assert.Equal(t, expectedOrder[i], sorted)
}
}
func TestDontSyncOrPruneHooks(t *testing.T) {
syncCtx := newTestSyncCtx()
targetPod := test.NewPod()
targetPod.SetName("dont-create-me")
targetPod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
liveSvc := test.NewService()
liveSvc.SetName("dont-prune-me")
liveSvc.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
syncCtx.compareResult = &comparisonResult{
managedResources: []managedResource{{
Live: nil,
Target: targetPod,
Hook: true,
}, {
Live: liveSvc,
Target: nil,
Hook: true,
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 0)
syncCtx.sync()
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
}
func TestPersistRevisionHistory(t *testing.T) {
app := newFakeApp()
app.Status.OperationState = nil
app.Status.History = nil
defaultProject := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: test.FakeArgoCDNamespace,
Name: "default",
},
}
data := fakeData{
apps: []runtime.Object{app, defaultProject},
manifestResponse: &repository.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
}}
ctrl.appStateManager.SyncAppState(app, opState)
// Ensure we record spec.source into sync result
assert.Equal(t, app.Spec.Source, opState.SyncResult.Source)
updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, v1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, 1, len(updatedApp.Status.History))
assert.Equal(t, app.Spec.Source, updatedApp.Status.History[0].Source)
assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
}
func TestPersistRevisionHistoryRollback(t *testing.T) {
app := newFakeApp()
app.Status.OperationState = nil
app.Status.History = nil
defaultProject := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: test.FakeArgoCDNamespace,
Name: "default",
},
}
data := fakeData{
apps: []runtime.Object{app, defaultProject},
manifestResponse: &repository.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data)
// Sync with source specified
source := v1alpha1.ApplicationSource{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "test",
Value: "123",
},
},
},
}
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &source,
},
}}
ctrl.appStateManager.SyncAppState(app, opState)
// Ensure we record opState's source into sync result
assert.Equal(t, source, opState.SyncResult.Source)
updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, v1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, 1, len(updatedApp.Status.History))
assert.Equal(t, source, updatedApp.Status.History[0].Source)
assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
}

View File

@@ -1,4 +1,4 @@
# Argo CD Documentation
# ArgoCD Documentation
## [Getting Started](getting_started.md)
@@ -6,33 +6,11 @@
* [Architecture](architecture.md)
* [Tracking Strategies](tracking_strategies.md)
## Quick Reference
| Name | Kind | Description |
|------|------|-------------|
| [`argocd-cm.yaml`](argocd-cm.yaml) | ConfigMap | General Argo CD configuration |
| [`argocd-secret.yaml`](argocd-secret.yaml) | Secret | Password, Certificates, Signing Key |
| [`argocd-rbac-cm.yaml`](argocd-rbac-cm.yaml) | ConfigMap | RBAC Configuration |
| [`application.yaml`](application.yaml) | Application | Example application spec |
| [`project.yaml`](project.yaml) | AppProject | Example project spec |
## Features
* [Application Sources](application_sources.md)
* [Application Parameters](parameters.md)
* [Projects](projects.md)
* [Automated Sync](auto_sync.md)
* [Resource Health](health.md)
* [Resource Hooks](resource_hooks.md)
* [Resource Diffing](diffing.md)
* [Single Sign On](sso.md)
* [Webhooks](webhook.md)
* [RBAC](rbac.md)
* [Declarative Setup](declarative-setup.md)
* [Prometheus Metrics](metrics.md)
* [Custom Tooling](custom_tools.md)
## Other
* [Security](security.md)
* [Best Practices](best_practices.md)
* [Configuring Ingress](ingress.md)
* [Integration with CI Pipelines](ci_automation.md)
* [F.A.Q.](faq.md)
* [RBAC](rbac.md)

View File

@@ -1,61 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: guestbook
spec:
# The project the application belongs to.
project: default
# Source of the application manifests
source:
repoURL: https://github.com/argoproj/argocd-example-apps.git
targetRevision: HEAD
path: guestbook
# helm specific config
helm:
valueFiles:
- values-prod.yaml
# kustomize specific config
kustomize:
namePrefix: prod-
# directory
directory:
recurse: true
jsonnet:
# A list of Jsonnet External Variables
extVars:
- name: foo
value: bar
# You can use "code to determine if the value is either string (false, the default) or Jsonnet code (if code is true).
- code: true
name: baz
value: "true"
# A list of Jsonnet Top-level Arguments
tlas:
- code: false
name: foo
value: bar
# plugin specific config
plugin:
name: mypluginname
# Destination cluster and namespace to deploy the application
destination:
server: https://kubernetes.default.svc
namespace: guestbook
# Sync policy
syncPolicy:
automated:
prune: true
# Ignore differences at the specified json pointers
ignoreDifferences:
- group: apps
kind: Deployment
jsonPointers:
- /spec/replicas

View File

@@ -1,12 +1,10 @@
# Application Source Types
Argo CD supports several different ways in which kubernetes manifests can be defined:
ArgoCD supports several different ways in which kubernetes manifests can be defined:
* [ksonnet](https://ksonnet.io) applications
* [kustomize](https://kustomize.io) applications
* [helm](https://helm.sh) charts
* Directory of YAML/json/jsonnet manifests
* Any custom config management tool configured as a config management plugin
* Simple directory of YAML/json manifests
Some additional considerations should be made when deploying apps of a particular type:
@@ -37,7 +35,7 @@ guestbook-ui servicePort 80
guestbook-ui type "LoadBalancer"
```
When overriding ksonnet parameters in Argo CD, the component name should also be specified in the
When overriding ksonnet parameters in ArgoCD, the component name should also be specified in the
`argocd app set` command, in the form of `-p COMPONENT=PARAM=VALUE`. For example:
```
argocd app set guestbook-default -p guestbook-ui=image=gcr.io/heptio-images/ks-guestbook-demo:0.1
@@ -62,7 +60,7 @@ a `values.yaml`. For example, `service.type` is a common parameter which is expo
```
helm template . --set service.type=LoadBalancer
```
Similarly Argo CD can override values in the `values.yaml` parameters using `argo app set` command,
Similarly ArgoCD can override values in the `values.yaml` parameters using `argo app set` command,
in the form of `-p PARAM=VALUE`. For example:
```
argocd app set helm-guestbook -p service.type=LoadBalancer
@@ -70,16 +68,16 @@ argocd app set helm-guestbook -p service.type=LoadBalancer
### Helm Hooks
Helm hooks are equivalent in concept to [Argo CD resource hooks](resource_hooks.md). In helm, a hook
is any normal kubernetes resource annotated with the `helm.sh/hook` annotation. When Argo CD deploys
Helm hooks are equivalent in concept to [ArgoCD resource hooks](resource_hooks.md). In helm, a hook
is any normal kubernetes resource annotated with the `helm.sh/hook` annotation. When ArgoCD deploys
a helm application which contains helm hooks, all helm hook resources are currently ignored during
the `kubectl apply` of the manifests. There is an
[open issue](https://github.com/argoproj/argo-cd/issues/355) to map Helm hooks to Argo CD's concept
the `kubectl apply` of the manifests. There is an
[open issue](https://github.com/argoproj/argo-cd/issues/355) to map Helm hooks to ArgoCD's concept
of Pre/Post/Sync hooks.
### Random Data
Helm templating has the ability to generate random data during chart rendering via the
`randAlphaNum` function. Many helm charts from the [charts repository](https://github.com/helm/charts)
make use of this feature. For example, the following is the secret for the
[redis helm chart](https://github.com/helm/charts/blob/master/stable/redis/templates/secrets.yaml):
@@ -93,7 +91,7 @@ data:
{{- end }}
```
The Argo CD application controller periodically compares git state against the live state, running
The ArgoCD application controller periodically compares git state against the live state, running
the `helm template <CHART>` command to generate the helm manifests. Because the random value is
regenerated every time the comparison is made, any application which makes use of the `randAlphaNum`
function will always be in an `OutOfSync` state. This can be mitigated by explicitly setting a
@@ -102,35 +100,3 @@ value, in the values.yaml such that the value is stable between each comparison.
```
argocd app set redis -p password=abc123
```
## Config Management Plugins
Argo CD allows integrating more config management tools using config management plugins. The following changes are required to configure a new plugin:
* Make sure required binaries are available in `argocd-repo-server` pod. The binaries can be added via volume mounts or using custom image (see [custom_tools](custom_tools.md)).
* Register a new plugin in `argocd-cm` ConfigMap:
```yaml
data:
configManagementPlugins: |
- name: pluginName
init: # Optional command to initialize application source directory
command: ["sample command"]
args: ["sample args"]
generate: # Command to generate manifests YAML
command: ["sample command"]
args: ["sample args"]
```
The `generate` command must print a valid YAML stream to stdout. Both `init` and `generate` commands are executed inside the application source directory.
Commands have access to system environment variables and the following additional variables:
`ARGOCD_APP_NAME` - the application name; `ARGOCD_APP_NAMESPACE` - the application's destination namespace
* Create an application and specify the required config management plugin name.
```
argocd app create <appName> --config-management-plugin <pluginName>
```
More config management plugin examples are available in [argocd-example-apps](https://github.com/argoproj/argocd-example-apps/tree/master/plugins).

View File

@@ -30,3 +30,23 @@ applications and compares the current, live state against the desired target sta
the git repo). It detects `OutOfSync` application state and optionally takes corrective action. It
is responsible for invoking any user-defined hooks for lifecycle events (PreSync, Sync, PostSync)
### Application CRD (Custom Resource Definition)
The Application CRD is the Kubernetes resource object representing a deployed application instance
in an environment. It is defined by two key pieces of information:
* `source` reference to the desired state in git (repository, revision, path, environment)
* `destination` reference to the target cluster and namespace.
An example spec is as follows:
```
spec:
project: default
source:
repoURL: https://github.com/argoproj/argocd-example-apps.git
targetRevision: HEAD
path: guestbook
environment: default
destination:
server: https://kubernetes.default.svc
namespace: default
```

View File

@@ -1,116 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
data:
# Argo CD's externally facing base URL (optional). Required when configuring SSO
url: https://argo-cd-demo.argoproj.io
# A dex connector configuration (optional). See SSO configuration documentation:
# https://github.com/argoproj/argo-cd/blob/master/docs/sso.md
# https://github.com/dexidp/dex/tree/master/Documentation/connectors
dex.config: |
connectors:
# GitHub example
- type: github
id: github
name: GitHub
config:
clientID: aabbccddeeff00112233
clientSecret: $dex.github.clientSecret
orgs:
- name: your-github-org
teams:
- red-team
# OIDC configuration as an alternative to dex (optional).
oidc.config: |
name: Okta
issuer: https://dev-123456.oktapreview.com
clientID: aaaabbbbccccddddeee
clientSecret: $oidc.okta.clientSecret
# Git repositories to configure Argo CD with (optional).
# This list is updated when configuring/removing repos from the UI/CLI
repositories: |
- url: https://github.com/argoproj/my-private-repository
passwordSecret:
name: my-secret
key: password
usernameSecret:
name: my-secret
key: username
sshPrivateKeySecret:
name: my-secret
key: sshPrivateKey
# Non-standard and private Helm repositories (optional).
helm.repositories: |
- url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts
name: istio.io
- url: https://my-private-chart-repo.internal
name: private-repo
usernameSecret:
name: my-secret
key: username
passwordSecret:
name: my-secret
key: password
# Configuration to customize resource behavior (optional). Keys are in the form: group/Kind.
resource.customizations: |
admissionregistration.k8s.io/MutatingWebhookConfiguration:
# List of json pointers in the object to ignore differences
ignoreDifferences:
jsonPointers:
- webhooks/0/clientConfig/caBundle
certmanager.k8s.io/Certificate:
# Lua script for customizing the health status assessment
health.lua: |
hs = {}
if obj.status ~= nil then
if obj.status.conditions ~= nil then
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" and condition.status == "False" then
hs.status = "Degraded"
hs.message = condition.message
return hs
end
if condition.type == "Ready" and condition.status == "True" then
hs.status = "Healthy"
hs.message = condition.message
return hs
end
end
end
end
hs.status = "Progressing"
hs.message = "Waiting for certificate"
return hs
# Configuration to completely ignore entire classes of resource group/kinds (optional).
# Excluding high-volume resources improves performance and memory usage, and reduces load and
# bandwidth to the Kubernetes API server.
# These are globs, so a "*" will match all values.
# If you omit groups/kinds/clusters then they will match all groups/kind/clusters.
# NOTE: events.k8s.io and metrics.k8s.io are excluded by default
resource.exclusions: |
- apiGroups:
- repositories.stash.appscode.com
kinds:
- Snapshot
clusters:
- "*.local"
# Configuration to add a config management plugin.
configManagementPlugins: |
- name: kasane
init:
command: [kasane, update]
generate:
command: [kasane, show]
# The metadata.label key name where Argo CD injects the app name as a tracking label (optional).
# Tracking labels are used to determine which resources need to be deleted when pruning.
# If omitted, Argo CD injects the app name into the label: 'app.kubernetes.io/instance'
application.instanceLabelKey: mycompany.com/appname

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
data:
# policy.csv is a file containing user-defined RBAC policies and role definitions (optional).
# Policy rules are in the form:
# p, subject, resource, action, object, effect
# Role definitions and bindings are in the form:
# g, subject, inherited-subject
# See https://github.com/argoproj/argo-cd/blob/master/docs/rbac.md for additional information.
policy.csv: |
# Grant all members of the group 'my-org:team-alpha' the ability to sync apps in 'my-project'
p, my-org:team-alpha, applications, sync, my-project/*, allow
# Grant all members of 'my-org:team-beta' the admin role
g, my-org:team-beta, role:admin
# policy.default is the name of the default role which Argo CD falls back to when
# authorizing API requests (optional). If omitted or empty, users may still be able to log in,
# but will see no apps, projects, etc...
policy.default: role:readonly

View File

@@ -1,25 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
type: Opaque
data:
# TLS certificate and private key for API server (required).
# Autogenerated with a self-signed certificate when keys are missing or invalid.
tls.crt:
tls.key:
# bcrypt hash of the admin password and its last modified time (required).
# Autogenerated to be the name of the argocd-server pod when missing.
admin.password:
admin.passwordMtime:
# random server signature key for session validation (required).
# Autogenerated when missing.
server.secretkey:
# Shared secrets for authenticating GitHub, GitLab, BitBucket webhook events (optional).
# See https://github.com/argoproj/argo-cd/blob/master/docs/webhook.md for additional details.
github.webhook.secret:
gitlab.webhook.secret:
bitbucket.webhook.uuid:

Binary file not shown (image changed; size before: 58 KiB, after: 73 KiB)

View File

@@ -1,51 +0,0 @@
# Automated Sync Policy
Argo CD has the ability to automatically sync an application when it detects differences between
the desired manifests in git, and the live state in the cluster. A benefit of automatic sync is that
CI/CD pipelines no longer need direct access to the Argo CD API server to perform the deployment.
Instead, the pipeline makes a commit and pushes to the git repository with the changes to the
manifests in the tracking git repo.
To configure automated sync run:
```bash
argocd app set <APPNAME> --sync-policy automated
```
Alternatively, if creating the application from an application manifest, specify a syncPolicy with an
`automated` policy.
```yaml
spec:
syncPolicy:
automated: {}
```
## Automatic Pruning
By default (and as a safety mechanism), automated sync will not delete resources when Argo CD detects
the resource is no longer defined in git. To prune the resources, a manual sync can always be
performed (with pruning checked). Pruning can also be enabled to happen automatically as part of the
automated sync by running:
```bash
argocd app set <APPNAME> --auto-prune
```
Or by setting the prune option to true in the automated sync policy:
```yaml
spec:
syncPolicy:
automated:
prune: true
```
## Automated Sync Semantics
* An automated sync will only be performed if the application is OutOfSync. Applications in a
Synced or error state will not attempt automated sync.
* Automated sync will only attempt one synchronization per unique combination of commit SHA1 and
application parameters. If the most recent successful sync in the history was already performed
against the same commit-SHA and parameters, a second sync will not be attempted.
* Automatic sync will not reattempt a sync if the previous sync attempt against the same commit-SHA
and parameters had failed.
* Rollback cannot be performed against an application with automated sync enabled.

View File

@@ -1,78 +0,0 @@
# Best Practices
## Separating config vs. source code repositories
Using a separate git repository to hold your kubernetes manifests, keeping the config separate
from your application source code, is highly recommended for the following reasons:
1. It provides a clean separation of application code vs. application config. There will be times
when you wish to modify just the manifests without triggering an entire CI build. For example,
you likely do _not_ want to trigger a build if you simply wish to bump the number of replicas in
a Deployment spec.
2. Cleaner audit log. For auditing purposes, a repo which only holds configuration will have a much
cleaner git history of what changes were made, without the noise coming from check-ins due to
normal development activity.
3. Your application may be comprised of services built from multiple git repositories, but is
deployed as a single unit. Oftentimes, microservices applications are comprised of services
with different versioning schemes, and release cycles (e.g. ELK, Kafka + Zookeeper). It may not
make sense to store the manifests in one of the source code repositories of a single component.
4. Separation of access. The developers who are developing the application, may not necessarily be
the same people who can/should push to production environments, either intentionally or
unintentionally. By having separate repos, commit access can be given to the source code repo,
and not the application config repo.
5. If you are automating your CI pipeline, pushing manifest changes to the same git repository can
trigger an infinite loop of build jobs and git commit triggers. Having a separate repo to push
config changes to, prevents this from happening.
## Leaving room for imperativeness
It may be desired to leave room for some imperativeness/automation, and not have everything defined
in your git manifests. For example, if you want the number of your deployment's replicas to be
managed by [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/),
then you would not want to track `replicas` in git.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
# do not include replicas in the manifests if you want replicas to be controlled by HPA
# replicas: 1
template:
spec:
containers:
- image: nginx:1.7.9
name: nginx
ports:
- containerPort: 80
...
```
## Ensuring manifests at git revisions are truly immutable
When using templating tools like `helm` or `kustomize`, it is possible for manifests to change
their meaning from one day to the next. This is typically caused by changes made to an upstream helm
repository or kustomize base.
For example, consider the following kustomization.yaml
```yaml
bases:
- github.com/argoproj/argo-cd//manifests/cluster-install
```
The above kustomization has a remote base to the HEAD revision of the argo-cd repo. Since this
is not a stable target, the manifests for this kustomize application can suddenly change meaning, even without
any changes to your own git repository.
A better version would be to use a git tag or commit SHA. For example:
```yaml
bases:
- github.com/argoproj/argo-cd//manifests/cluster-install?ref=v0.11.1
```

View File

@@ -1,58 +0,0 @@
# Automation from CI Pipelines
Argo CD follows the GitOps model of deployment, where desired configuration changes are first
pushed to git, and the cluster state then syncs to the desired state in git. This is a departure
from imperative pipelines which do not traditionally use git repositories to hold application
config.
To push new container images into a cluster managed by Argo CD, the following workflow (or
a variation of it) might be used:
1. Build and publish a new container image
```
docker build -t mycompany/guestbook:v2.0 .
docker push mycompany/guestbook:v2.0
```
2. Update the local manifests using your preferred templating tool, and push the changes to git.
NOTE: the use of a different git repository to hold your kubernetes manifests (separate from
your application source code), is highly recommended. See [best practices](best_practices.md)
for further rationale.
```
git clone https://github.com/mycompany/guestbook-config.git
cd guestbook-config
# kustomize
kustomize edit set imagetag mycompany/guestbook:v2.0
# ksonnet
ks param set guestbook image mycompany/guestbook:v2.0
# plain yaml
kubectl patch --local -f config-deployment.yaml -p '{"spec":{"template":{"spec":{"containers":[{"name":"guestbook","image":"mycompany/guestbook:v2.0"}]}}}}' -o yaml
git add .
git commit -m "Update guestbook to v2.0"
git push
```
3. Synchronize the app (Optional)
For convenience, the argocd CLI can be downloaded directly from the API server. This is
useful so that the CLI used in the CI pipeline is always kept in sync and uses an argocd binary
that is always compatible with the Argo CD API server.
```
export ARGOCD_SERVER=argocd.mycompany.com
export ARGOCD_AUTH_TOKEN=<JWT token generated from project>
curl -sSL -o /usr/local/bin/argocd https://${ARGOCD_SERVER}/download/argocd-linux-amd64
argocd app sync guestbook
argocd app wait guestbook
```
If [automated synchronization](auto_sync.md) is configured for the application, this step is
unnecessary. The controller will automatically detect the new config (fast tracked using a
[webhook](webhook.md), or polled every 3 minutes), and automatically sync the new manifests.

View File

@@ -1,73 +0,0 @@
# Custom Tooling
Argo CD bundles preferred versions of its supported templating tools (helm, kustomize, ks, jsonnet)
as part of its container images. Sometimes, it may be desired to use a specific version of a tool
other than what Argo CD bundles. Some reasons to do this might be:
* To upgrade/downgrade to a specific version of a tool due to bugs or bug fixes.
* To install additional dependencies which are to be used by kustomize's configmap/secret generators
(e.g. curl, vault, gpg, AWS CLI)
* In the future, to install a [custom templating tool](https://github.com/argoproj/argo-cd/issues/701)
As the Argo CD repo-server is the single service responsible for generating Kubernetes manifests, it
can be customized to use the alternative toolchain required by your environment.
## Adding tools via volume mounts
The first technique is to use an `init` container and a `volumeMount` to copy a different version of
a tool into the repo-server container. In the following example, an init container is overwriting
the helm binary with a different version than what is bundled in Argo CD:
```yaml
spec:
# 1. Define an emptyDir volume which will hold the custom binaries
volumes:
- name: custom-tools
emptyDir: {}
# 2. Use an init container to download/copy custom binaries into the emptyDir
initContainers:
- name: download-tools
image: alpine:3.8
command: [sh, -c]
args:
- wget -qO- https://storage.googleapis.com/kubernetes-helm/helm-v2.12.3-linux-amd64.tar.gz | tar -xvzf - &&
mv linux-amd64/helm /custom-tools/
volumeMounts:
- mountPath: /custom-tools
name: custom-tools
# 3. Volume mount the custom binary to the bin directory (overriding the existing version)
containers:
- name: argocd-repo-server
volumeMounts:
- mountPath: /usr/local/bin/helm
name: custom-tools
subPath: helm
```
## BYOI (build your own image)
Sometimes replacing a binary isn't sufficient and you need to install other dependencies. The
following example builds an entirely customized repo-server from a Dockerfile, installing extra
dependencies that may be needed for generating manifests.
```Dockerfile
FROM argoproj/argocd:latest
# Switch to root for the ability to perform install
USER root
# Install tools needed for your repo-server to retrieve & decrypt secrets, render manifests
# (e.g. curl, awscli, gpg, sops)
RUN apt-get update && \
apt-get install -y \
curl \
awscli \
gpg && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
curl -o /usr/local/bin/sops -L https://github.com/mozilla/sops/releases/download/3.2.0/sops-3.2.0.linux && \
chmod +x /usr/local/bin/sops
# Switch back to non-root user
USER argocd
```

View File

@@ -1,294 +0,0 @@
# Declarative Setup
Argo CD applications, projects and settings can be defined declaratively using Kubernetes manifests.
## Quick Reference
| Name | Kind | Description |
|------|------|-------------|
| [`argocd-cm.yaml`](argocd-cm.yaml) | ConfigMap | General Argo CD configuration |
| [`argocd-secret.yaml`](argocd-secret.yaml) | Secret | Password, Certificates, Signing Key |
| [`argocd-rbac-cm.yaml`](argocd-rbac-cm.yaml) | ConfigMap | RBAC Configuration |
| [`application.yaml`](application.yaml) | Application | Example application spec |
| [`project.yaml`](project.yaml) | AppProject | Example project spec |
## Applications
The Application CRD is the Kubernetes resource object representing a deployed application instance
in an environment. It is defined by two key pieces of information:
* `source` reference to the desired state in git (repository, revision, path, environment)
* `destination` reference to the target cluster and namespace.
A minimal Application spec is as follows:
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: guestbook
spec:
project: default
source:
repoURL: https://github.com/argoproj/argocd-example-apps.git
targetRevision: HEAD
path: guestbook
destination:
server: https://kubernetes.default.svc
namespace: guestbook
```
See [application.yaml](application.yaml) for additional fields
## Projects
The AppProject CRD is the Kubernetes resource object representing a logical grouping of applications.
It is defined by the following key pieces of information:
* `sourceRepos` reference to the repositories that applications within the project can pull manifests from.
* `destinations` reference to clusters and namespaces that applications within the project can deploy into.
* `roles` list of entities with definitions of their access to resources within the project.
An example spec is as follows:
```yaml
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: my-project
spec:
description: Example Project
# Allow manifests to deploy from any git repos
sourceRepos:
- '*'
# Only permit applications to deploy to the guestbook namespace in the same cluster
destinations:
- namespace: guestbook
server: https://kubernetes.default.svc
# Deny all cluster-scoped resources from being created, except for Namespace
clusterResourceWhitelist:
- group: ''
kind: Namespace
# Allow all namespaced-scoped resources to be created, except for ResourceQuota, LimitRange, NetworkPolicy
namespaceResourceBlacklist:
- group: ''
kind: ResourceQuota
- group: ''
kind: LimitRange
- group: ''
kind: NetworkPolicy
roles:
# A role which provides read-only access to all applications in the project
- name: read-only
description: Read-only privileges to my-project
policies:
- p, proj:my-project:read-only, applications, get, my-project/*, allow
groups:
- my-oidc-group
# A role which provides sync privileges to only the guestbook-dev application, e.g. to provide
# sync privileges to a CI system
- name: ci-role
description: Sync privileges for guestbook-dev
policies:
- p, proj:my-project:ci-role, applications, sync, my-project/guestbook-dev, allow
# NOTE: JWT tokens can only be generated by the API server and the token is not persisted
# anywhere by Argo CD. It can be prematurely revoked by removing the entry from this list.
jwtTokens:
- iat: 1535390316
```
## Repositories
Repository credentials are stored in secrets. Use the following steps to configure a repo:
1. Create a secret which contains the repository credentials. Consider using [bitnami-labs/sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) to store an encrypted secret
definition as a Kubernetes manifest.
2. Register the repository in the `argocd-cm` config map. Each repository must have a `url` field and either `usernameSecret` and `passwordSecret`, or `sshPrivateKeySecret`.
Example:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
data:
repositories: |
- url: https://github.com/argoproj/my-private-repository
passwordSecret:
name: my-secret
key: password
usernameSecret:
name: my-secret
key: username
sshPrivateKeySecret:
name: my-secret
key: sshPrivateKey
```
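For illustration, the referenced `my-secret` could be created imperatively as shown below (a minimal sketch; the secret name, key names, and the `/path/to/id_rsa` path are placeholders that must match the config map references above, and only the keys your repository actually uses need to exist):
```bash
# Create the credentials secret referenced by usernameSecret/passwordSecret/sshPrivateKeySecret.
kubectl create secret generic my-secret -n argocd \
  --from-literal=username=my-username \
  --from-literal=password=my-password \
  --from-file=sshPrivateKey=/path/to/id_rsa
```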
## Clusters
Cluster credentials are stored in secrets like repository credentials, but do not require an entry in the `argocd-cm` config map. Each secret must have the label
`argocd.argoproj.io/secret-type: cluster`.
The secret data must include the following fields:
* `name` - cluster name
* `server` - cluster API server URL
* `config` - JSON representation of the following data structure:
```yaml
# Basic authentication settings
username: string
password: string
# Bearer authentication settings
bearerToken: string
# IAM authentication configuration
awsAuthConfig:
clusterName: string
roleARN: string
# Transport layer security configuration settings
tlsClientConfig:
# PEM-encoded bytes (typically read from a client certificate file).
caData: string
# PEM-encoded bytes (typically read from a client certificate file).
certData: string
# Server should be accessed without verifying the TLS certificate
insecure: boolean
# PEM-encoded bytes (typically read from a client certificate key file).
keyData: string
# ServerName is passed to the server for SNI and is used in the client to check server
# certificates against. If ServerName is empty, the hostname used to contact the
# server is used.
serverName: string
```
Cluster secret example:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: mycluster-secret
labels:
argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
name: mycluster.com
server: https://mycluster.com
config: |
{
"bearerToken": "<authentication token>",
"tlsClientConfig": {
"insecure": false,
"caData": "<base64 encoded certificate>"
}
}
```
## Helm Chart repositories
Non-standard Helm Chart repositories have to be registered under the `helm.repositories` key in the
`argocd-cm` ConfigMap. Each repository must have `url` and `name` fields. For private Helm repos you
may need to configure access credentials and HTTPS settings using `usernameSecret`, `passwordSecret`,
`caSecret`, `certSecret` and `keySecret` fields.
Example:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
data:
helm.repositories: |
- url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts
name: istio.io
- url: https://argoproj.github.io/argo-helm
name: argo
usernameSecret:
name: my-secret
key: username
passwordSecret:
name: my-secret
key: password
caSecret:
name: my-secret
key: ca
certSecret:
name: my-secret
key: cert
keySecret:
name: my-secret
key: key
```
## Resource Exclusion
Resources can be excluded from discovery and sync so that ArgoCD is unaware of them. For example, `events.k8s.io` and `metrics.k8s.io` are always excluded. Use cases:
* You have temporary issues and want to exclude problematic resources.
* There are many instances of a resource kind, which impacts ArgoCD's performance.
* You want to restrict ArgoCD's access to certain kinds of resources, e.g. secrets. See [security.md#cluster-rbac](security.md#cluster-rbac).
To configure this, edit the `argocd-cm` config map:
```
kubectl edit configmap argocd-cm -n argocd
```
Add `resource.exclusions`, e.g.:
```yaml
apiVersion: v1
data:
resource.exclusions: |
- apiGroups:
- "*"
kinds:
- "*"
clusters:
- https://192.168.0.20
kind: ConfigMap
```
The `resource.exclusions` node is a list of objects. Each object can have:
- `apiGroups` A list of globs to match the API group.
- `kinds` A list of kinds to match. Can be "*" to match all.
- `clusters` A list of globs to match the cluster name.
If all three match, then the resource is ignored.
Notes:
* Quote globs in your YAML to avoid parsing errors.
* Invalid globs result in the whole rule being ignored.
* If you add a rule that matches existing resources, these will appear in the interface as `OutOfSync`.
## SSO & RBAC
* SSO configuration details: [SSO](sso.md)
* RBAC configuration details: [RBAC](rbac.md)
## Manage Argo CD using Argo CD
Argo CD is able to manage itself since all settings are represented by Kubernetes manifests. The suggested way is to create a [Kustomize](https://github.com/kubernetes-sigs/kustomize)
based application which uses the base Argo CD manifests from https://github.com/argoproj/argo-cd and applies the required changes on top.
Example of `kustomization.yaml`:
```yaml
bases:
- github.com/argoproj/argo-cd//manifests/cluster-install?ref=v0.10.6
# additional resources like ingress rules, cluster and repository secrets.
resources:
- clusters-secrets.yaml
- repos-secrets.yaml
# changes to config maps
patchesStrategicMerge:
- overlays/argo-cd-cm.yaml
```
A live example of a self-managed Argo CD config is available at https://cd.apps.argoproj.io, with the configuration
stored at [argoproj/argoproj-deployments](https://github.com/argoproj/argoproj-deployments/tree/master/argocd).
> NOTE: You will need to sign in using your GitHub account to get access to https://cd.apps.argoproj.io

View File

@@ -1,59 +0,0 @@
# Diffing Customization
It is possible for an application to be `OutOfSync` even immediately after a successful Sync operation. Some reasons for this might be:
* There is a bug in the manifest, where it contains extra/unknown fields that are not part of the actual K8s spec. These extra fields would get dropped when querying Kubernetes for the live state,
resulting in an `OutOfSync` status indicating a missing field was detected.
* The sync was performed (with pruning disabled), and there are resources which need to be deleted.
* A controller or [mutating webhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) is altering the object after it was
submitted to Kubernetes in a manner which contradicts Git.
* A Helm chart is using a template function such as [`randAlphaNum`](https://github.com/helm/charts/blob/master/stable/redis/templates/secret.yaml#L16),
which generates different data every time `helm template` is invoked.
* For Horizontal Pod Autoscaling (HPA) objects, the HPA controller is known to reorder `spec.metrics`
in a specific order. See [kubernetes issue #74099](https://github.com/kubernetes/kubernetes/issues/74099).
To work around this, you can order `spec.metrics` in git in the same order that the controller
prefers.
In case it is impossible to fix the upstream issue, Argo CD allows you to optionally ignore differences of problematic resources.
The diffing customization can be configured for single or multiple application resources or at a system level.
## Application level configuration
Argo CD allows ignoring differences at a specific JSON path. The following sample application is configured to ignore differences in `spec.replicas` for all deployments:
```yaml
spec:
ignoreDifferences:
- group: apps
kind: Deployment
jsonPointers:
- /spec/replicas
```
The above customization could be narrowed to a resource with the specified name and optional namespace:
```yaml
spec:
ignoreDifferences:
- group: apps
kind: Deployment
name: guestbook
namespace: default
jsonPointers:
- /spec/replicas
```
## System-level configuration
The comparison of resources with well-known issues can be customized at a system level. Ignored differences can be configured for a specified group and kind
in the `resource.customizations` key of the `argocd-cm` ConfigMap. The following is an example of a customization which ignores the `caBundle` field
of `MutatingWebhookConfiguration` webhooks:
```yaml
data:
resource.customizations: |
admissionregistration.k8s.io/MutatingWebhookConfiguration:
ignoreDifferences:
jsonPointers:
- webhooks/0/clientConfig/caBundle
```

View File

@@ -1,28 +0,0 @@
# FAQ
## Why is my application still `OutOfSync` immediately after a successful Sync?
See [Diffing](diffing.md) documentation for reasons resources can be OutOfSync, and ways to configure
Argo CD to ignore fields when differences are expected.
## Why is my application stuck in `Progressing` state?
Argo CD provides health assessments for several standard Kubernetes types. The `Ingress` and `StatefulSet` types have known issues which might cause the health check
to return `Progressing` instead of `Healthy`.
* `Ingress` is considered healthy if the `status.loadBalancer.ingress` list is non-empty, with at least one value for `hostname` or `IP`. Some ingress controllers
([contour](https://github.com/heptio/contour/issues/403), [traefik](https://github.com/argoproj/argo-cd/issues/968#issuecomment-451082913)) don't update the
`status.loadBalancer.ingress` field, which causes the `Ingress` to be stuck in a `Progressing` state forever.
* `StatefulSet` is considered healthy if the value of the `status.updatedReplicas` field matches the `spec.replicas` field. Due to the Kubernetes bug
[kubernetes/kubernetes#68573](https://github.com/kubernetes/kubernetes/issues/68573), `status.updatedReplicas` is not populated. So unless you run a Kubernetes version which
includes the fix [kubernetes/kubernetes#67570](https://github.com/kubernetes/kubernetes/pull/67570), a `StatefulSet` might stay in a `Progressing` state.
As a workaround, Argo CD allows providing a [health check](health.md) customization which overrides the default behavior.
## I forgot the admin password, how do I reset it?
Edit the `argocd-secret` secret and update the `admin.password` field with a new bcrypt hash. You
can use a site like https://www.browserling.com/tools/bcrypt to generate a new hash. Another option
is to delete both the `admin.password` and `admin.passwordMtime` keys and restart argocd-server.
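For example, the following is a minimal sketch of resetting the password from the command line (it assumes the `htpasswd` utility is available; any bcrypt generator works, and `mynewpassword` is a placeholder):
```bash
# Generate a bcrypt hash of the new password. The $2y prefix produced by htpasswd
# is rewritten to $2a, which the Go bcrypt library used by Argo CD accepts.
NEW_HASH=$(htpasswd -nbBC 10 "" mynewpassword | tr -d ':\n' | sed 's/$2y/$2a/')
# Patch the secret with the new hash and a current modification time.
kubectl -n argocd patch secret argocd-secret \
  -p '{"stringData": {"admin.password": "'"$NEW_HASH"'", "admin.passwordMtime": "'"$(date +%FT%T%Z)"'"}}'
```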

View File

@@ -1,168 +1,177 @@
# Argo CD Getting Started
# ArgoCD Getting Started
An example guestbook application is provided to demonstrate how ArgoCD works.
## Requirements
* Installed [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) command-line tool
* Have a [kubeconfig](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) file (default location is `~/.kube/config`).
## 1. Install Argo CD
```bash
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
## 1. Install ArgoCD
```
This will create a new namespace, `argocd`, where Argo CD services and application resources will live.
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/v0.7.1/manifests/install.yaml
```
This will create a new namespace, `argocd`, where ArgoCD services and application resources will live.
NOTE:
* On GKE, you will need to grant your account the ability to create new cluster roles:
```bash
kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com
```
* On GKE with RBAC enabled, you may need to grant your account the ability to create new cluster roles
```
kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com
```
## 2. Download ArgoCD CLI
## 2. Download Argo CD CLI
Download the latest ArgoCD version:
Download the latest Argo CD version from https://github.com/argoproj/argo-cd/releases/latest.
Also available in Mac Homebrew:
```bash
brew tap argoproj/tap
On Mac:
```
brew install argoproj/tap/argocd
```
On Linux:
## 3. Access the Argo CD API server
```
curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v0.7.1/argocd-linux-amd64
chmod +x /usr/local/bin/argocd
```
By default, the Argo CD API server is not exposed with an external IP. To access the API server,
choose one of the following techniques to expose the Argo CD API server:
## 3. Open access to ArgoCD API server
### Service Type LoadBalancer
Change the argocd-server service type to `LoadBalancer`:
By default, the ArgoCD API server is not exposed with an external IP. To expose the API server,
change the service type to `LoadBalancer`:
```bash
```
kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
```
### Ingress
Follow the [ingress documentation](ingress.md) on how to configure Argo CD with ingress.
## 4. Login to the server from the CLI
### Port Forwarding
Kubectl port-forwarding can also be used to connect to the API server without exposing the service.
```bash
kubectl port-forward svc/argocd-server -n argocd 8080:443
Login with using the `admin` user. The initial password is autogenerated to be the pod name of the
ArgoCD API server. This can be retrieved with the command:
```
The API server can then be accessed at localhost:8080
## 4. Login using the CLI
Login as the `admin` user. The initial password is autogenerated to be the pod name of the
Argo CD API server. This can be retrieved with the command:
```bash
kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2
kubectl get pods -n argocd -l app=argocd-server -o name | cut -d'/' -f 2
```
Using the above password, login to Argo CD's IP or hostname:
```bash
argocd login <ARGOCD_SERVER>
Using the above password, login to ArgoCD's external IP:
On Minikube:
```
argocd login $(minikube service argocd-server -n argocd --url | cut -d'/' -f 3) --name minikube
```
Other clusters:
```
kubectl get svc -n argocd argocd-server
argocd login <EXTERNAL-IP>
```
Change the password using the command:
```bash
After logging in, change the password using the command:
```
argocd account update-password
argocd relogin
```
## 5. Register a cluster to deploy apps to (optional)
## 5. Register a cluster to deploy apps to
This step registers a cluster's credentials to Argo CD, and is only necessary when deploying to
an external cluster. When deploying internally (to the same cluster that Argo CD is running in),
https://kubernetes.default.svc should be used as the application's K8s API server address.
First list all cluster contexts in your current kubeconfig:
```bash
We will now register a cluster to deploy applications to. First list all cluster contexts in your
kubeconfig:
```
argocd cluster add
```
Choose a context name from the list and supply it to `argocd cluster add CONTEXTNAME`. For example,
for docker-for-desktop context, run:
```bash
argocd cluster add docker-for-desktop
for minikube context, run:
```
argocd cluster add minikube --in-cluster
```
The above command installs a ServiceAccount (`argocd-manager`) into the kube-system namespace of
that kubectl context, and binds the service account to an admin-level ClusterRole. Argo CD uses this
service account token to perform its management tasks (i.e. deploy/monitoring).
The above command installs an `argocd-manager` ServiceAccount and ClusterRole into the cluster
associated with the supplied kubectl context. ArgoCD uses the service account token to perform its
management tasks (i.e. deploy/monitoring).
> NOTE: the rules of the `argocd-manager-role` role can be modified such that it only has
`create`, `update`, `patch`, `delete` privileges to a limited set of namespaces, groups, kinds.
However, `get`, `list`, `watch` privileges are required at the cluster scope for Argo CD to function.
The `--in-cluster` option indicates that the cluster we are registering is the same cluster that
ArgoCD is running in. This allows ArgoCD to connect to the cluster using the internal kubernetes
hostname (kubernetes.default.svc). When registering a cluster external to ArgoCD, the `--in-cluster`
flag should be omitted.
## 6. Create an application from a git repository
An example git repository containing a guestbook application is available at
https://github.com/argoproj/argocd-example-apps.git to demonstrate how Argo CD works.
### Creating apps via CLI
```bash
argocd app create guestbook \
--repo https://github.com/argoproj/argocd-example-apps.git \
--path guestbook \
--dest-server https://kubernetes.default.svc \
--dest-namespace default
```
## 6. Create the application from a git repository
### Creating apps via UI
Open a browser to the Argo CD external UI, and login using the credentials and IP/hostname set in step 4.
Connect the https://github.com/argoproj/argocd-example-apps.git repo to Argo CD:
Open a browser to the ArgoCD external UI, and login using the credentials set in step 4.
On Minikube:
```
minikube service argocd-server -n argocd
```
Connect a git repository containing your apps. An example repository containing a sample
guestbook application is available at https://github.com/argoproj/argocd-example-apps.git.
![connect repo](assets/connect_repo.png)
After connecting a git repository, select the guestbook application for creation:
![select repo](assets/select_repo.png)
![select app](assets/select_app.png)
![select env](assets/select_env.png)
![create app](assets/create_app.png)
### Creating apps via CLI
Applications can also be created using the ArgoCD CLI:
```
argocd app create guestbook-default --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --env default
```
## 7. Sync (deploy) the application
Once the guestbook application is created, you can now view its status:
```bash
$ argocd app get guestbook
Name: guestbook
Server: https://kubernetes.default.svc
Namespace: default
URL: https://10.97.164.88/applications/guestbook
Repo: https://github.com/argoproj/argocd-example-apps.git
Target:
Path: guestbook
Sync Policy: <none>
Sync Status: OutOfSync from (1ff8a67)
Health Status: Missing
From UI:
![create app](assets/guestbook-app.png)
GROUP KIND NAMESPACE NAME STATUS HEALTH
apps Deployment default guestbook-ui OutOfSync Missing
Service default guestbook-ui OutOfSync Missing
From CLI:
```
$ argocd app get guestbook-default
Name: guestbook-default
Server: https://kubernetes.default.svc
Namespace: default
URL: https://192.168.64.36:31880/applications/argocd/guestbook-default
Environment: default
Repo: https://github.com/argoproj/argocd-example-apps.git
Path: guestbook
Target: HEAD
KIND NAME STATUS HEALTH
Service guestbook-ui OutOfSync
Deployment guestbook-ui OutOfSync
```
The application status is initially `OutOfSync` state, since the application has yet to be
The application status is initially in an `OutOfSync` state, since the application has yet to be
deployed, and no Kubernetes resources have been created. To sync (deploy) the application, run:
```bash
argocd app sync guestbook
```
$ argocd app sync guestbook-default
Application: guestbook-default
Operation: Sync
Phase: Succeeded
Message: successfully synced
KIND NAME MESSAGE
Service guestbook-ui service "guestbook-ui" created
Deployment guestbook-ui deployment.apps "guestbook-ui" created
```
This command retrieves the manifests from the git repository and performs a `kubectl apply` of the
manifests. The guestbook app is now running and you can now view its resource components, logs,
events, and assessed health status:
This command retrieves the manifests from the git repository and performs a `kubectl apply` of the
manifests. The guestbook app is now running and you can now view its resource
components, logs, events, and assessed health:
### From UI:
![guestbook app](assets/guestbook-app.png)
![view app](assets/guestbook-tree.png)
## 8. Next Steps
Argo CD supports additional features such as automated sync, SSO, WebHooks, RBAC, Projects. See the
rest of the [documentation](./) for details.
ArgoCD supports additional features such as SSO, WebHooks, RBAC, Projects. See the rest of
the [documentation](./) for details.

View File

@@ -1,11 +1,12 @@
# Resource Health
## Overview
Argo CD provides built-in health assessment for several standard Kubernetes types, which is then
ArgoCD provides built-in health assessment for several standard Kubernetes types, which is then
surfaced to the overall Application health status as a whole. The following checks are made for
specific types of Kubernetes resources:
### Deployment, ReplicaSet, StatefulSet, DaemonSet
* Observed generation is equal to desired generation.
* Number of **updated** replicas equals the number of desired replicas.
@@ -15,76 +16,3 @@ with at least one value for `hostname` or `IP`.
### Ingress
* The `status.loadBalancer.ingress` list is non-empty, with at least one value for `hostname` or `IP`.
### PersistentVolumeClaim
* The `status.phase` is `Bound`
## Custom Health Checks
Argo CD supports custom health checks written in [Lua](https://www.lua.org/). This is useful if you:
* Are affected by known issues where your `Ingress` or `StatefulSet` resources are stuck in a `Progressing` state because of a bug in the resource controller.
* Have a custom resource for which Argo CD does not have a built-in health check.
There are two ways to configure a custom health check. The next two sections describe those ways.
### Way 1. Define a Custom Health Check in `argocd-cm` ConfigMap
Custom health checks can be defined in the `resource.customizations` field of the `argocd-cm` ConfigMap. The following example demonstrates a health check for `certmanager.k8s.io/Certificate`.
```yaml
data:
resource.customizations: |
certmanager.k8s.io/Certificate:
health.lua: |
hs = {}
if obj.status ~= nil then
if obj.status.conditions ~= nil then
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" and condition.status == "False" then
hs.status = "Degraded"
hs.message = condition.message
return hs
end
if condition.type == "Ready" and condition.status == "True" then
hs.status = "Healthy"
hs.message = condition.message
return hs
end
end
end
end
hs.status = "Progressing"
hs.message = "Waiting for certificate"
return hs
```
`obj` is a global variable which contains the resource. The script must return an object with a `status` and an optional `message` field.
NOTE: as a security measure, you don't have access to most of the standard Lua libraries.
### Way 2. Contribute a Custom Health Check to https://github.com/argoproj/argo-cd
A health check can be bundled into Argo CD. Custom health check scripts are located in the `resource_customizations` directory, which must have the following structure:
```
argo-cd
|-- resource_customizations
| |-- your.crd.group.io # CRD group
| | |-- MyKind # Resource kind
| | | |-- health.lua # Health check
| | | |-- health_test.yaml # Test inputs and expected results
| | | +-- testdata # Directory with test resource YAML definitions
```
Each health check must have tests defined in the `health_test.yaml` file. The `health_test.yaml` is a YAML file with the following structure:
```yaml
tests:
- healthStatus:
status: ExpectedStatus
message: Expected message
inputPath: testdata/test-resource-definition.yaml
```
[PR #1139](https://github.com/argoproj/argo-cd/pull/1139) is an example of a custom health check for the Cert Manager CRDs.

View File

@@ -1,176 +0,0 @@
# Ingress Configuration
Argo CD runs both a gRPC server (used by the CLI) and an HTTP/HTTPS server (used by the UI).
Both protocols are exposed by the argocd-server service object on the following ports:
* 443 - gRPC/HTTPS
* 80 - HTTP (redirects to HTTPS)
There are several ways Ingress can be configured.
## [kubernetes/ingress-nginx](https://github.com/kubernetes/ingress-nginx)
### Option 1: ssl-passthrough
Because Argo CD serves multiple protocols (gRPC/HTTPS) on the same port (443), this provides a
challenge when attempting to define a single nginx ingress object and rule for the argocd-service,
since the `nginx.ingress.kubernetes.io/backend-protocol` [annotation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#backend-protocol)
accepts only a single value for the backend protocol (e.g. HTTP, HTTPS, GRPC, GRPCS).
In order to expose the Argo CD API server with a single ingress rule and hostname, the
`nginx.ingress.kubernetes.io/ssl-passthrough` [annotation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#ssl-passthrough)
must be used to passthrough TLS connections and terminate TLS at the Argo CD API server.
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server-ingress
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
rules:
- host: argocd.example.com
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
```
The above rule terminates TLS at the Argo CD API server, which detects the protocol being used,
and responds appropriately. Note that the `nginx.ingress.kubernetes.io/ssl-passthrough` annotation
requires that the `--enable-ssl-passthrough` flag be added to the command line arguments to
`nginx-ingress-controller`.
### Option 2: Multiple ingress objects and hosts
Since ingress-nginx Ingress supports only a single protocol per Ingress object, an alternative
way would be to define two Ingress objects. One for HTTP/HTTPS, and the other for gRPC:
HTTP/HTTPS Ingress:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server-http-ingress
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
rules:
- http:
paths:
- backend:
serviceName: argocd-server
servicePort: http
host: argocd.example.com
tls:
- hosts:
- argocd.example.com
secretName: argocd-secret
```
gRPC Ingress:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server-grpc-ingress
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
spec:
rules:
- http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
host: grpc.argocd.example.com
tls:
- hosts:
- grpc.argocd.example.com
secretName: argocd-secret
```
The API server should then be run with TLS disabled. Edit the `argocd-server` deployment to add the
`--insecure` flag to the argocd-server command:
```yaml
spec:
  template:
    spec:
      containers:
      - name: argocd-server
        command:
- /argocd-server
- --staticassets
- /shared/app
- --repo-server
- argocd-repo-server:8081
- --insecure
```
The obvious disadvantage of this approach is that it requires two separate hostnames for
the API server -- one for gRPC and the other for HTTP/HTTPS. However, it allows TLS termination to
happen at the ingress controller.
## AWS Application Load Balancers (ALBs) and Classic ELB (HTTP mode)
Neither ALBs nor Classic ELBs in HTTP mode have full support for HTTP/2 gRPC, which is the
protocol used by the `argocd` CLI. Thus, when using an AWS load balancer, either a Classic ELB in
passthrough mode or an NLB is needed.
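For example, a sketch of the relevant fragment of the `argocd-server` service, requesting an NLB via the in-tree AWS cloud provider annotation (support for this annotation depends on your Kubernetes version):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: argocd-server
  annotations:
    # request an NLB (TCP passthrough) instead of a Classic ELB
    service.beta.kubernetes.io/aws-load-balancer-type: nlb
spec:
  type: LoadBalancer
```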
## UI base path
If the Argo CD UI is served under a non-root path (e.g. `/argo-cd` instead of `/`), then the UI path should be configured in the API server.
To configure the UI path, add the `--basehref` flag to the `argocd-server` deployment command:
```yaml
spec:
  template:
    spec:
      containers:
      - name: argocd-server
        command:
- /argocd-server
- --staticassets
- /shared/app
- --repo-server
- argocd-repo-server:8081
- --basehref
- /argo-cd
```
NOTE: the `--basehref` flag only changes the UI base URL. The API server keeps serving under the `/` path, so you need to add a URL rewrite rule to the proxy config.
Example nginx.conf with URL rewrite:
```
worker_processes 1;
events { worker_connections 1024; }
http {
sendfile on;
server {
listen 443;
location /argo-cd {
rewrite /argo-cd/(.*) /$1 break;
proxy_pass https://localhost:8080;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
}
}
}
```

View File

@@ -1,48 +0,0 @@
# Argo CD Release Instructions
1. Tag, build, and push argo-cd-ui
```bash
cd argo-cd-ui
git checkout -b release-X.Y
git tag vX.Y.Z
git push upstream release-X.Y --tags
IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z DOCKER_PUSH=true yarn docker
```
2. Create release-X.Y branch (if creating initial X.Y release)
```bash
git checkout -b release-X.Y
git push upstream release-X.Y
```
3. Update VERSION and manifests with new version
```bash
vi VERSION # ensure value is desired X.Y.Z semantic version
make manifests IMAGE_TAG=vX.Y.Z
git commit -a -m "Update manifests to vX.Y.Z"
git push upstream release-X.Y
```
4. Tag, build, and push release to docker hub
```bash
git tag vX.Y.Z
make release IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z DOCKER_PUSH=true
git push upstream vX.Y.Z
```
5. Update argocd brew formula
```bash
git clone https://github.com/argoproj/homebrew-tap
cd homebrew-tap
./update.sh ~/go/src/github.com/argoproj/argo-cd/dist/argocd-darwin-amd64
git commit -a -m "Update argocd to vX.Y.Z"
git push
```
6. Update documentation:
* Edit CHANGELOG.md with release notes
* Update `stable` tag
```
git tag stable --force && git push upstream stable --force
```
* Create GitHub release from new tag and upload binaries (e.g. dist/argocd-darwin-amd64)

View File

@@ -1,14 +0,0 @@
# Prometheus Metrics
Argo CD exposes two sets of Prometheus metrics.
## Application Metrics
Metrics about applications. Scraped at the `argocd-metrics:8082/metrics` endpoint.
* Gauge for application health status
* Gauge for application sync status
* Counter for application sync history
## API Server Metrics
Metrics about API server request and response activity (request totals, response codes, etc...).
Scraped at the `argocd-server-metrics:8083/metrics` endpoint.
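A Prometheus scrape configuration sketch for both endpoints (assuming Prometheus runs in-cluster and can resolve these service names):
```yaml
scrape_configs:
- job_name: argocd-application-metrics
  static_configs:
  - targets: ['argocd-metrics:8082']
- job_name: argocd-server-metrics
  static_configs:
  - targets: ['argocd-server-metrics:8083']
```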

View File

@@ -1,14 +1,14 @@
# Parameter Overrides
Argo CD provides a mechanism to override the parameters of a ksonnet/helm app. This provides flexibility
in having most of the application manifests defined in git, while leaving room for *some* parts of the
k8s manifests determined dynamically, or outside of git. It also serves as an alternative way of
redeploying an application by changing application parameters via Argo CD, instead of making the
changes to the manifests in git.
ArgoCD provides a mechanism to override the parameters of a ksonnet/helm app. This gives some extra
flexibility in having most of the application manifests defined in git, while leaving room for
*some* parts of the k8s manifests determined dynamically, or outside of git. It also serves as an
alternative way of redeploying an application by changing application parameters via ArgoCD, instead
of making the changes to the manifests in git.
**NOTE:** many consider this mode of operation as an anti-pattern to GitOps, since the source of
truth becomes a union of the git repository, and the application overrides. The Argo CD parameter
overrides feature is provided mainly as a convenience to developers and is intended to be used in
truth becomes a union of the git repository, and the application overrides. The ArgoCD parameter
overrides feature is provided mainly convenience to developers and is intended to be used more for
dev/test environments, vs. production environments.
To use parameter overrides, run the `argocd app set -p (COMPONENT=)PARAM=VALUE` command:
@@ -17,28 +17,21 @@ argocd app set guestbook -p guestbook=image=example/guestbook:abcd123
argocd app sync guestbook
```
The `PARAM` is expected to be a normal YAML path:
```bash
argocd app set guestbook -p guestbook=ingress.enabled=true
argocd app set guestbook -p guestbook=ingress.hosts[0]=guestbook.myclusterurl
```
The following are situations where parameter overrides would be useful:
1. A team maintains a "dev" environment, which needs to be continually updated with the latest
version of their guestbook application after every build in the tip of master. To address this use
case, the application would expose a parameter named `image`, whose value used in the `dev`
case, the application would expose an parameter named `image`, whose value used in the `dev`
environment contains a placeholder value (e.g. `example/guestbook:replaceme`). The placeholder value
would be determined externally (outside of git) such as a build system. Then, as part of the build
pipeline, the parameter value of the `image` would be continually updated to the freshly built image
would be determined externally (outside of git) such as a build systems. Then, as part of the build
pipeline, the parameter value of the `image` would be continually updated to the freshly built image
(e.g. `argocd app set guestbook -p guestbook=image=example/guestbook:abcd123`). A sync operation
would result in the application being redeployed with the new image.
2. A repository of Helm manifests is already publicly available (e.g. https://github.com/helm/charts).
2. A repository of helm manifests is already publicly available (e.g. https://github.com/helm/charts).
Since commit access to the repository is unavailable, it is useful to be able to install charts from
the public repository and customize the deployment with different parameters, without resorting to
forking the repository to make the changes. For example, to install Redis from the Helm chart
the public repository, customizing the deployment with different parameters, without resorting to
forking the repository to make the changes. For example, to install redis from the helm chart
repository and customize the database password, you would run:
```

View File

@@ -1,51 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: my-project
spec:
# Project description
description: Example Project
# Allow manifests to deploy from any git repos
sourceRepos:
- '*'
# Only permit applications to deploy to the guestbook namespace in the same cluster
destinations:
- namespace: guestbook
server: https://kubernetes.default.svc
# Deny all cluster-scoped resources from being created, except for Namespace
clusterResourceWhitelist:
- group: ''
kind: Namespace
# Allow all namespaced-scoped resources to be created, except for ResourceQuota, LimitRange, NetworkPolicy
namespaceResourceBlacklist:
- group: ''
kind: ResourceQuota
- group: ''
kind: LimitRange
- group: ''
kind: NetworkPolicy
roles:
# A role which provides read-only access to all applications in the project
- name: read-only
description: Read-only privileges to my-project
policies:
- p, proj:my-project:read-only, applications, get, my-project/*, allow
groups:
- my-oidc-group
# A role which provides sync privileges to only the guestbook-dev application, e.g. to provide
# sync privileges to a CI system
- name: ci-role
description: Sync privileges for guestbook-dev
policies:
- p, proj:my-project:ci-role, applications, sync, my-project/guestbook-dev, allow
# NOTE: JWT tokens can only be generated by the API server and the token is not persisted
# anywhere by Argo CD. It can be prematurely revoked by removing the entry from this list.
jwtTokens:
- iat: 1535390316

View File

@@ -1,182 +0,0 @@
## Projects
Projects provide a logical grouping of applications, which is useful when Argo CD is used by multiple
teams. Projects provide the following features:
* restrict *what* may be deployed (trusted git source repositories)
* restrict *where* apps may be deployed to (destination clusters and namespaces)
* restrict what kinds of objects may or may not be deployed (e.g. RBAC, CRDs, DaemonSets, NetworkPolicy etc...)
* define project roles to provide application RBAC (bound to OIDC groups and/or JWT tokens)
### The default project
Every application belongs to a single project. If unspecified, an application belongs to the
`default` project, which is created automatically and, by default, permits deployments from any
source repo, to any cluster, and all resource Kinds. The default project can be modified, but not
deleted. When initially created, its specification is configured to be the most permissive:
```yaml
spec:
sourceRepos:
- '*'
destinations:
- namespace: '*'
server: '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'
```
### Creating Projects
Additional projects can be created to give separate teams different levels of access to namespaces.
The following command creates a new project `myproject` which can deploy applications to namespace
`mynamespace` of cluster `https://kubernetes.default.svc`. The permitted git source repository is
set to the `https://github.com/argoproj/argocd-example-apps.git` repository.
```
argocd proj create myproject -d https://kubernetes.default.svc,mynamespace -s https://github.com/argoproj/argocd-example-apps.git
```
### Managing Projects
Permitted source git repositories are managed using commands:
```bash
argocd proj add-source <PROJECT> <REPO>
argocd proj remove-source <PROJECT> <REPO>
```
Permitted destination clusters and namespaces are managed with the commands:
```
argocd proj add-destination <PROJECT> <CLUSTER>,<NAMESPACE>
argocd proj remove-destination <PROJECT> <CLUSTER>,<NAMESPACE>
```
Permitted destination K8s resource kinds are managed with the following commands. Note that namespace-scoped
resources are restricted via a blacklist, whereas cluster-scoped resources are restricted via a
whitelist.
```
argocd proj allow-cluster-resource <PROJECT> <GROUP> <KIND>
argocd proj allow-namespace-resource <PROJECT> <GROUP> <KIND>
argocd proj deny-cluster-resource <PROJECT> <GROUP> <KIND>
argocd proj deny-namespace-resource <PROJECT> <GROUP> <KIND>
```
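For example (using the signatures above; the empty group `''` denotes the core API group):
```
argocd proj allow-cluster-resource myproject '' Namespace
argocd proj deny-namespace-resource myproject '' ResourceQuota
```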
### Assign application to a project
The application project can be changed using the `app set` command. In order to change the project of
an app, the user must have permissions to access the new project.
```
argocd app set guestbook-default --project myproject
```
### Configuring RBAC with projects
Once projects have been defined, RBAC rules can be written to restrict access to the applications
in the project. The following example configures RBAC for two GitHub teams: `team1` and `team2`,
both in the GitHub org, `some-github-org`. There are two projects, `project-a` and `project-b`.
`team1` can only manage applications in `project-a`, while `team2` can only manage applications in
`project-b`. Both `team1` and `team2` have the ability to manage repositories.
*ConfigMap `argocd-rbac-cm` example:*
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
data:
policy.default: ""
policy.csv: |
p, some-github-org:team1, applications, *, project-a/*, allow
p, some-github-org:team2, applications, *, project-a/*, allow
p, role:org-admin, repositories, get, *, allow
p, role:org-admin, repositories, create, *, allow
p, role:org-admin, repositories, update, *, allow
p, role:org-admin, repositories, delete, *, allow
g, some-github-org:team1, org-admin
g, some-github-org:team2, org-admin
```
## Project Roles
Projects include a feature called roles that enables automated access to a project's applications.
These can be used to give a CI pipeline a restricted set of permissions. For example, a CI system
may only be able to sync a single app (but not change its source or destination).
Projects can have multiple roles, and those roles can have different access granted to them. These
permissions are called policies, and they are stored within the role as a list of policy strings.
A role's policies can only grant access to that role and are limited to applications within the role's
project. However, a policy has an option for granting wildcard access to any application
within a project.
In order to create roles in a project and add policies to a role, a user will need permission to
update a project. The following commands can be used to manage a role.
```bash
argocd proj role list
argocd proj role get
argocd proj role create
argocd proj role delete
argocd proj role add-policy
argocd proj role remove-policy
```
A project role is not useful on its own without generating a token to associate with it. Argo CD
supports JWT tokens as the means to authenticate to a role. Since the JWT token is
associated with a role's policies, any changes to the role's policies will immediately take effect
for that JWT token.
The following commands are used to manage the JWT tokens.
```bash
argocd proj role create-token PROJECT ROLE-NAME
argocd proj role delete-token PROJECT ROLE-NAME ISSUED-AT
```
Since the JWT tokens aren't stored in Argo CD, they can only be retrieved when they are created. A
user can leverage them in the CLI by either passing them in using the `--auth-token` flag or setting
the `ARGOCD_AUTH_TOKEN` environment variable. The JWT tokens can be used until they expire or are
revoked. They can be created with or without an expiration, but by default the CLI
creates them without an expiration date. Even if a token has not expired, it cannot be used once
it has been revoked.
Below is an example of leveraging a JWT token to access a guestbook application. It makes the
assumption that the user already has a project named myproject and an application called
guestbook-default.
```bash
PROJ=myproject
APP=guestbook-default
ROLE=get-role
argocd proj role create $PROJ $ROLE
argocd proj role create-token $PROJ $ROLE -e 10m
JWT=<value from command above>
argocd proj role list $PROJ
argocd proj role get $PROJ $ROLE
# This command will fail because the JWT Token associated with the project role does not have a policy to allow access to the application
argocd app get $APP --auth-token $JWT
# Adding a policy to grant access to the application for the new role
argocd proj role add-policy $PROJ $ROLE --action get --permission allow --object $APP
argocd app get $APP --auth-token $JWT
# Removing the policy we added and adding one with a wildcard.
argocd proj role remove-policy $PROJ $ROLE -a get -o $APP
argocd proj role add-policy $PROJ $ROLE -a get --permission allow -o '*'
# The wildcard policy now allows us to access the application.
argocd app get $APP --auth-token $JWT
argocd proj role get $PROJ
argocd proj role get $PROJ $ROLE
# Revoking the JWT token
argocd proj role delete-token $PROJ $ROLE <id field from the last command>
# This will fail since the JWT Token was deleted for the project role.
argocd app get $APP --auth-token $JWT
```

View File

@@ -2,39 +2,102 @@
## Overview
The RBAC feature enables restriction of access to Argo CD resources. Argo CD does not have its own
user management system and has only one built-in user `admin`. The `admin` user is a superuser and
it has unrestricted access to the system. RBAC requires [SSO configuration](./sso.md). Once SSO is
configured, additional RBAC roles can be defined, and SSO groups can then be mapped to roles.
The feature RBAC allows restricting access to ArgoCD resources. ArgoCD does not have own user management system and has only one built-in user `admin`. The `admin` user is a
superuser and it has full access. RBAC requires configuring [SSO](./sso.md) integration. Once [SSO](./sso.md) is connected you can define RBAC roles and map roles to groups.
## Configure RBAC
RBAC configuration allows defining roles and groups. Argo CD has two pre-defined roles:
* `role:readonly` - read-only access to all resources
* `role:admin` - unrestricted access to all resources
These role definitions can be seen in [builtin-policy.csv](../assets/builtin-policy.csv)
RBAC configuration allows defining roles and groups. ArgoCD has two pre-defined roles: role `role:readonly` which provides read-only access to all resources and role `role:admin`
which provides full access. Role definitions are available in [builtin-policy.csv](../util/rbac/builtin-policy.csv) file.
Additional roles and groups can be configured in `argocd-rbac-cm` ConfigMap. The example below
configures a custom role, named `org-admin`. The role is assigned to any user which belongs to
`your-github-org:your-team` group. All other users get the default policy of `role:readonly`,
which cannot modify Argo CD settings.
Additional roles and groups can be configured in `argocd-rbac-cm` ConfigMap. The example below custom role `org-admin`. The role is assigned to any user which belongs to
`your-github-org:your-team` group. All other users get `role:readonly` and cannot modify ArgoCD settings.
*ConfigMap `argocd-rbac-cm` example:*
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
data:
policy.default: role:readonly
policy.csv: |
p, role:org-admin, applications, *, */*, allow
p, role:org-admin, clusters, get, *, allow
p, role:org-admin, repositories, get, *, allow
p, role:org-admin, repositories, create, *, allow
p, role:org-admin, repositories, update, *, allow
p, role:org-admin, repositories, delete, *, allow
p, role:org-admin, applications, *, */*
p, role:org-admin, applications/*, *, */*
p, role:org-admin, clusters, get, *
p, role:org-admin, repositories, get, *
p, role:org-admin, repositories/apps, get, *
p, role:org-admin, repositories, create, *
p, role:org-admin, repositories, update, *
p, role:org-admin, repositories, delete, *
g, your-github-org:your-team, role:org-admin
kind: ConfigMap
metadata:
name: argocd-rbac-cm
```
## Configure Projects
Argo projects allow grouping applications which is useful if ArgoCD is used by multiple teams. Additionally, projects restrict source repositories and destination
Kubernetes clusters which can be used by applications belonging to the project.
### 1. Create new project
The following command creates project `myproject`, which can deploy applications to the namespace `default` of the cluster `https://kubernetes.default.svc`. The valid application source is defined in the `https://github.com/argoproj/argocd-example-apps.git` repository.
```
argocd proj create myproject -d https://kubernetes.default.svc,default -s https://github.com/argoproj/argocd-example-apps.git
```
Project sources and destinations can be managed using the commands:
```
argocd project add-destination
argocd project remove-destination
argocd project add-source
argocd project remove-source
```
### 2. Assign application to a project
Each application belongs to a project. By default, all applications belong to the default project, which provides access to any source repo/cluster. The application project can be
changed using the `app set` command:
```
argocd app set guestbook-default --project myproject
```
### 3. Update RBAC rules
The following example configures admin access for two teams. Each team has access only to applications of one project (`team1` can access the `default` project and `team2` can access the
`myproject` project).
*ConfigMap `argocd-rbac-cm` example:*
```yaml
apiVersion: v1
data:
policy.default: ""
policy.csv: |
p, role:team1-admin, applications, *, default/*
p, role:team1-admin, applications/*, *, default/*
p, role:team2-admin, applications, *, myproject/*
p, role:team2-admin, applications/*, *, myproject/*
p, role:org-admin, clusters, get, *
p, role:org-admin, repositories, get, *
p, role:org-admin, repositories/apps, get, *
p, role:org-admin, repositories, create, *
p, role:org-admin, repositories, update, *
p, role:org-admin, repositories, delete, *
g, role:team1-admin, org-admin
g, role:team2-admin, org-admin
g, your-github-org:your-team1, role:team1-admin
g, your-github-org:your-team2, role:team2-admin
kind: ConfigMap
metadata:
name: argocd-rbac-cm
```

View File

@@ -2,8 +2,8 @@
## Overview
Synchronization can be configured using resource hooks. Hooks are ways to interject custom logic before, during,
and after a Sync operation. Some use cases for hooks are:
Hooks are ways to interject custom logic before, during, and after a Sync operation. Some use cases
for hooks are:
* Using a `PreSync` hook to perform a database schema migration before deploying a new version of the app.
* Using a `Sync` hook to orchestrate a complex deployment requiring more sophistication than the
kubernetes rolling update strategy (e.g. a blue/green deployment).
@@ -22,9 +22,9 @@ metadata:
argocd.argoproj.io/hook: PreSync
```
During a Sync operation, Argo CD will create the resource during the appropriate stage of the
deployment. Hooks can be any type of Kubernetes resource kind, but tend to be most useful as a Pod,
[Job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/)
During a Sync operation, ArgoCD will create the resource during the appropriate stage of the
deployment. Hooks can be any type of Kubernetes resource kind, but tend to be most useful as
[Kubernetes Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/)
or [Argo Workflows](https://github.com/argoproj/argo). Multiple hooks can be specified as a comma
separated list.
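For example, a sketch of a `PreSync` database migration hook as a Job (the image and command are hypothetical placeholders):
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  generateName: schema-migrate-
  annotations:
    argocd.argoproj.io/hook: PreSync
spec:
  template:
    spec:
      containers:
      - name: migrate
        image: example/db-migrator:v1   # hypothetical image
        command: ["/migrate.sh"]        # hypothetical command
      restartPolicy: Never
```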
@@ -35,7 +35,7 @@ The following hooks are defined:
|------|-------------|
| `PreSync` | Executes prior to the apply of the manifests. |
| `Sync` | Executes after all `PreSync` hooks completed and were successful. Occurs in conjunction with the apply of the manifests. |
| `Skip` | Indicates to Argo CD to skip the apply of the manifest. This is typically used in conjunction with a `Sync` hook which is presumably handling the deployment in an alternate way (e.g. blue-green deployment) |
| `Skip` | Indicates to ArgoCD to skip the apply of the manifest. This is typically used in conjunction with a `Sync` hook which is presumably handling the deployment in an alternate way (e.g. blue-green deployment) |
| `PostSync` | Executes after all `Sync` hooks completed and were successful, a successful apply, and all resources in a `Healthy` state. |
@@ -50,21 +50,12 @@ metadata:
generateName: integration-test-
annotations:
argocd.argoproj.io/hook: PostSync
argocd.argoproj.io/hook-delete-policy: HookSucceeded
argocd.argoproj.io/hook-delete-policy: OnSuccess
```
The following policies define when the hook will be deleted.
| Policy | Description |
|--------|-------------|
| `HookSucceeded` | The hook resource is deleted after the hook succeeded (e.g. Job/Workflow completed successfully). |
| `HookFailed` | The hook resource is deleted after the hook failed. |
As an alternative to hook deletion policies, both Jobs and Argo Workflows support the
[`ttlSecondsAfterFinished`](https://kubernetes.io/docs/concepts/workloads/controllers/ttlafterfinished/)
field in the spec, which let their respective controllers delete the Job/Workflow after it completes.
```yaml
spec:
ttlSecondsAfterFinished: 600
```
| `OnSuccess` | The hook resource is deleted after the hook succeeded (e.g. Job/Workflow completed successfully). |
| `OnFailure` | The hook resource is deleted after the hook failed. |

View File

@@ -1,166 +0,0 @@
# Security
Argo CD has undergone rigorous internal security reviews and penetration testing to satisfy [PCI
compliance](https://www.pcisecuritystandards.org) requirements. The following are some security
topics and implementation details of Argo CD.
## Authentication
Authentication to Argo CD API server is performed exclusively using [JSON Web Tokens](https://jwt.io)
(JWTs). Username/password bearer tokens are not used for authentication. The JWT is obtained/managed
in one of the following ways:
1. For the local `admin` user, a username/password is exchanged for a JWT using the `/api/v1/session`
endpoint. This token is signed & issued by the Argo CD API server itself, and has no expiration.
When the admin password is updated, all existing admin JWT tokens are immediately revoked.
The password is stored as a bcrypt hash in the [`argocd-secret`](../manifests/base/argocd-secret.yaml) Secret.
2. For Single Sign-On users, the user completes an OAuth2 login flow to the configured OIDC identity
provider (either delegated through the bundled Dex provider, or directly to a self-managed OIDC
provider). This JWT is signed & issued by the IDP, and expiration and revocation are handled by
the provider. Dex tokens expire after 24 hours.
3. Automation tokens are generated for a project using the `/api/v1/projects/{project}/roles/{role}/token`
endpoint, and are signed & issued by Argo CD. These tokens are limited in scope and privilege,
and can only be used to manage application resources in the project which it belongs to. Project
JWTs have a configurable expiration and can be immediately revoked by deleting the JWT reference
ID from the project role.
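As an illustration, a sketch of requesting such a token over the API (the endpoint is from the text above; the HTTP verb, auth header, and variables are assumptions here, and the same can be done with `argocd proj role create-token`):
```bash
# assumes $ARGOCD_SERVER points at the API server and $SESSION_TOKEN is an existing session JWT
curl -sk -X POST \
  -H "Authorization: Bearer $SESSION_TOKEN" \
  "https://$ARGOCD_SERVER/api/v1/projects/myproject/roles/ci-role/token"
```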
## Authorization
Authorization is performed by iterating the list of group membership in a user's JWT groups claims,
and comparing each group against the roles/rules in the [RBAC](rbac.md) policy. Any matched rule
permits access to the API request.
## TLS
All network communication is performed over TLS including service-to-service communication between
the three components (argocd-server, argocd-repo-server, argocd-application-controller). The Argo CD
API server can enforce the use of TLS 1.2 using the flag: `--tlsminversion 1.2`.
## Sensitive Information
### Secrets
Argo CD never returns sensitive data from its API, and redacts all sensitive data in API payloads
and logs. This includes:
* cluster credentials
* git credentials
* OAuth2 client secrets
* Kubernetes Secret values
### External Cluster Credentials
To manage external clusters, Argo CD stores the credentials of the external cluster as a Kubernetes
Secret in the argocd namespace. This secret contains the K8s API bearer token associated with the
`argocd-manager` ServiceAccount created during `argocd cluster add`, along with connection options
to that API server (TLS configuration/certs, aws-iam-authenticator RoleARN, etc...).
The information is used to reconstruct a REST config and kubeconfig to the cluster used by Argo CD
services.
To rotate the bearer token used by Argo CD, the token can be deleted (e.g. using kubectl), which
causes Kubernetes to generate a new secret with a new bearer token. The new token can be re-entered
into Argo CD by re-running `argocd cluster add`. Run the following commands against the *_managed_*
cluster:
```bash
# run using a kubeconfig for the externally managed cluster
kubectl delete secret argocd-manager-token-XXXXXX -n kube-system
argocd cluster add CONTEXTNAME
```
To revoke Argo CD's access to a managed cluster, delete the RBAC artifacts against the *_managed_*
cluster, and remove the cluster entry from Argo CD:
```bash
# run using a kubeconfig for the externally managed cluster
kubectl delete sa argocd-manager -n kube-system
kubectl delete clusterrole argocd-manager-role
kubectl delete clusterrolebinding argocd-manager-role-binding
argocd cluster rm https://your-kubernetes-cluster-addr
```
> NOTE: for AWS EKS clusters, [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator)
is used to authenticate to the external cluster, which uses IAM roles in lieu of locally stored
tokens, so token rotation is not needed, and revocation is handled through IAM.
## Cluster RBAC
By default, Argo CD uses a [cluster-admin level role](../manifests/cluster-install/application-controller/argocd-application-controller-clusterrole.yaml)
in order to:
1. watch & operate on cluster state
2. deploy resources to the cluster
Although Argo CD requires cluster-wide **_read_** privileges to resources in the managed cluster to
function properly, it does not necessarily need full **_write_** privileges to the cluster. The
ClusterRole used by argocd-server and argocd-application-controller can be modified such
that write privileges are limited to only the namespaces and resources that you wish Argo CD to
manage.
To fine-tune privileges of externally managed clusters, edit the ClusterRole of the `argocd-manager-role`
```bash
# run using a kubeconfig for the externally managed cluster
kubectl edit clusterrole argocd-manager-role
```
To fine-tune privileges which Argo CD has against its own cluster (i.e. https://kubernetes.default.svc),
edit the following cluster roles of the cluster Argo CD is running in:
```bash
# run using a kubeconfig to the cluster Argo CD is running in
kubectl edit clusterrole argocd-server
kubectl edit clusterrole argocd-application-controller
```
Note:
* If you want to deny Argo CD access to a kind of resource, add it as an [excluded resource](declarative-setup.md#resource-exclusion).
## Auditing
As a GitOps deployment tool, the git commit history provides a natural audit log of what changes
were made to application configuration, when they were made, and by whom. However, this audit log
only applies to what happened in git and does not necessarily correlate one-to-one with events
that happen in a cluster. For example, User A could have made multiple commits to application
manifests, but User B might only have synced those changes to the cluster sometime later.
To complement the git revision history, Argo CD emits Kubernetes Events of application activity,
indicating the responsible actor when applicable. For example:
```bash
$ kubectl get events
LAST SEEN FIRST SEEN COUNT NAME KIND SUBOBJECT TYPE REASON SOURCE MESSAGE
1m 1m 1 guestbook.157f7c5edd33aeac Application Normal ResourceCreated argocd-server admin created application
1m 1m 1 guestbook.157f7c5f0f747acf Application Normal ResourceUpdated argocd-application-controller Updated sync status: -> OutOfSync
1m 1m 1 guestbook.157f7c5f0fbebbff Application Normal ResourceUpdated argocd-application-controller Updated health status: -> Missing
1m 1m 1 guestbook.157f7c6069e14f4d Application Normal OperationStarted argocd-server admin initiated sync to HEAD (8a1cb4a02d3538e54907c827352f66f20c3d7b0d)
1m 1m 1 guestbook.157f7c60a55a81a8 Application Normal OperationCompleted argocd-application-controller Sync operation to 8a1cb4a02d3538e54907c827352f66f20c3d7b0d succeeded
1m 1m 1 guestbook.157f7c60af1ccae2 Application Normal ResourceUpdated argocd-application-controller Updated sync status: OutOfSync -> Synced
1m 1m 1 guestbook.157f7c60af5bc4f0 Application Normal ResourceUpdated argocd-application-controller Updated health status: Missing -> Progressing
1m 1m 1 guestbook.157f7c651990e848 Application Normal ResourceUpdated argocd-application-controller Updated health status: Progressing -> Healthy
```
These events can then be persisted for longer periods of time using other tools such as
[Event Exporter](https://github.com/GoogleCloudPlatform/k8s-stackdriver/tree/master/event-exporter) or
[Event Router](https://github.com/heptiolabs/eventrouter).
## WebHook Payloads
Payloads from webhook events are considered untrusted. Argo CD only examines the payload to infer
the involved applications of the webhook event (e.g. which repo was modified), then refreshes
the related application for reconciliation. This refresh is the same refresh which occurs regularly
at three minute intervals, just fast-tracked by the webhook event.
## Reporting Vulnerabilities
Please report security vulnerabilities by e-mailing:
* Jesse_Suen@intuit.com
* Alexander_Matyushentsev@intuit.com
* Edward_Lee@intuit.com

View File

@@ -2,54 +2,42 @@
## Overview
Argo CD does not have any local users other than the built-in `admin` user. All other users are
expected to login via SSO. There are two ways that SSO can be configured:
* Bundled Dex OIDC provider - use this option if your current provider does not support OIDC (e.g. SAML,
LDAP) or if you wish to leverage any of Dex's connector features (e.g. the ability to map GitHub
organizations and teams to OIDC groups claims).
* Existing OIDC provider - use this if you already have an OIDC provider which you are using (e.g.
Okta, OneLogin, Auth0, Microsoft), where you manage your users, groups, and memberships.
## Dex
Argo CD embeds and bundles [Dex](https://github.com/coreos/dex) as part of its installation, for the
ArgoCD embeds and bundles [Dex](https://github.com/coreos/dex) as part of its installation, for the
purpose of delegating authentication to an external identity provider. Multiple types of identity
providers are supported (OIDC, SAML, LDAP, GitHub, etc...). SSO configuration of Argo CD requires
editing the `argocd-cm` ConfigMap with
[Dex connector](https://github.com/coreos/dex/tree/master/Documentation/connectors) settings.
providers are supported (OIDC, SAML, LDAP, GitHub, etc...). SSO configuration of ArgoCD requires
editing the `argocd-cm` ConfigMap with
[Dex connector](https://github.com/coreos/dex/tree/master/Documentation/connectors) settings.
This document describes how to configure Argo CD SSO using GitHub (OAuth2) as an example, but the
This document describes how to configure ArgoCD SSO using GitHub (OAuth2) as an example, but the
steps should be similar for other identity providers.
### 1. Register the application in the identity provider
In GitHub, register a new application. The callback address should be the `/api/dex/callback`
endpoint of your Argo CD URL (e.g. https://argocd.example.com/api/dex/callback).
endpoint of your ArgoCD URL (e.g. https://argocd.example.com/api/dex/callback).
![Register OAuth App](assets/register-app.png "Register OAuth App")
After registering the app, you will receive an OAuth2 client ID and secret. These values will be
inputted into the Argo CD configmap.
inputted into the ArgoCD configmap.
![OAuth2 Client Config](assets/oauth2-config.png "OAuth2 Client Config")
### 2. Configure Argo CD for SSO
### 2. Configure ArgoCD for SSO
Edit the argocd-cm configmap:
```
kubectl edit configmap argocd-cm -n argocd
kubectl edit configmap argocd-cm
```
* In the `url` key, input the base URL of Argo CD. In this example, it is https://argocd.example.com
* In the `url` key, input the base URL of ArgoCD. In this example, it is https://argocd.example.com
* In the `dex.config` key, add the `github` connector to the `connectors` sub field. See Dex's
[GitHub connector](https://github.com/coreos/dex/blob/master/Documentation/connectors/github.md)
documentation for explanation of the fields. A minimal config should populate the clientID and
clientSecret generated in Step 1.
* You will very likely want to restrict logins to one or more GitHub organization. In the
* You will very likely want to restrict logins to one ore more GitHub organization. In the
`connectors.config.orgs` list, add one or more GitHub organizations. Any member of the org will
then be able to login to Argo CD to perform management tasks.
then be able to login to ArgoCD to perform management tasks.
```
data:
@@ -77,6 +65,15 @@ data:
clientSecret: $dex.acme.clientSecret
orgs:
- name: your-github-org
# OIDC example (e.g. Okta)
- type: oidc
id: okta
name: Okta
config:
issuer: https://dev-123456.oktapreview.com
clientID: aaaabbbbccccddddeee
clientSecret: $dex.okta.clientSecret
```
After saving, the changes should take effect automatically.
@@ -85,28 +82,6 @@ NOTES:
* Any values which start with '$' will look to a key in argocd-secret of the same name (minus the $),
to obtain the actual value. This allows you to store the `clientSecret` as a kubernetes secret.
* There is no need to set `redirectURI` in the `connectors.config` as shown in the dex documentation.
Argo CD will automatically use the correct `redirectURI` for any OAuth2 connectors, to match the
ArgoCD will automatically use the correct `redirectURI` for any OAuth2 connectors, to match the
correct external callback URL (e.g. https://argocd.example.com/api/dex/callback)
## Existing OIDC provider
To configure Argo CD to delegate authentication to your existing OIDC provider, add the OAuth2
configuration to the `argocd-cm` ConfigMap under the `oidc.config` key:
```
data:
url: https://argocd.example.com
oidc.config: |
name: Okta
issuer: https://dev-123456.oktapreview.com
clientID: aaaabbbbccccddddeee
clientSecret: $oidc.okta.clientSecret
# Some OIDC providers require a separate clientID for different callback URLs.
# For example, if configuring Argo CD with self-hosted Dex, you will need a separate client ID
# for the 'localhost' (CLI) client to Dex. This field is optional. If omitted, the CLI will
# use the same clientID as the Argo CD server
cliClientID: vvvvwwwwxxxxyyyyzzzz
```

View File

@@ -1,26 +1,25 @@
# Tracking and Deployment Strategies
An Argo CD application spec provides several different ways of tracking Kubernetes resource manifests in
An ArgoCD application spec provides several different ways of tracking Kubernetes resource manifests in
git. This document describes the different techniques and the means of deploying those manifests to
the target environment.
## HEAD / Branch Tracking
## Branch Tracking
If a branch name, or a symbolic reference (like HEAD) is specified, Argo CD will continually compare
live state against the resource manifests defined at the tip of the specified branch or the
dereferenced commit of the symbolic reference.
If a branch name is specified, ArgoCD will continually compare live state against the resource
manifests defined at the tip of the specified branch.
To redeploy an application, a user makes changes to the manifests, and commits/pushes those
changes to the tracked branch/symbolic reference, which will then be detected by the Argo CD controller.
changes to the tracked branch, which will then be detected by ArgoCD controller.
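In the application spec, the tracked revision corresponds to the `spec.source.targetRevision` field, which accepts a branch, tag, or commit SHA. A minimal sketch:
```yaml
spec:
  source:
    repoURL: https://github.com/argoproj/argocd-example-apps.git
    path: guestbook
    targetRevision: HEAD
```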
## Tag Tracking
If a tag is specified, the manifests at the specified git tag will be used to perform the sync
comparison. This provides some advantages over branch tracking in that a tag is generally considered
more stable, and less frequently updated, with some manual judgement of what constitutes a tag.
To redeploy an application, the user uses git to change the meaning of a tag by retagging it to a
different commit SHA. Argo CD will detect the new meaning of the tag when performing the
different commit SHA. ArgoCD will detect the new meaning of the tag when performing the
comparison/sync.
## Commit Pinning
@@ -34,9 +33,9 @@ which is pinned to a commit, is by updating the tracking revision in the applica
commit containing the new manifests. Note that [parameter overrides](parameters.md) can still be set
on an application which is pinned to a revision.
## Automated Sync
## Auto-Sync [(Not Yet Implemented)](https://github.com/argoproj/argo-cd/issues/79)
In all tracking strategies, the application has the option to sync automatically. If [auto-sync](auto_sync.md)
In all tracking strategies, the application will have the option to sync automatically. If auto-sync
is configured, the new resources manifests will be applied automatically -- as soon as a difference
is detected between the target state (git) and live state. If auto-sync is disabled, a manual sync
will be needed using the Argo UI, CLI, or API.

View File

@@ -2,30 +2,24 @@
## Overview
Argo CD polls git repositories every three minutes to detect changes to the manifests. To eliminate
this delay from polling, the API server can be configured to receive webhook events. Argo CD supports
ArgoCD will poll git repositories every three minutes for changes to the manifests. To eliminate
this delay from polling, the API server can be configured to receive webhook events. ArgoCD supports
git webhook notifications from GitHub, GitLab, and BitBucket. The following explains how to configure
a git webhook for GitHub, but the same process should be applicable to other providers.
### 1. Create the webhook in the git provider
In your git provider, navigate to the settings page where webhooks can be configured. The payload
URL configured in the git provider should use the `/api/webhook` endpoint of your Argo CD instance
(e.g. https://argocd.example.com/api/webhook). If you wish to use a shared secret, input an
arbitrary value in the secret. This value will be used when configuring the webhook in the next step.
URL configured in the git provider should use the /api/webhook endpoint of your ArgoCD instance
(e.g. https://argocd.example.com/api/webhook). Input an arbitrary value in the secret. The same
value will be used when configuring the webhook in step 2.
![Add Webhook](assets/webhook-config.png "Add Webhook")
### 2. Configure Argo CD with the webhook secret (optional)
### 2. Configure ArgoCD with the webhook secret
Configuring a webhook shared secret is optional, since Argo CD will still refresh applications
related to the git repository, even with unauthenticated webhook events. This is safe to do since
the contents of webhook payloads are considered untrusted, and will only result in a refresh of the
application (a process which already occurs at three-minute intervals). If Argo CD is publicly
accessible, then configuring a webhook secret is recommended to prevent a DDoS attack.
In the `argocd-secret` kubernetes secret, configure one of the following keys with the git
provider's webhook secret configured in step 1.
In the `argocd-secret` kubernetes secret, configure one of the following keys with the git provider
webhook secret configured in step 1.
| Provider | K8s Secret Key |
|---------- | ------------------------ |
@@ -33,16 +27,17 @@ provider's webhook secret configured in step 1.
| GitLab | `gitlab.webhook.secret` |
| BitBucket | `bitbucket.webhook.uuid` |
Edit the Argo CD kubernetes secret:
Edit the ArgoCD kubernetes secret:
```
kubectl edit secret argocd-secret -n argocd
kubectl edit secret argocd-secret
```
TIP: for ease of entering secrets, kubernetes supports inputting secrets in the `stringData` field,
which saves you the trouble of base64 encoding the values and copying them to the `data` field.
Simply copy the shared webhook secret created in step 1 to the corresponding
GitHub/GitLab/BitBucket key under the `stringData` field:
```
apiVersion: v1
kind: Secret
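# NOTE: the diff is truncated at this point; the rest of this block is an
# illustrative sketch, assuming the standard secret name and namespace. A
# GitHub webhook secret could be added under stringData like so:
metadata:
  name: argocd-secret
  namespace: argocd
type: Opaque
stringData:
  github.webhook.secret: my-github-webhook-secret
```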

gometalinter.json Normal file
View File

@@ -0,0 +1,26 @@
{
"Vendor": true,
"DisableAll": true,
"Deadline": "8m",
"Enable": [
"vet",
"gofmt",
"deadcode",
"errcheck",
"varcheck",
"structcheck",
"ineffassign",
"unconvert",
"misspell"
],
"Skip": [
"pkg/client",
"vendor/",
".pb.go"
],
"Exclude": [
"pkg/client",
"vendor/",
".pb.go"
]
}
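As a sketch of how this configuration might be consumed, gometalinter can be pointed at the file via its `--config` flag (invocation illustrative):
```
gometalinter --config=gometalinter.json ./...
```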

View File

@@ -1,19 +0,0 @@
#!/bin/bash
set -eu
CHANGED_GO_FILES=""
for file in $(git diff --name-only | grep '\.go$' || true); do
if [[ -f ${file} ]] ; then
CHANGED_GO_FILES="${CHANGED_GO_FILES} ${file}"
fi
done
if [[ "${CHANGED_GO_FILES}" != "" ]]; then
echo "Formatting imports"
goimports -w -local github.com/argoproj/argo-cd ${CHANGED_GO_FILES} ;
echo "Formatting code"
gofmt -w ${CHANGED_GO_FILES} ;
else
echo "No changed files to format"
fi

View File

@@ -64,8 +64,8 @@ for i in ${PROTO_FILES}; do
# Path to the google API gateway annotations.proto will be different depending if we are
# building natively (e.g. from workspace) vs. part of a docker build.
if [ -f /.dockerenv ]; then
GOOGLE_PROTO_API_PATH=$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
GOGO_PROTOBUF_PATH=$GOPATH/src/github.com/gogo/protobuf
GOOGLE_PROTO_API_PATH=/root/go/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
GOGO_PROTOBUF_PATH=/root/go/src/github.com/gogo/protobuf
else
GOOGLE_PROTO_API_PATH=${PROJECT_ROOT}/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
GOGO_PROTOBUF_PATH=${PROJECT_ROOT}/vendor/github.com/gogo/protobuf
@@ -87,7 +87,7 @@ done
collect_swagger() {
SWAGGER_ROOT="$1"
EXPECTED_COLLISIONS="$2"
SWAGGER_OUT="${PROJECT_ROOT}/assets/swagger.json"
SWAGGER_OUT="${SWAGGER_ROOT}/swagger.json"
PRIMARY_SWAGGER=`mktemp`
COMBINED_SWAGGER=`mktemp`
@@ -117,7 +117,6 @@ clean_swagger() {
/usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete
}
collect_swagger server 24
collect_swagger server 15
clean_swagger server
clean_swagger reposerver
clean_swagger controller

View File

@@ -1,7 +0,0 @@
#!/bin/sh
# This script is used as the command supplied to GIT_ASKPASS as a way to supply username/password
# credentials to git, without having to use git credential helpers or keep credentials in on-disk config.
case "$1" in
Username*) echo "${GIT_USERNAME}" ;;
Password*) echo "${GIT_PASSWORD}" ;;
esac
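git runs the GIT_ASKPASS program once per prompt, passing a prompt string beginning with "Username" or "Password", which the case statement above matches. A hypothetical invocation (paths and credentials are placeholders):
```
GIT_ASKPASS=/path/to/git-ask-pass.sh \
GIT_USERNAME=someuser \
GIT_PASSWORD=somepassword \
git clone https://example.com/some/repo.git
```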

View File

@@ -0,0 +1,168 @@
package main
import (
"context"
"fmt"
"hash/fnv"
"log"
"os"
"strings"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/git"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// origRepoURLToSecretName derives the legacy secret name from a repo URL: the URL
// is hashed as-is, and only the final path component is lower-cased in the result.
// Part of the original repo name is incorporated for debugging purposes
func origRepoURLToSecretName(repo string) string {
repo = git.NormalizeGitURL(repo)
h := fnv.New32a()
_, _ = h.Write([]byte(repo))
parts := strings.Split(strings.TrimSuffix(repo, ".git"), "/")
return fmt.Sprintf("repo-%s-%v", strings.ToLower(parts[len(parts)-1]), h.Sum32())
}
// repoURLToSecretName derives the current secret name from a repo URL: the URL is
// lower-cased before hashing, so URLs differing only in case map to the same secret.
// Part of the original repo name is incorporated for debugging purposes
func repoURLToSecretName(repo string) string {
repo = strings.ToLower(git.NormalizeGitURL(repo))
h := fnv.New32a()
_, _ = h.Write([]byte(repo))
parts := strings.Split(strings.TrimSuffix(repo, ".git"), "/")
return fmt.Sprintf("repo-%s-%v", parts[len(parts)-1], h.Sum32())
}
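// Illustrative example (an assumption, not part of the original code): if
// git.NormalizeGitURL leaves a plain HTTPS URL unchanged, then
// "https://github.com/argoproj/Argo-CD.git" is lower-cased before hashing and
// yields a name of the form "repo-argo-cd-<fnv32a sum>", the same as its
// all-lower-case equivalent, whereas origRepoURLToSecretName would hash the
// two URLs differently.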
// renameSecret renames a Kubernetes secret in a given namespace by creating a copy
// under the new name and then deleting the old secret.
func renameSecret(namespace, oldName, newName string) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
overrides := clientcmd.ConfigOverrides{}
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &overrides)
log.Printf("Renaming secret %q to %q in namespace %q\n", oldName, newName, namespace)
config, err := clientConfig.ClientConfig()
if err != nil {
log.Println("Could not retrieve client config: ", err)
return
}
kubeclientset := kubernetes.NewForConfigOrDie(config)
repoSecret, err := kubeclientset.CoreV1().Secrets(namespace).Get(oldName, metav1.GetOptions{})
if err != nil {
log.Println("Could not retrieve old secret: ", err)
return
}
repoSecret.ObjectMeta.Name = newName
repoSecret.ObjectMeta.ResourceVersion = ""
repoSecret, err = kubeclientset.CoreV1().Secrets(namespace).Create(repoSecret)
if err != nil {
log.Println("Could not create new secret: ", err)
return
}
err = kubeclientset.CoreV1().Secrets(namespace).Delete(oldName, &metav1.DeleteOptions{})
if err != nil {
log.Println("Could not remove old secret: ", err)
}
}
// renameRepositorySecrets ensures that repository secrets use the new naming format.
func renameRepositorySecrets(clientOpts argocdclient.ClientOptions, namespace string) {
conn, repoIf := argocdclient.NewClientOrDie(&clientOpts).NewRepoClientOrDie()
defer util.Close(conn)
repos, err := repoIf.List(context.Background(), &repository.RepoQuery{})
if err != nil {
log.Println("An error occurred, so skipping secret renaming: ", err)
return
}
log.Println("Renaming repository secrets...")
for _, repo := range repos.Items {
oldSecretName := origRepoURLToSecretName(repo.Repo)
newSecretName := repoURLToSecretName(repo.Repo)
if oldSecretName != newSecretName {
log.Printf("Repo %q had its secret name change, so updating\n", repo.Repo)
renameSecret(namespace, oldSecretName, newSecretName)
}
}
}
/*
// populateAppDestinations ensures that apps have a Server and Namespace set explicitly.
func populateAppDestinations(clientOpts argocdclient.ClientOptions) {
conn, appIf := argocdclient.NewClientOrDie(&clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
apps, err := appIf.List(context.Background(), &application.ApplicationQuery{})
if err != nil {
log.Println("An error occurred, so skipping destination population: ", err)
return
}
log.Println("Populating app Destination fields")
for _, app := range apps.Items {
changed := false
log.Printf("Ensuring destination field is populated on app %q\n", app.ObjectMeta.Name)
if app.Spec.Destination.Server == "" {
if app.Status.ComparisonResult.Status == appv1.ComparisonStatusUnknown || app.Status.ComparisonResult.Status == appv1.ComparisonStatusError {
log.Printf("App %q was missing Destination.Server, but could not fill it in: %s", app.ObjectMeta.Name, app.Status.ComparisonResult.Status)
} else {
log.Printf("App %q was missing Destination.Server, so setting to %q\n", app.ObjectMeta.Name, app.Status.ComparisonResult.Server)
app.Spec.Destination.Server = app.Status.ComparisonResult.Server
changed = true
}
}
if app.Spec.Destination.Namespace == "" {
if app.Status.ComparisonResult.Status == appv1.ComparisonStatusUnknown || app.Status.ComparisonResult.Status == appv1.ComparisonStatusError {
log.Printf("App %q was missing Destination.Namespace, but could not fill it in: %s", app.ObjectMeta.Name, app.Status.ComparisonResult.Status)
} else {
log.Printf("App %q was missing Destination.Namespace, so setting to %q\n", app.ObjectMeta.Name, app.Status.ComparisonResult.Namespace)
app.Spec.Destination.Namespace = app.Status.ComparisonResult.Namespace
changed = true
}
}
if changed {
_, err = appIf.UpdateSpec(context.Background(), &application.ApplicationSpecRequest{
AppName: app.Name,
Spec: &app.Spec,
})
if err != nil {
log.Println("An error occurred (but continuing anyway): ", err)
}
}
}
}
*/
func main() {
if len(os.Args) < 3 {
log.Fatalf("USAGE: %s SERVER NAMESPACE\n", os.Args[0])
}
server, namespace := os.Args[1], os.Args[2]
log.Printf("Using argocd server %q and namespace %q\n", server, namespace)
isLocalhost := false
switch {
case strings.HasPrefix(server, "localhost:"):
isLocalhost = true
case strings.HasPrefix(server, "127.0.0.1:"):
isLocalhost = true
}
clientOpts := argocdclient.ClientOptions{
ServerAddr: server,
Insecure: true,
PlainText: isLocalhost,
}
renameRepositorySecrets(clientOpts, namespace)
//populateAppDestinations(clientOpts)
}
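A hypothetical invocation (the binary name is an assumption; connecting via localhost selects plaintext mode per the switch above):
```
./migrate localhost:8080 argocd
```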

View File

@@ -1,8 +0,0 @@
# This file was automatically generated. DO NOT EDIT
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H

View File

@@ -1,25 +1,11 @@
#!/bin/sh -x -e
#!/bin/sh
SRCROOT="$( CDPATH='' cd -- "$(dirname "$0")/.." && pwd -P )"
AUTOGENMSG="# This is an auto-generated file. DO NOT EDIT"
IMAGE_NAMESPACE=${IMAGE_NAMESPACE:='argoproj'}
IMAGE_TAG=${IMAGE_TAG:='latest'}
cd ${SRCROOT}/manifests/ha/base/redis-ha && ./generate.sh
IMAGE_NAMESPACE="${IMAGE_NAMESPACE:-argoproj}"
IMAGE_TAG="${IMAGE_TAG:-latest}"
cd ${SRCROOT}/manifests/base && kustomize edit set image argoproj/argocd=${IMAGE_NAMESPACE}/argocd:${IMAGE_TAG} argoproj/argocd-ui=${IMAGE_NAMESPACE}/argocd-ui:${IMAGE_TAG}
cd ${SRCROOT}/manifests/ha/base && kustomize edit set image argoproj/argocd=${IMAGE_NAMESPACE}/argocd:${IMAGE_TAG} argoproj/argocd-ui=${IMAGE_NAMESPACE}/argocd-ui:${IMAGE_TAG}
echo "${AUTOGENMSG}" > "${SRCROOT}/manifests/install.yaml"
kustomize build "${SRCROOT}/manifests/cluster-install" >> "${SRCROOT}/manifests/install.yaml"
echo "${AUTOGENMSG}" > "${SRCROOT}/manifests/namespace-install.yaml"
kustomize build "${SRCROOT}/manifests/namespace-install" >> "${SRCROOT}/manifests/namespace-install.yaml"
echo "${AUTOGENMSG}" > "${SRCROOT}/manifests/ha/install.yaml"
kustomize build "${SRCROOT}/manifests/ha/cluster-install" >> "${SRCROOT}/manifests/ha/install.yaml"
echo "${AUTOGENMSG}" > "${SRCROOT}/manifests/ha/namespace-install.yaml"
kustomize build "${SRCROOT}/manifests/ha/namespace-install" >> "${SRCROOT}/manifests/ha/namespace-install.yaml"
for i in "$(ls manifests/components/*.yaml)"; do
sed -i '' 's@\( image: \(.*\)/\(argocd-.*\):.*\)@ image: '"${IMAGE_NAMESPACE}"'/\3:'"${IMAGE_TAG}"'@g' $i
done
echo "# This is an auto-generated file. DO NOT EDIT" > manifests/install.yaml
cat manifests/components/*.yaml >> manifests/install.yaml
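For instance, to regenerate the manifests against a custom image namespace and tag, the script honors the IMAGE_NAMESPACE and IMAGE_TAG environment variables (the script path and values below are illustrative):
```
IMAGE_NAMESPACE=example IMAGE_TAG=v0.12.0 ./hack/update-manifests.sh
```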

View File

@@ -1,24 +0,0 @@
#!/bin/bash
set -e
KNOWN_HOSTS_FILE=$(dirname "$0")/ssh_known_hosts
HEADER="# This file was automatically generated. DO NOT EDIT"
echo "$HEADER" > $KNOWN_HOSTS_FILE
ssh-keyscan github.com gitlab.com bitbucket.org ssh.dev.azure.com vs-ssh.visualstudio.com | sort -u >> $KNOWN_HOSTS_FILE
chmod 0644 $KNOWN_HOSTS_FILE
# Public SSH keys can be verified at the following URLs:
# - github.com: https://help.github.com/articles/github-s-ssh-key-fingerprints/
# - gitlab.com: https://docs.gitlab.com/ee/user/gitlab_com/#ssh-host-keys-fingerprints
# - bitbucket.org: https://confluence.atlassian.com/bitbucket/ssh-keys-935365775.html
# - ssh.dev.azure.com, vs-ssh.visualstudio.com: https://docs.microsoft.com/en-us/azure/devops/repos/git/use-ssh-keys-to-authenticate?view=azure-devops
diff - <(ssh-keygen -l -f $KNOWN_HOSTS_FILE | sort -k 3) <<EOF
2048 SHA256:zzXQOXSRBEiUtuE8AikJYKwbHaxvSc0ojez9YXaGp1A bitbucket.org (RSA)
2048 SHA256:nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8 github.com (RSA)
256 SHA256:HbW3g8zUjNSksFbqTiUWPWg2Bq1x8xdGUrliXFzSnUw gitlab.com (ECDSA)
256 SHA256:eUXGGm1YGsMAS7vkcx6JOJdOGHPem5gQp4taiCfCLB8 gitlab.com (ED25519)
2048 SHA256:ROQFvPThGrW4RuWLoL9tq9I9zJ42fK4XywyRtbOz/EQ gitlab.com (RSA)
2048 SHA256:ohD8VZEXGWo6Ez8GSEJQ9WpafgLFsOfLOtGGQCQo6Og ssh.dev.azure.com (RSA)
2048 SHA256:ohD8VZEXGWo6Ez8GSEJQ9WpafgLFsOfLOtGGQCQo6Og vs-ssh.visualstudio.com (RSA)
EOF

View File

@@ -1,16 +0,0 @@
# Argo CD Installation Manifests
Two sets of installation manifests are provided:
* [install.yaml](install.yaml) - Standard Argo CD installation with cluster-admin access. Use this
manifest set if you plan to use Argo CD to deploy applications in the same cluster that Argo CD runs
in (i.e. kubernetes.default.svc). It will still be able to deploy to external clusters with supplied
credentials.
* [namespace-install.yaml](namespace-install.yaml) - Installation of Argo CD which requires only
namespace-level privileges (does not need cluster roles). Use this manifest set if you do not
need Argo CD to deploy applications in the same cluster that Argo CD runs in, and will rely solely
on supplied cluster credentials. An example of using this set of manifests is running several
Argo CD instances for different teams, where each instance deploys applications to
external clusters. Deploying to the same cluster (kubernetes.default.svc) is still possible
with supplied credentials (i.e. `argocd cluster add <CONTEXT> --in-cluster`).
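As an illustration of the standard flow for the first manifest set (the `argocd` namespace is conventional, not mandated):
```
kubectl create namespace argocd
kubectl apply -n argocd -f install.yaml
```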

View File

@@ -1,37 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/name: argocd-application-controller
app.kubernetes.io/part-of: argocd
app.kubernetes.io/component: application-controller
name: argocd-application-controller
spec:
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: argocd-application-controller
template:
metadata:
labels:
app.kubernetes.io/name: argocd-application-controller
spec:
containers:
- command:
- argocd-application-controller
- --status-processors
- "20"
- --operation-processors
- "10"
image: argoproj/argocd:latest
imagePullPolicy: Always
name: argocd-application-controller
ports:
- containerPort: 8082
readinessProbe:
tcpSocket:
port: 8082
initialDelaySeconds: 5
periodSeconds: 10
serviceAccountName: argocd-application-controller

View File

@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: argocd-application-controller
app.kubernetes.io/part-of: argocd
app.kubernetes.io/component: application-controller
name: argocd-application-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argocd-application-controller
subjects:
- kind: ServiceAccount
name: argocd-application-controller

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: argocd-application-controller
app.kubernetes.io/part-of: argocd
app.kubernetes.io/component: application-controller
name: argocd-application-controller

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: argocd-metrics
app.kubernetes.io/part-of: argocd
app.kubernetes.io/component: metrics
name: argocd-metrics
spec:
ports:
- name: metrics
protocol: TCP
port: 8082
targetPort: 8082
selector:
app.kubernetes.io/name: argocd-application-controller

View File

@@ -1,9 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- argocd-application-controller-sa.yaml
- argocd-application-controller-role.yaml
- argocd-application-controller-rolebinding.yaml
- argocd-application-controller-deployment.yaml
- argocd-metrics.yaml

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
labels:
app.kubernetes.io/name: argocd-cm
app.kubernetes.io/part-of: argocd

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
labels:
app.kubernetes.io/name: argocd-rbac-cm
app.kubernetes.io/part-of: argocd

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
labels:
app.kubernetes.io/name: argocd-secret
app.kubernetes.io/part-of: argocd
type: Opaque

Some files were not shown because too many files have changed in this diff.