Compare commits

..

112 Commits

Author SHA1 Message Date
Jesse Suen
21b64886ce Bump version to v0.5.4 2018-06-27 15:56:38 -07:00
Jesse Suen
eeb1036dcf Refresh flag to sync should be optional, not required 2018-06-27 15:54:35 -07:00
Jesse Suen
fa990ab0c9 Support cluster management using the internal k8s API address https://kubernetes.default.svc (#307) 2018-06-20 16:51:31 -07:00
Jesse Suen
933f3da538 Support diffing a local ksonnet app to the live application state (resolves #239) (#298) 2018-06-20 13:57:55 -07:00
Jesse Suen
a7fa2fd256 Add ability to show last operation result in app get. Show path in app list -o wide (#297) 2018-06-19 02:04:45 -07:00
Jesse Suen
0de1a3b20a Update dependencies: ksonnet v0.11, golang v1.10, debian v9.4 (#296) 2018-06-18 14:34:10 -07:00
Jesse Suen
bcc114ec60 Add ability to force a refresh of an app during get (resolves #269) (#293) 2018-06-18 10:22:58 -07:00
Jesse Suen
1148fae419 Add clean-debug make target to prevent packr from boxing debug artifacts into binaries 2018-06-15 14:31:26 -07:00
Jesse Suen
d7188c29f8 Remove redundant 'argocd' namespace from manifests 2018-06-15 14:20:28 -07:00
Jesse Suen
4b97732659 Automatically restart API server upon certificate changes (#292) 2018-06-15 14:16:50 -07:00
Jesse Suen
8ff98cc6e1 Add RBAC unit test for wildcards with sub-resources 2018-06-14 12:50:16 -07:00
Jesse Suen
cf0c324a74 Add unit test for using resource & action wildcards in a RBAC policy. Bump version to v0.5.2 2018-06-14 12:41:26 -07:00
Jesse Suen
69119a21cd Update getting_started.md to point to v0.5.1 2018-06-14 11:13:05 -07:00
Alexander Matyushentsev
16fa41d25b Issue #275 - Application controller fails to get app state if app has resource without name (#285) 2018-06-14 09:08:22 -07:00
Alexander Matyushentsev
4e170c2033 Update version to v0.5.1 2018-06-13 14:30:35 -07:00
Alexander Matyushentsev
3fbbe940a1 Issue #283 - API server incorrectly composes application fully qualified name for RBAC check (#284) 2018-06-13 13:05:39 -07:00
Alexander Matyushentsev
271b57e5c5 Issue #260 - Rate limiter is preventing force refreshes (e.g. webhook) from functioning (#282) 2018-06-13 11:34:33 -07:00
Andrew Merenbach
df0e2e4015 Fail app sync if prune flag is required (#276)
* Add status field to resource details

* Update generated code

* Set up const message responses

* Check number of resources requiring pruning

* Fix imports

* Use string, thanks @alexmt

* Update generated code
2018-06-12 10:54:11 -07:00
Alexander Matyushentsev
9fa622d63b Issue #280 - It is impossible to restrict application access by repository URL (#281)
* Issue #280 - It is impossible to restrict application access by repository URL

* Apply reviewer note
2018-06-12 10:43:16 -07:00
Alexander Matyushentsev
fed2149174 Add progressing deadline to test app to fix e2e tests slowness 2018-06-12 08:54:47 -07:00
Alexander Matyushentsev
aa4291183b Take into account number of unavailable replicas to decide whether deployment is healthy or not (#270)
* Take into account number of unavailable replicas to decide whether deployment is healthy or not

* Run one controller for all e2e tests to reduce tests duration

* Apply reviewer notes: use logic from kubectl/rollout_status.go to check deployment health
2018-06-07 11:05:46 -07:00
Alexander Matyushentsev
0d3fc9648f Issue #271 - perform three way diff only if resource has expected state and live state with last-applied-configuration annotation (#274) 2018-06-07 10:29:36 -07:00
Jesse Suen
339138b576 Remove hard requirement of initializing OIDC app during server startup (resolves #272) 2018-06-07 02:07:53 -07:00
Jesse Suen
666769f9d9 Fix issue preventing proper parsing of claims subject in RBAC enforcement 2018-06-07 00:17:00 -07:00
Jesse Suen
8fc594bd2b Add missing list, patch, update verbs to application-controller-role 2018-06-06 17:51:18 -07:00
Andrew Merenbach
8cf8ad7e24 Tweak flags for import/export, thanks @jessesuen (#268) 2018-06-06 16:32:30 -07:00
Andrew Merenbach
0818f698e6 Support resource import/export (#255)
* Add initial prototype for export

* Add client opts to argocd-util

* Make flags local

* Support output to file without piping

* Add comment to NewExportCommand

* Vastly clean up output, thanks @alexmt @jessesuen

* Nullify operation, thanks @alexmt

* Add additional error check

* Rm extraneous fmt.Sprint

* Clone export command to import

* Flesh out import feature

* Use const string for YAML separator

* Don't export enclosing lists

* Almost finished prototyping import

* Create settings now, too

* Create all resources now

* Nullify certificate before export

* Add JSON annotations, update comment

* Warn, don't fail, if cluster/repo already exist, thanks @alexmt

* Use minus instead of stdin/stdout, thanks @jessesuen
2018-06-06 14:53:14 -07:00
Jesse Suen
44a33b0a5f Repo names containing underscores were not being accepted (resolves #258) 2018-06-06 14:26:43 -07:00
wanghong230
85078bdb66 fix #120 refactor the rbac code to support customizable claims enforcement function (#265) 2018-06-06 14:20:34 -07:00
Jesse Suen
30a3dba7ad argocd-server needs to be built using packr to bundle RBAC policy files. Update packr (resolves #266) 2018-06-06 12:35:25 -07:00
Jesse Suen
0afc671723 Retry argocd app wait connection errors from EOF watch. Show detailed state changes 2018-06-06 11:24:49 -07:00
Jesse Suen
12e7447e9f Implement RBAC support (issue #120) (#263)
* introduce rbac library around casbin
* supports claims enforcement by iteration through user's groups
* supports filtering of resources by level of access
* policy loader and automatic updates from configmap
* support for builtin and userdefined policies
2018-06-05 21:44:13 -07:00
Alexander Matyushentsev
b675e79b89 Add path to API /application/{repo}/ksonnet response (#264)
* Add path to API /application/{repo}/ksonnet response

* Fix indentation
2018-06-05 14:37:26 -07:00
Jesse Suen
febdccfb58 Introduce argocd app manifests for printing the application manifests from git or live (#261) 2018-06-05 12:59:29 -07:00
Alexander Matyushentsev
54835a0d93 Implement workaround for https://github.com/golang/go/issues/21955 (#256) 2018-06-04 13:52:07 -07:00
Jesse Suen
423fe3487c REST payload of create/update for repos and cluster should be actual object 2018-05-31 18:15:33 -07:00
Jesse Suen
98cb3f7950 Bump version to v0.5.0 2018-05-31 17:56:23 -07:00
Jesse Suen
371492bf5c Handle case where upsert could be nil. Use proper error codes. More RESTful endpoints 2018-05-31 17:54:27 -07:00
Jesse Suen
7df831e96d Clean up .proto definitions for consistency and reduction of pointer usage (#253) 2018-05-31 17:21:09 -07:00
Alexander Matyushentsev
f0be1bd251 Fix bug secret controller which is causing update loop in secret controller (#251) 2018-05-31 16:06:41 -07:00
Alexander Matyushentsev
948341a885 ListDir should not fail if Redis is down (#252) 2018-05-31 16:06:30 -07:00
Alexander Matyushentsev
1b2bf8ce0e GET /cluster/<clustername> API should not panic if invalid cluster url is provided (#250) 2018-05-31 15:13:07 -07:00
Andrew Merenbach
4f68a0f634 Wrap method signatures (#249)
* Update application create to use upsert attribute

* Update CLI interface

* Use pointer to upsert

* Rename DeleteApplicationRequest for parity

* Add new ApplicationUpdateRequest wrapper

* Rename RepoUpdateRequest => RepoRESTUpdateRequest

* Add new RepositoryUpdateRequest

* Rename ClusterUpdateRequest -> ClusterRESTUpdateRequest

* Fix var names

* Update var use

* Use intermediate vars for clarity

* Update generated code

* Update mocks

* Update e2e cluster creation
2018-05-31 14:21:08 -07:00
Alexander Matyushentsev
e785abeb8f Issue #244 - Cluster/Repository connection status (#248) 2018-05-31 13:44:19 -07:00
Jesse Suen
3acca5095e Add argocd app unset command to unset parameter overrides. Bump version to v0.4.5 2018-05-31 02:55:35 -07:00
Jesse Suen
5a62286127 Cookie token was not parsed properly when mixed with other site cookies 2018-05-31 02:37:15 -07:00
Jesse Suen
5452aff0be Add ability to show parameters and overrides in CLI (resolves #240) (#247) 2018-05-30 15:41:11 -07:00
Andrew Merenbach
0f4f1262af Add Events API endpoint (#237)
* Flesh out initial endpoint

* Update generated code

* Update prototype for list of events

* Update endpoints

* Update initialization of app service

* Use proper interfaces here

* Use event list

* Use preexisting events list struct

* Simplify initial architecture significantly

* Rename ListDirResponse => FileList, thanks @jessesuen

* Rm unneeded error check

* Narrow down event query, thanks @alexmt

* Use tests to fix bug

* Don't reinvent the wheel

* Rm comment

* Add Uid field, thanks @alexmt @jessesuen

* Update generated files

* Support external clusters, thanks @alexmt

* Filter by proper namespace
2018-05-30 15:30:58 -07:00
Jesse Suen
4e7f68ccba Update version to 0.4.4 2018-05-30 14:03:16 -07:00
Alexander Matyushentsev
96c05babe0 Issue #238 - add upsert flag to 'argocd app create' command (#245) 2018-05-30 13:49:20 -07:00
Andrew Merenbach
6b78cddb19 Add repo browsing endpoint (#229)
* Add skeleton ListDir endpoint

* Update proto with path field

* Add first working file retrieval

* Update git client to support paths

* Update proto file

* Flesh out prototype code for retrieving files

* Create repo server with repoclientset

* Rm unneeded test code

* Update generated code

* Use HTTP queries instead of URL components

* Error out properly

* Add missing fixture test

* Rm commented endpoint, thanks @alexmt

* Skip invalid app specs
2018-05-24 19:09:52 -07:00
Alexander Matyushentsev
12596ff936 Issue #233 - Controller does not persist rollback operation result (#234) 2018-05-24 10:50:33 -07:00
Jesse Suen
a240f1b2b9 Bump version to 0.5.0 2018-05-23 11:18:50 -07:00
Jesse Suen
f6da19672e Support subscribing to settings updates and auto-restart of dex and API server (resolves #174) (#227) 2018-05-23 10:01:07 -07:00
Jesse Suen
e81d30be9b Update getting_started.md to point to v0.4.3 2018-05-22 15:02:51 -07:00
Alexander Matyushentsev
13b090e3bd Issue #147 - App sync frequently fails due to concurrent app modification (#226) 2018-05-22 09:43:17 -07:00
Alexander Matyushentsev
d0479e6ddc Issue # 223 - Remove app finalizers during e2e fixture teardown (#225) 2018-05-22 09:23:42 -07:00
Andrew Merenbach
1432827006 Add error fields to cluster/repo, shell output (#200)
* Add error fields to cluster/repo, shell output

* Add missing format strings, thanks @alexmt

* Rename Error => Message

* Set JSON keys, thanks @jessesuen

* Update generated code
2018-05-22 08:48:24 -07:00
Jesse Suen
89bf4eac71 Bump version to 0.4.3 2018-05-21 15:27:01 -07:00
Jesse Suen
07aac0bdae Move local branch deletion as part of git Reset() (resolves #185) (#222) 2018-05-21 15:21:09 -07:00
Andrew Merenbach
61220b8d0d Fix exit code for app wait (#219) 2018-05-21 10:17:10 -07:00
Jesse Suen
4e470aaf09 Remove context name prompt during login. (#218)
* Show URL in argocd app get
* Rename force flag to cascade in argocd app delete
* Remove interactive context name prompt during login which broke login automation
* Rename apiclient.ServerClient to Client
2018-05-21 01:10:02 -07:00
Jesse Suen
76922b620b Update version to 0.4.2 2018-05-21 01:05:47 -07:00
Andrew Merenbach
ac0f623eda Add argocd app wait command (#216)
* Update CLI, server for wait request

* Update generated code

* Remove generated code

* Add timeout function, and use it

* Get first working prototype

* Properly fail and print success/fail messages

* Add missing reference pointer

* Remove unreachable code

* Show current state of all checks

* Print atypical health output status now

* Update short command description, thanks @jessesuen

* Use server-side watch command

* Use watch API

* Clean up wait function to use new API better

* Rm unused const, satisfy linter on caps names

* Rename channel and set direction

* Add infinite timeout by default
2018-05-18 11:50:01 -07:00
Jesse Suen
afd5450882 Bump version to v0.4.1 2018-05-17 18:31:46 -07:00
Jesse Suen
c17266fc21 Add documentation on how to configure SSO and Webhooks 2018-05-17 18:28:04 -07:00
Andrew Merenbach
f62c825495 Manifest endpoint (#207)
* Add manifests endpoint

* Draft app.go changes

* Fix some issues with imports, symbols, args

* Reduce duplication between components

* Revert "Reduce duplication between components"

This reverts commit 87b166885d53778683bc0a0a826671c2c67dc082.

* Add ManifestQuery type, thanks @jessesuen

* Add required/optional flags to protobuf

* Update generated code

* Add missing pointer dereferences

* Default to app target revision, thanks @jessesuen

* Account for nil
2018-05-17 16:33:04 -07:00
Jesse Suen
45f44dd4be Add v0.4.0 changelog 2018-05-17 03:10:41 -07:00
Jesse Suen
9c0daebfe0 Fix diff falsely reporting OutOfSync due to namespace/annotation defaulting 2018-05-17 00:41:50 -07:00
Jesse Suen
f2a0ca5609 Add intelligence in diff library to perform three-way diff from last-applied-configuration annotation (resolves #199) 2018-05-16 16:47:30 -07:00
Alexander Matyushentsev
e04d315853 Issue #118 - app delete should be done through controller using finalizers (#206)
* Issue #118 - app delete should be done through controller using finalizers

* Apply reviewer notes: introduce application

* Apply reviewer notes: fix app deletion
2018-05-16 16:30:28 -07:00
Jesse Suen
daec697658 Update ksonnet to v0.10.2 (resolves #208) 2018-05-15 23:25:38 -07:00
Alexander Matyushentsev
7ad5670710 Make sure api server started during fixture setup (#209) 2018-05-15 22:49:55 -07:00
Alexander Matyushentsev
8036423373 Implement App management and repo management e2e tests (#205)
* Implement App management and repo management e2e tests

* Apply reviewer notes; fix compilation error
2018-05-15 12:42:44 -07:00
Alexander Matyushentsev
8039228a9d Add last update time to operation status, fix operation status patching (#204)
* Add last update time to operation status, fix operation status patching

* Rename lastUpdateTime to startAt and finishedAt
2018-05-15 11:35:10 -07:00
Andrew Merenbach
b1103af429 Rename recent deployments to history (#201)
* Rename RecentDeployments to History

* Update generated code
2018-05-15 11:05:46 -07:00
Jesse Suen
d67ad5acfd Add connect timeouts when interacting with SSH git repos (resolves #131) (#203) 2018-05-15 10:30:31 -07:00
Jesse Suen
c9df9c17b7 Default Spec.Source.TargetRevision to HEAD server-side if unspecified (issue #190) 2018-05-15 03:21:21 -07:00
Jesse Suen
8fa46b02b0 Remove SyncPolicy (issue #190) 2018-05-15 03:05:34 -07:00
Jesse Suen
92c481330d App creation was not defaulting to server and namespace defined in app.yaml 2018-05-15 01:35:43 -07:00
Jesse Suen
2664db3e40 Refactor application controller sync/apply loop (#202)
* Refactor application controller sync/apply loop
* always run kubectl apply --dry-run before the actual apply
* remove incorrect logic skip apply if comparator reported Synced
* rename status to phase
* distinguish failures from errors
* consolidate fields between OperationState and SyncOperationResult
* Disable migration code which referenced removed fields
2018-05-15 00:36:11 -07:00
Andrew Merenbach
6b554e5f4e Add 0.3.0 to 0.4.0 migration utility (#186)
* Add temp migrate utility

* Fix errors, separate out migrate for now

* Update script with suggestions from @jessesuen

* Check for localhost, server address now; print with log, not fmt

* Add more log lines, standardize output, fix args

* Improve feedback, thanks @alexmt

* Rename migration script, thanks @jessesuen

* Don't run UpdateSpec unless a change has occurred

* Move migrate => hack/migrate, thanks @jessesuen
2018-05-14 14:35:01 -07:00
Alexander Matyushentsev
2bc0dff135 Issue #146 - Render health status information in 'app list' and 'app get' commands (#198) 2018-05-14 13:21:06 -07:00
Jesse Suen
c61795f71a Add 'database' library for CRUD operations against repos and clusters. Redact sensitive information (#196) 2018-05-14 11:36:08 -07:00
Jesse Suen
a8a7491bf0 Handle potential panic when argo install settings run against an empty configmap 2018-05-11 18:47:45 -07:00
Alexander Matyushentsev
d1c7c4fcaf Issue #187 - implement argo settings install command (#193) 2018-05-11 11:50:53 -07:00
Alexander Matyushentsev
3dbbcf8918 Move sync logic to controller (#180)
* Issue #119 - Move sync logic to controller

* Implement app compare/sync e2e test

* Fix panic in kube ApplyResource method

* Apply reviewer notes: add separate rollback operation instead of reusing sync for sync and rollback
2018-05-11 11:50:32 -07:00
Jesse Suen
0cfd1ad05f Update feature list with SSO and Webhook integration 2018-05-10 17:51:51 -07:00
Jesse Suen
bfa4e233b7 cli will look to spec.destination.server and namespace when displaying apps 2018-05-10 17:39:13 -07:00
Jesse Suen
dc662da3d6 Support OAuth2 login flow from CLI (resolves #172) (#181)
* Support OAuth2 login flow from CLI (resolves #172)
* Refactor SessionManager to handle local and OAuth2 logins.
* argo login will request permanent credentials after OAuth2 flow
* Implement proper OIDC app state nonce. Add explicit `--sso` flag to `argo login`
2018-05-10 15:43:58 -07:00
Jesse Suen
4107d2422b Fix linting errors 2018-05-08 16:42:12 -07:00
Andrew Merenbach
b83eac5dc2 Make ApplicationSpec.Destination non-optional, non-pointer (#177)
* Make Destination a non-pointer field

* Rm nil checks for destination; update initialization

* Update codegen

* Rm ResolveServerNamespace function, thanks @jessesuen
2018-05-08 14:09:33 -07:00
Jesse Suen
bb51837c56 Do not delete namespace or CRD during uninstall unless explicitly stated (resolves #167) (#173) 2018-05-08 12:56:59 -07:00
Jesse Suen
5bbb4fe1a1 Cache kubernetes API resource discovery (resolves #170) (#176) 2018-05-08 12:56:15 -07:00
Andrew Merenbach
b5c20e9b46 Trim spaces server-side in GitHub usernames (#171) 2018-05-07 14:56:48 -07:00
Andrew Merenbach
1e1ab636e0 Don't fail when new app has same spec as old (#168) 2018-05-07 13:20:38 -07:00
Jesse Suen
7348553897 Improve CI build stability 2018-05-07 12:44:14 -07:00
Jesse Suen
5f65a5128a Introduce caching layer to repo server to improve query response times (#165) 2018-05-07 11:31:00 -07:00
Alexander Matyushentsev
d9c12e7271 Issue #146 - ArgoCD applications should have a rolled up health status (#164)
* Issue #146 - ArgoCD applications should have a rolled up health status

* Apply reviewer notes: rename healthState to health; rename HealthState to HealthStatus
2018-05-07 08:38:25 -07:00
Jesse Suen
fb2d6b4aff Refactor repo server and git client (#163)
* added a general purpose GetFile RPC and remove GetKsonnetApp RPC
* reposerver did not talk to kubernetes -- removed all k8s imports
* git client refactored to simply method signatures
2018-05-06 20:51:17 -07:00
Andrew Merenbach
3f4ec0ab22 Expand Git repo URL normalization (#162)
* Expand unit tests for Git functions

* Update tests for IsSSHUrl

* Add TODO

* Add EnsureSuffix tests

* Add EnsureSuffix function

* Lowercase repo name in secret name

* Expand normalization and related tests

* Add tests for EnsurePrefix

* Rm redundant strings.ToLower

* Update repository names to fix broken tests

* Expand tests some more to include missing .git suffix, thanks @jessesuen

* Add additional repository tests, thanks @jessesuen

* Fix typo in comment
2018-05-04 17:31:04 -07:00
Jesse Suen
ac938fe8a3 Add GitHub webhook handling to fast-track controller application reprocessing (#160)
* Add GitHub webhook handling to fast-track application controller reprocessing
* Add GitLab and Bitbucket webhook support. Add unit tests.
2018-05-04 17:01:57 -07:00
Alexander Matyushentsev
dc1e8796fb Disable authentication for settings service 2018-05-04 16:13:22 -07:00
Alexander Matyushentsev
8c5d59c60c Issue #157 - If argocd token is expired server should return 401 instead of 500 (#158) 2018-05-04 09:48:46 -07:00
Jesse Suen
13558b7ce8 Revert change to redact credentials since logic is reused by controller 2018-05-03 16:42:21 -07:00
Alexander Matyushentsev
3b2b3dacf5 Update version 2018-05-03 15:58:09 -07:00
Alexander Matyushentsev
1b2f89995c Issue #155 - Application update fails due to concurrent access (#156) 2018-05-03 15:55:01 -07:00
Jesse Suen
0479fcdf82 Add settings endpoint so frontend can show/hide SSO login button. Rename config to settings (#153) 2018-05-03 11:18:44 -07:00
Andrew Merenbach
a04465466d Add workflow for blue-green deployments (#148)
* Add prototype script and temp README

* Clean up code and support command-line args

* Flesh out logic more now

* Start workflow

* Update Blue-Green workflow

* Rm original example script

* Update comments

* Add argo parameters; use compact output for jq

* Fix some missing values, use workflow parameters

* Separate out necessary parameters/outputs

* Get bluegreen workflow working

* Mv bluegreen.yaml to workflows/, thanks @jessesuen

* Rm TODO statements
2018-05-03 09:52:46 -07:00
Jesse Suen
670921df90 SSO Support (#152)
This change implements SSO support.

dex is run as a sidecar to the ArgoCD API server, which fronts dex using a reverse proxy. The end result is that ArgoCD acts as an OIDC provider serving under /api/dex. The login flow begins at /auth/login, which redirects to Dex's OAuth2 consent page and is ultimately directed to the IdP provider's login page, where users enter their credentials. After logging in, the OAuth2 redirect flows back to the client app, ultimately reaching /auth/callback, where the OIDC token claims are signed and persisted in the user's cookie.

The dex configuration YAML is formulated during startup (through the argocd-util utility), with the configuration values taken from the argocd-cm configmap and the argocd-secret.

The build process was refactored to build argocd-util statically, so that it could be run inside off-the-shelf dex, which is built from alpine. Also, build speed was improved by expanding the default make targets in the Dockerfile, to avoid rebuilding each binary from scratch

Session management was refactored to use more bare-bones jwt library constructs, so we could reuse code from the user/password flow vs. OAuth2 flow.

* Initial SSO support. Run dex as sidecar. Generate dex config from ArgoCD cm and secret
* Sign and write SSO claims to JWT cookie during SSO login. Refactor session manager
* Build argo-util statically so it can run in dex sidecar. Redirect after SSO login
* Simplify app creation process to not require communication to dex gRPC server
2018-05-02 22:02:26 -07:00
Edward Lee
18f7e17d7a Added OWNERS file 2018-05-02 18:24:50 -07:00
Andrew Merenbach
a2aede0441 Redact sensitive repo/cluster information upon retrieval (#150)
* Redact sensitive cluster information upon retrieval

* Redact git username for now, too

* Revert "Redact git username for now, too"

This reverts commit d9e2eba37e.
2018-05-01 16:58:03 -07:00
133 changed files with 16938 additions and 5332 deletions

View File

@@ -21,9 +21,7 @@ spec:
- name: cmd
value: "{{item}}"
withItems:
- make controller-image
- make server-image
- make repo-server-image
- make controller-image server-image repo-server-image
- name: test
template: ci-builder
arguments:
@@ -31,8 +29,7 @@ spec:
- name: cmd
value: "{{item}}"
withItems:
- dep ensure && make lint
- dep ensure && make test test-e2e
- dep ensure && make cli lint test test-e2e
- name: ci-builder
inputs:
@@ -49,6 +46,10 @@ spec:
command: [sh, -c]
args: ["{{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
resources:
requests:
memory: 1024Mi
cpu: 200m
- name: ci-dind
inputs:
@@ -68,6 +69,10 @@ spec:
env:
- name: DOCKER_HOST
value: 127.0.0.1
resources:
requests:
memory: 1024Mi
cpu: 200m
sidecars:
- name: dind
image: docker:17.10-dind

14
CHANGELOG.md Normal file
View File

@@ -0,0 +1,14 @@
# Changelog
## v0.4.0 (2018-05-17)
+ SSO Integration
+ GitHub Webhook
+ Add application health status
+ Sync/Rollback/Delete is asynchronously handled by controller
* Refactor CRUD operation on clusters and repos
* Sync will always perform kubectl apply
* Synced Status considers last-applied-configuration annotation
* Server & namespace are mandatory fields (still inferred from app.yaml)
* Manifests are memoized in repo server
- Fix connection timeouts to SSH repos

View File

@@ -1,4 +1,4 @@
FROM debian:9.3 as builder
FROM debian:9.4 as builder
RUN apt-get update && apt-get install -y \
git \
@@ -10,7 +10,7 @@ RUN apt-get update && apt-get install -y \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Install go
ENV GO_VERSION 1.9.3
ENV GO_VERSION 1.10.3
ENV GO_ARCH amd64
ENV GOPATH /root/go
ENV PATH ${GOPATH}/bin:/usr/local/go/bin:${PATH}
@@ -25,7 +25,7 @@ RUN cd /usr/local && \
unzip protoc-*.zip && \
wget https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -O /usr/local/bin/dep && \
chmod +x /usr/local/bin/dep && \
wget https://github.com/gobuffalo/packr/releases/download/v1.10.4/packr_1.10.4_linux_amd64.tar.gz && \
wget https://github.com/gobuffalo/packr/releases/download/v1.11.0/packr_1.11.0_linux_amd64.tar.gz && \
tar -vxf packr*.tar.gz -C /tmp/ && \
mv /tmp/packr /usr/local/bin/packr
@@ -40,9 +40,9 @@ RUN cd ${GOPATH}/src/dummy && \
rmdir vendor
# Perform the build
ARG MAKE_TARGET
WORKDIR /root/go/src/github.com/argoproj/argo-cd
COPY . .
ARG MAKE_TARGET="cli server controller repo-server argocd-util"
RUN make ${MAKE_TARGET}
@@ -58,7 +58,7 @@ FROM golang:1.10 as cli-tooling
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /ks
# Option 2: use official tagged ksonnet release
env KSONNET_VERSION=0.10.1
env KSONNET_VERSION=0.11.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /ks
@@ -72,13 +72,12 @@ FROM debian:9.3
RUN apt-get update && apt-get install -y git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
ARG BINARY
COPY --from=builder /root/go/src/github.com/argoproj/argo-cd/dist/${BINARY} /${BINARY}
COPY --from=cli-tooling /ks /usr/local/bin/ks
COPY --from=cli-tooling /kubectl /usr/local/bin/kubectl
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=root
ENV BINARY=$BINARY
CMD /$BINARY
COPY --from=builder /root/go/src/github.com/argoproj/argo-cd/dist/* /
ARG BINARY
CMD /${BINARY}

View File

@@ -1,4 +1,4 @@
FROM golang:1.9.2
FROM golang:1.10.3
WORKDIR /tmp
@@ -11,10 +11,11 @@ RUN curl -O https://get.docker.com/builds/Linux/x86_64/docker-1.13.1.tgz && \
gometalinter.v2 --install
# Install kubectl
RUN curl -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
RUN curl -o /kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /kubectl && mv /kubectl /usr/local/bin/kubectl
# Install ksonnet
env KSONNET_VERSION=0.10.1
env KSONNET_VERSION=0.11.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks && \

290
Gopkg.lock generated
View File

@@ -15,6 +15,12 @@
]
revision = "c02ca9a983da5807ddf7d796784928f5be4afd09"
[[projects]]
name = "github.com/Knetic/govaluate"
packages = ["."]
revision = "d216395917cc49052c7c7094cf57f09657ca08a8"
version = "v3.0.0"
[[projects]]
name = "github.com/PuerkitoBio/purell"
packages = ["."]
@@ -32,6 +38,34 @@
revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f"
version = "v3.5.1"
[[projects]]
name = "github.com/casbin/casbin"
packages = [
".",
"config",
"effect",
"model",
"persist",
"persist/file-adapter",
"rbac",
"rbac/default-role-manager",
"util"
]
revision = "d71629e497929858300c38cd442098c178121c30"
version = "v1.5.0"
[[projects]]
name = "github.com/coreos/dex"
packages = ["api"]
revision = "218d671a96865df2a4cf7f310efb99b8bfc5a5e2"
version = "v2.10.0"
[[projects]]
branch = "v2"
name = "github.com/coreos/go-oidc"
packages = ["."]
revision = "1180514eaf4d9f38d0d19eef639a1d695e066e72"
[[projects]]
branch = "master"
name = "github.com/daaku/go.zipexe"
@@ -50,20 +84,10 @@
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/emicklei/go-restful"
packages = [
".",
"log"
]
revision = "26b41036311f2da8242db402557a0dbd09dc83da"
version = "v2.6.0"
[[projects]]
name = "github.com/ghodss/yaml"
packages = ["."]
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
branch = "master"
@@ -89,11 +113,36 @@
packages = ["."]
revision = "84f4bee7c0a6db40e3166044c7983c1c32125429"
[[projects]]
name = "github.com/go-redis/cache"
packages = [
".",
"internal/lrucache",
"internal/singleflight"
]
revision = "c58ada1e23a3b66593f81c70572c20a0bb805a90"
version = "v6.3.5"
[[projects]]
name = "github.com/go-redis/redis"
packages = [
".",
"internal",
"internal/consistenthash",
"internal/hashtag",
"internal/pool",
"internal/proto",
"internal/singleflight",
"internal/util"
]
revision = "877867d2845fbaf86798befe410b6ceb6f5c29a3"
version = "v6.10.2"
[[projects]]
name = "github.com/gobuffalo/packr"
packages = ["."]
revision = "6434a292ac52e6964adebfdce3f9ce6d9f16be01"
version = "v1.10.4"
revision = "7f4074995d431987caaa35088199f13c44b24440"
version = "v1.11.0"
[[projects]]
name = "github.com/gogo/protobuf"
@@ -151,12 +200,6 @@
]
revision = "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b"
[[projects]]
branch = "master"
name = "github.com/google/btree"
packages = ["."]
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
[[projects]]
name = "github.com/google/go-jsonnet"
packages = [
@@ -182,15 +225,6 @@
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache"
]
revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
[[projects]]
branch = "master"
name = "github.com/grpc-ecosystem/go-grpc-middleware"
@@ -249,12 +283,6 @@
revision = "e7c7f3b33712573affdcc7a107218e7926b9a05b"
version = "1.0.6"
[[projects]]
name = "github.com/juju/ratelimit"
packages = ["."]
revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
version = "1.0.1"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
@@ -269,6 +297,7 @@
"pkg/component",
"pkg/docparser",
"pkg/lib",
"pkg/log",
"pkg/node",
"pkg/params",
"pkg/prototype",
@@ -277,8 +306,8 @@
"pkg/util/kslib",
"pkg/util/strings"
]
revision = "8c44a5b1545d3d03135f610170ef0167129294bc"
version = "v0.10.1"
revision = "e943ae55d4fe256c8330a047ce8426ad9dac110c"
version = "v0.11.0"
[[projects]]
name = "github.com/ksonnet/ksonnet-lib"
@@ -291,8 +320,8 @@
"ksonnet-gen/nodemaker",
"ksonnet-gen/printer"
]
revision = "d15220fdcdd07fd377894abff6276d86cb2d776d"
version = "v0.1.3"
revision = "dfcaa3d01d0c4948cb596403c35e966c774f2678"
version = "v0.1.8"
[[projects]]
branch = "master"
@@ -305,16 +334,10 @@
revision = "32fa128f234d041f196a9f3e0fea5ac9772c08e1"
[[projects]]
branch = "master"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
name = "github.com/peterbourgon/diskv"
name = "github.com/patrickmn/go-cache"
packages = ["."]
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
name = "github.com/pkg/errors"
@@ -328,6 +351,21 @@
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/pquerna/cachecontrol"
packages = [
".",
"cacheobject"
]
revision = "525d0eb5f91d30e3b1548de401b7ef9ea6898520"
[[projects]]
branch = "master"
name = "github.com/qiangmzsx/string-adapter"
packages = ["."]
revision = "38f25303bb0cd40e674a6fac01e0171ab905f5a1"
[[projects]]
name = "github.com/sergi/go-diff"
packages = ["diffmatchpatch"]
@@ -337,8 +375,13 @@
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
version = "v1.0.5"
revision = "ea8897e79973357ba785ac2533559a6297e83c44"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
name = "github.com/soheilhy/cmux"
@@ -381,6 +424,15 @@
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
name = "github.com/vmihailenco/msgpack"
packages = [
".",
"codes"
]
revision = "a053f3dac71df214bfe8b367f34220f0029c9c02"
version = "v3.3.1"
[[projects]]
name = "github.com/yudai/gojsondiff"
packages = [
@@ -402,6 +454,8 @@
packages = [
"bcrypt",
"blowfish",
"ed25519",
"ed25519/internal/edwards25519",
"ssh/terminal"
]
revision = "432090b8f568c018896cd8a0fb0345872bbac6ce"
@@ -469,6 +523,12 @@
]
revision = "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
name = "golang.org/x/tools"
@@ -482,6 +542,7 @@
name = "google.golang.org/appengine"
packages = [
".",
"datastore",
"internal",
"internal/app_identity",
"internal/base",
@@ -536,21 +597,43 @@
revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
version = "v1.10.0"
[[projects]]
name = "gopkg.in/go-playground/webhooks.v3"
packages = [
".",
"bitbucket",
"github",
"gitlab"
]
revision = "5580947e3ec83427ef5f6f2392eddca8dde5d99a"
version = "v3.11.0"
[[projects]]
name = "gopkg.in/inf.v0"
packages = ["."]
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
version = "v0.9.0"
[[projects]]
name = "gopkg.in/square/go-jose.v2"
packages = [
".",
"cipher",
"json"
]
revision = "76dd09796242edb5b897103a75df2645c028c960"
version = "v2.1.6"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
[[projects]]
branch = "release-1.9"
branch = "release-1.10"
name = "k8s.io/api"
packages = [
"admission/v1beta1",
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
@@ -569,6 +652,7 @@
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"imagepolicy/v1alpha1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
@@ -580,10 +664,10 @@
"storage/v1alpha1",
"storage/v1beta1"
]
revision = "acf347b865f29325eb61f4cd2df11e86e073a5ee"
revision = "8b7507fac302640dd5f1efbf9643199952cc58db"
[[projects]]
branch = "release-1.9"
branch = "release-1.10"
name = "k8s.io/apiextensions-apiserver"
packages = [
"pkg/apis/apiextensions",
@@ -592,20 +676,23 @@
"pkg/client/clientset/clientset/scheme",
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
]
revision = "b89f5ce12ce6e022fc3e9d7586d61346e694d56e"
revision = "b13a681559816a9c14f93086bbeeed1c7baf2bcb"
[[projects]]
branch = "release-1.9"
branch = "release-1.10"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/equality",
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apimachinery",
"pkg/apimachinery/announced",
"pkg/apimachinery/registered",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1alpha1",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
@@ -627,27 +714,70 @@
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/strategicpatch",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/reflect"
]
revision = "19e3f5aa3adca672c153d324e6b7d82ff8935f03"
revision = "f6313580a4d36c7c74a3d845dda6e116642c4f90"
[[projects]]
branch = "release-6.0"
branch = "release-7.0"
name = "k8s.io/client-go"
packages = [
"discovery",
"discovery/fake",
"dynamic",
"dynamic/fake",
"informers",
"informers/admissionregistration",
"informers/admissionregistration/v1alpha1",
"informers/admissionregistration/v1beta1",
"informers/apps",
"informers/apps/v1",
"informers/apps/v1beta1",
"informers/apps/v1beta2",
"informers/autoscaling",
"informers/autoscaling/v1",
"informers/autoscaling/v2beta1",
"informers/batch",
"informers/batch/v1",
"informers/batch/v1beta1",
"informers/batch/v2alpha1",
"informers/certificates",
"informers/certificates/v1beta1",
"informers/core",
"informers/core/v1",
"informers/events",
"informers/events/v1beta1",
"informers/extensions",
"informers/extensions/v1beta1",
"informers/internalinterfaces",
"informers/networking",
"informers/networking/v1",
"informers/policy",
"informers/policy/v1beta1",
"informers/rbac",
"informers/rbac/v1",
"informers/rbac/v1alpha1",
"informers/rbac/v1beta1",
"informers/scheduling",
"informers/scheduling/v1alpha1",
"informers/settings",
"informers/settings/v1alpha1",
"informers/storage",
"informers/storage/v1",
"informers/storage/v1alpha1",
"informers/storage/v1beta1",
"kubernetes",
"kubernetes/fake",
"kubernetes/scheme",
@@ -707,7 +837,34 @@
"kubernetes/typed/storage/v1alpha1/fake",
"kubernetes/typed/storage/v1beta1",
"kubernetes/typed/storage/v1beta1/fake",
"listers/admissionregistration/v1alpha1",
"listers/admissionregistration/v1beta1",
"listers/apps/v1",
"listers/apps/v1beta1",
"listers/apps/v1beta2",
"listers/autoscaling/v1",
"listers/autoscaling/v2beta1",
"listers/batch/v1",
"listers/batch/v1beta1",
"listers/batch/v2alpha1",
"listers/certificates/v1beta1",
"listers/core/v1",
"listers/events/v1beta1",
"listers/extensions/v1beta1",
"listers/networking/v1",
"listers/policy/v1beta1",
"listers/rbac/v1",
"listers/rbac/v1alpha1",
"listers/rbac/v1beta1",
"listers/scheduling/v1alpha1",
"listers/settings/v1alpha1",
"listers/storage/v1",
"listers/storage/v1alpha1",
"listers/storage/v1beta1",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"plugin/pkg/client/auth/gcp",
"plugin/pkg/client/auth/oidc",
"rest",
@@ -730,19 +887,21 @@
"util/homedir",
"util/integer",
"util/jsonpath",
"util/retry",
"util/workqueue"
]
revision = "9389c055a838d4f208b699b3c7c51b70f2368861"
revision = "26a26f55b28aa1b338fbaf6fbbe0bcd76aed05e0"
[[projects]]
branch = "release-1.9"
branch = "release-1.10"
name = "k8s.io/code-generator"
packages = [
"cmd/go-to-protobuf",
"cmd/go-to-protobuf/protobuf",
"pkg/util",
"third_party/forked/golang/reflect"
]
revision = "91d3f6a57905178524105a085085901bb73bd3dc"
revision = "9de8e796a74d16d2a285165727d04c185ebca6dc"
[[projects]]
branch = "master"
@@ -759,12 +918,21 @@
[[projects]]
branch = "master"
name = "k8s.io/kube-openapi"
packages = ["pkg/common"]
packages = ["pkg/util/proto"]
revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf"
[[projects]]
name = "k8s.io/kubernetes"
packages = [
"pkg/apis/core",
"pkg/kubectl/scheme"
]
revision = "81753b10df112992bf51bbc2c2f85208aad78335"
version = "v1.10.2"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "8e7142f84554c6f1665ef18e0fb906f82de8cd802b0211c4a46ec1ad228b8b7e"
inputs-digest = "f2d179e0bbae6ede81f78cf6b0b16cb09fbeb5e97add78bdd97e3051238b86da"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -13,25 +13,20 @@ required = [
name = "github.com/grpc-ecosystem/grpc-gateway"
version = "v1.3.1"
# override ksonnet's release-1.8 dependency
[[override]]
branch = "release-1.9"
name = "k8s.io/apimachinery"
[[constraint]]
branch = "release-1.9"
branch = "release-1.10"
name = "k8s.io/api"
[[constraint]]
name = "k8s.io/apiextensions-apiserver"
branch = "release-1.9"
branch = "release-1.10"
[[constraint]]
branch = "release-1.9"
branch = "release-1.10"
name = "k8s.io/code-generator"
[[constraint]]
branch = "release-6.0"
branch = "release-7.0"
name = "k8s.io/client-go"
[[constraint]]
@@ -40,9 +35,13 @@ required = [
[[constraint]]
name = "github.com/ksonnet/ksonnet"
version = "v0.10.1"
version = "v0.11.0"
[[constraint]]
name = "github.com/gobuffalo/packr"
version = "v1.11.0"
# override ksonnet's logrus dependency
[[override]]
name = "github.com/sirupsen/logrus"
version = "v1.0.3"
revision = "ea8897e79973357ba785ac2533559a6297e83c44"

View File

@@ -41,7 +41,7 @@ IMAGE_PREFIX=${IMAGE_NAMESPACE}/
endif
.PHONY: all
all: cli server-image controller-image repo-server-image
all: cli server-image controller-image repo-server-image argocd-util
.PHONY: protogen
protogen:
@@ -57,30 +57,34 @@ codegen: protogen clientgen
# NOTE: we use packr to do the build instead of go, since we embed .yaml files into the go binary.
# This enables ease of maintenance of the yaml files.
.PHONY: cli
cli:
cli: clean-debug
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
.PHONY: cli-linux
cli-linux:
cli-linux: clean-debug
docker build --iidfile /tmp/argocd-linux-id --target builder --build-arg MAKE_TARGET="cli IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-linux-amd64" -f Dockerfile-argocd .
docker create --name tmp-argocd-linux `cat /tmp/argocd-linux-id`
docker cp tmp-argocd-linux:/root/go/src/github.com/argoproj/argo-cd/dist/argocd-linux-amd64 dist/
docker rm tmp-argocd-linux
.PHONY: cli-darwin
cli-darwin:
cli-darwin: clean-debug
docker build --iidfile /tmp/argocd-darwin-id --target builder --build-arg MAKE_TARGET="cli GOOS=darwin IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-darwin-amd64" -f Dockerfile-argocd .
docker create --name tmp-argocd-darwin `cat /tmp/argocd-darwin-id`
docker cp tmp-argocd-darwin:/root/go/src/github.com/argoproj/argo-cd/dist/argocd-darwin-amd64 dist/
docker rm tmp-argocd-darwin
.PHONY: argocd-util
argocd-util: clean-debug
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
.PHONY: server
server:
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
server: clean-debug
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
.PHONY: server-image
server-image:
docker build --build-arg BINARY=argocd-server --build-arg MAKE_TARGET=server -t $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) -f Dockerfile-argocd .
docker build --build-arg BINARY=argocd-server -t $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) ; fi
.PHONY: repo-server
@@ -89,7 +93,7 @@ repo-server:
.PHONY: repo-server-image
repo-server-image:
docker build --build-arg BINARY=argocd-repo-server --build-arg MAKE_TARGET=repo-server -t $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) -f Dockerfile-argocd .
docker build --build-arg BINARY=argocd-repo-server -t $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) ; fi
.PHONY: controller
@@ -98,12 +102,12 @@ controller:
.PHONY: controller-image
controller-image:
docker build --build-arg BINARY=argocd-application-controller --build-arg MAKE_TARGET=controller -t $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) -f Dockerfile-argocd .
docker build --build-arg BINARY=argocd-application-controller -t $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) ; fi
.PHONY: cli-image
cli-image:
docker build --build-arg BINARY=argocd --build-arg MAKE_TARGET=cli -t $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) -f Dockerfile-argocd .
docker build --build-arg BINARY=argocd -t $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) -f Dockerfile-argocd .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) ; fi
.PHONY: builder-image
@@ -122,8 +126,13 @@ test:
test-e2e:
go test ./test/e2e
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes
.PHONY: clean-debug
clean-debug:
-find ${CURRENT_DIR} -name debug.test | xargs rm -f
.PHONY: clean
clean:
clean: clean-debug
-rm -rf ${CURRENT_DIR}/dist
.PHONY: precheckin

8
OWNERS Normal file
View File

@@ -0,0 +1,8 @@
owners:
- alexmt
- jessesuen
approvers:
- alexmt
- jessesuen
- merenbach

View File

@@ -1,3 +1,5 @@
controller: go run ./cmd/argocd-application-controller/main.go --app-resync 10
api-server: go run ./cmd/argocd-server/main.go --insecure
repo-server: go run ./cmd/argocd-repo-server/main.go
controller: go run ./cmd/argocd-application-controller/main.go --app-resync 60
api-server: go run ./cmd/argocd-server/main.go --insecure --disable-auth
repo-server: go run ./cmd/argocd-repo-server/main.go --loglevel debug
dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p 5556:5556 -p 5557:5557 -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/coreos/dex:v2.10.0 serve /dex.yaml"
redis: docker run --rm -p 6379:6379 redis:3.2.11

View File

@@ -44,6 +44,8 @@ For additional details, see [architecture overview](docs/architecture.md).
* Automated or manual syncing of applications to its target state
* Web and CLI based visualization of applications and differences between live vs. target state
* Rollback/Roll-anywhere to any application state committed in the git repository
* SSO Integration (OIDC, LDAP, SAML 2.0, GitLab, Microsoft, LinkedIn)
* Webhook Integration (GitHub, BitBucket, GitLab)
## What is ksonnet?
@@ -87,5 +89,3 @@ concise definition of kubernetes manifests
* PreSync, PostSync, OutOfSync hooks
* Customized application actions as Argo workflows
* Blue/Green & canary upgrades
* SSO Integration
* GitHub & Docker webhooks

View File

@@ -1 +1 @@
0.4.0
0.5.4

View File

@@ -2,18 +2,18 @@ package main
import (
"context"
"flag"
"fmt"
"os"
"strconv"
"time"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/server/cluster"
apirepository "github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
@@ -22,6 +22,8 @@ import (
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
"github.com/argoproj/argo-cd/reposerver"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
@@ -34,11 +36,13 @@ const (
func newCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
repoServerAddress string
workers int
logLevel string
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
repoServerAddress string
statusProcessors int
operationProcessors int
logLevel string
glogLevel int
)
var command = cobra.Command{
Use: cliName,
@@ -48,6 +52,11 @@ func newCommand() *cobra.Command {
errors.CheckError(err)
log.SetLevel(level)
// Set the glog level for the k8s go-client
_ = flag.CommandLine.Parse([]string{})
_ = flag.Lookup("logtostderr").Value.Set("true")
_ = flag.Lookup("v").Value.Set(strconv.Itoa(glogLevel))
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
@@ -62,28 +71,30 @@ func newCommand() *cobra.Command {
Namespace: namespace,
InstanceID: "",
}
db := db.NewDB(namespace, kubeClient)
resyncDuration := time.Duration(appResyncPeriod) * time.Second
apiRepoServer := apirepository.NewServer(namespace, kubeClient, appClient)
apiClusterServer := cluster.NewServer(namespace, kubeClient, appClient)
clusterService := cluster.NewServer(namespace, kubeClient, appClient)
appComparator := controller.NewKsonnetAppComparator(clusterService)
repoClientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
appStateManager := controller.NewAppStateManager(db, appClient, repoClientset, namespace)
appHealthManager := controller.NewAppHealthManager(db, namespace)
appController := controller.NewApplicationController(
namespace,
kubeClient,
appClient,
reposerver.NewRepositoryServerClientset(repoServerAddress),
apiRepoServer,
apiClusterServer,
appComparator,
db,
appStateManager,
appHealthManager,
resyncDuration,
&controllerConfig)
secretController := controller.NewSecretController(kubeClient, repoClientset, resyncDuration, namespace)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
log.Infof("Application Controller (version: %s) starting (namespace: %s)", argocd.GetVersion(), namespace)
go appController.Run(ctx, workers)
go secretController.Run(ctx)
go appController.Run(ctx, statusProcessors, operationProcessors)
// Wait forever
select {}
},
@@ -92,8 +103,10 @@ func newCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", defaultAppResyncPeriod, "Time period in seconds for application resync.")
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().IntVar(&workers, "workers", 1, "Number of application workers")
command.Flags().IntVar(&statusProcessors, "status-processors", 1, "Number of application status processors")
command.Flags().IntVar(&operationProcessors, "operation-processors", 1, "Number of application operation processors")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
return &command
}

View File

@@ -5,21 +5,16 @@ import (
"net"
"os"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/ksonnet"
"github.com/go-redis/redis"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/reposerver"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
const (
@@ -30,8 +25,7 @@ const (
func newCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
logLevel string
logLevel string
)
var command = cobra.Command{
Use: cliName,
@@ -41,25 +35,15 @@ func newCommand() *cobra.Command {
errors.CheckError(err)
log.SetLevel(level)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
kubeClientset := kubernetes.NewForConfigOrDie(config)
server := reposerver.NewServer(kubeClientset, namespace)
nativeGitClient, err := git.NewNativeGitClient()
errors.CheckError(err)
grpc := server.CreateGRPC(nativeGitClient)
server := reposerver.NewServer(git.NewFactory(), newCache())
grpc := server.CreateGRPC()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
errors.CheckError(err)
ksVers, err := ksonnet.KsonnetVersion()
errors.CheckError(err)
log.Infof("argocd-repo-server %s serving on %s (namespace: %s)", argocd.GetVersion(), listener.Addr(), namespace)
log.Infof("argocd-repo-server %s serving on %s", argocd.GetVersion(), listener.Addr())
log.Infof("ksonnet version: %s", ksVers)
err = grpc.Serve(listener)
errors.CheckError(err)
@@ -67,11 +51,20 @@ func newCommand() *cobra.Command {
},
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
return &command
}
func newCache() cache.Cache {
//return cache.NewInMemoryCache(repository.DefaultRepoCacheExpiration)
client := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "",
DB: 0,
})
return cache.NewRedisCache(client, repository.DefaultRepoCacheExpiration)
}
func main() {
if err := newCommand().Execute(); err != nil {
fmt.Println(err)

View File

@@ -1,6 +1,10 @@
package commands
import (
"context"
"flag"
"strconv"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
@@ -17,6 +21,7 @@ func NewCommand() *cobra.Command {
var (
insecure bool
logLevel string
glogLevel int
clientConfig clientcmd.ClientConfig
staticAssetsDir string
repoServerAddress string
@@ -31,6 +36,11 @@ func NewCommand() *cobra.Command {
errors.CheckError(err)
log.SetLevel(level)
// Set the glog level for the k8s go-client
_ = flag.CommandLine.Parse([]string{})
_ = flag.Lookup("logtostderr").Value.Set("true")
_ = flag.Lookup("v").Value.Set(strconv.Itoa(glogLevel))
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
@@ -50,8 +60,14 @@ func NewCommand() *cobra.Command {
RepoClientset: repoclientset,
DisableAuth: disableAuth,
}
argocd := server.NewServer(argoCDOpts)
argocd.Run()
for {
argocd := server.NewServer(argoCDOpts)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
argocd.Run(ctx, 8080)
cancel()
}
},
}
@@ -59,6 +75,7 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&insecure, "insecure", false, "Run server without TLS")
command.Flags().StringVar(&staticAssetsDir, "staticassets", "", "Static assets directory path")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().BoolVar(&disableAuth, "disable-auth", false, "Disable client authentication")
command.AddCommand(cli.NewVersionCmd(cliName))

335
cmd/argocd-util/main.go Normal file
View File

@@ -0,0 +1,335 @@
package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
"syscall"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/dex"
"github.com/argoproj/argo-cd/util/settings"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
const (
// cliName is the name of the CLI
cliName = "argocd-util"
// yamlSeparator separates sections of a YAML file
yamlSeparator = "\n---\n"
)
// NewCommand returns the root command of the argocd-util CLI.
// When invoked without a subcommand it prints its own help text.
func NewCommand() *cobra.Command {
	var logLevel string
	cmd := &cobra.Command{
		Use:   cliName,
		Short: "argocd-util has internal tools used by ArgoCD",
		Run: func(c *cobra.Command, args []string) {
			c.HelpFunc()(c, args)
		},
	}
	// Register all subcommands in one variadic call.
	cmd.AddCommand(
		cli.NewVersionCmd(cliName),
		NewRunDexCommand(),
		NewGenDexConfigCommand(),
		NewImportCommand(),
		NewExportCommand(),
	)
	cmd.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
	return cmd
}
// NewRunDexCommand returns a command that runs the dex binary, regenerating
// its config from the ArgoCD configmap/secret and restarting dex whenever
// the rendered config changes.
func NewRunDexCommand() *cobra.Command {
	var (
		clientConfig clientcmd.ClientConfig
	)
	var command = cobra.Command{
		Use:   "rundex",
		Short: "Runs dex generating a config using settings from the ArgoCD configmap and secret",
		RunE: func(c *cobra.Command, args []string) error {
			// Fail fast if the dex binary is not on PATH.
			_, err := exec.LookPath("dex")
			errors.CheckError(err)
			config, err := clientConfig.ClientConfig()
			errors.CheckError(err)
			namespace, _, err := clientConfig.Namespace()
			errors.CheckError(err)
			kubeClientset := kubernetes.NewForConfigOrDie(config)
			settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
			settings, err := settingsMgr.GetSettings()
			errors.CheckError(err)
			ctx := context.Background()
			// Subscribe to settings changes; updateCh fires when the
			// configmap/secret are modified.
			settingsMgr.StartNotifier(ctx, settings)
			updateCh := make(chan struct{}, 1)
			settingsMgr.Subscribe(updateCh)
			// Outer loop: (re)render the config and (re)start dex.
			for {
				var cmd *exec.Cmd
				// NOTE(review): GenerateDexConfigYAML is re-invoked on the same
				// `settings` pointer here and below — this relies on StartNotifier
				// updating it in place; confirm against the settings package.
				dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
				errors.CheckError(err)
				if len(dexCfgBytes) == 0 {
					log.Infof("dex is not configured")
				} else {
					err = ioutil.WriteFile("/tmp/dex.yaml", dexCfgBytes, 0644)
					errors.CheckError(err)
					log.Info(string(dexCfgBytes))
					cmd = exec.Command("dex", "serve", "/tmp/dex.yaml")
					cmd.Stdout = os.Stdout
					cmd.Stderr = os.Stderr
					err = cmd.Start()
					errors.CheckError(err)
				}
				// loop until the dex config changes
				for {
					<-updateCh
					newDexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
					errors.CheckError(err)
					if string(newDexCfgBytes) != string(dexCfgBytes) {
						log.Infof("dex config modified. restarting dex")
						// Terminate the running dex (if any) and reap it
						// before breaking out to restart with the new config.
						if cmd != nil && cmd.Process != nil {
							err = cmd.Process.Signal(syscall.SIGTERM)
							errors.CheckError(err)
							_, err = cmd.Process.Wait()
							errors.CheckError(err)
						}
						break
					} else {
						log.Infof("dex config unmodified")
					}
				}
			}
		},
	}
	clientConfig = cli.AddKubectlFlagsToCmd(&command)
	return &command
}
// NewGenDexConfigCommand returns a command that renders the dex config
// derived from the ArgoCD configmap and secret, writing it to stdout by
// default or to the file given with --out/-o.
func NewGenDexConfigCommand() *cobra.Command {
	var (
		clientConfig clientcmd.ClientConfig
		out          string
	)
	var command = cobra.Command{
		Use:   "gendexcfg",
		Short: "Generates a dex config from ArgoCD settings",
		RunE: func(c *cobra.Command, args []string) error {
			config, err := clientConfig.ClientConfig()
			errors.CheckError(err)
			namespace, _, err := clientConfig.Namespace()
			errors.CheckError(err)
			kubeClientset := kubernetes.NewForConfigOrDie(config)
			settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
			settings, err := settingsMgr.GetSettings()
			errors.CheckError(err)
			dexCfgBytes, err := dex.GenerateDexConfigYAML(settings)
			errors.CheckError(err)
			if len(dexCfgBytes) == 0 {
				log.Infof("dex is not configured")
				return nil
			}
			if out == "" {
				// Use Print, not Printf: the config is data, and a literal '%'
				// in it would otherwise be interpreted as a format directive.
				fmt.Print(string(dexCfgBytes))
			} else {
				err = ioutil.WriteFile(out, dexCfgBytes, 0644)
				errors.CheckError(err)
			}
			return nil
		},
	}
	clientConfig = cli.AddKubectlFlagsToCmd(&command)
	command.Flags().StringVarP(&out, "out", "o", "", "Output to the specified file instead of stdout")
	return &command
}
// NewImportCommand defines a new command for importing Kubernetes and Argo CD
// resources previously produced by `argocd-util export`.
func NewImportCommand() *cobra.Command {
	var (
		clientConfig clientcmd.ClientConfig
	)
	var command = cobra.Command{
		Use:   "import SOURCE",
		Short: "Import Argo CD data from stdin (specify `-') or a file",
		RunE: func(c *cobra.Command, args []string) error {
			if len(args) != 1 {
				c.HelpFunc()(c, args)
				os.Exit(1)
			}
			var (
				input       []byte
				err         error
				newSettings *settings.ArgoCDSettings
				newRepos    []*v1alpha1.Repository
				newClusters []*v1alpha1.Cluster
				newApps     []*v1alpha1.Application
			)
			// Read the backup either from stdin ("-") or from the named file.
			if in := args[0]; in == "-" {
				input, err = ioutil.ReadAll(os.Stdin)
				errors.CheckError(err)
			} else {
				input, err = ioutil.ReadFile(in)
				errors.CheckError(err)
			}
			// The export format is four YAML documents in fixed order:
			// settings, repositories, clusters, applications. Validate the
			// document count before indexing to avoid a panic on bad input.
			inputStrings := strings.Split(string(input), yamlSeparator)
			if len(inputStrings) < 4 {
				return fmt.Errorf("malformed input: expected 4 YAML documents (settings, repositories, clusters, applications), found %d", len(inputStrings))
			}
			err = yaml.Unmarshal([]byte(inputStrings[0]), &newSettings)
			errors.CheckError(err)
			err = yaml.Unmarshal([]byte(inputStrings[1]), &newRepos)
			errors.CheckError(err)
			err = yaml.Unmarshal([]byte(inputStrings[2]), &newClusters)
			errors.CheckError(err)
			err = yaml.Unmarshal([]byte(inputStrings[3]), &newApps)
			errors.CheckError(err)
			config, err := clientConfig.ClientConfig()
			errors.CheckError(err)
			namespace, _, err := clientConfig.Namespace()
			errors.CheckError(err)
			kubeClientset := kubernetes.NewForConfigOrDie(config)
			settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
			err = settingsMgr.SaveSettings(newSettings)
			errors.CheckError(err)
			db := db.NewDB(namespace, kubeClientset)
			// Repositories and clusters are created best-effort: an
			// already-existing resource only logs a warning.
			for _, repo := range newRepos {
				_, err := db.CreateRepository(context.Background(), repo)
				if err != nil {
					log.Warn(err)
				}
			}
			for _, cluster := range newClusters {
				_, err := db.CreateCluster(context.Background(), cluster)
				if err != nil {
					log.Warn(err)
				}
			}
			appClientset := appclientset.NewForConfigOrDie(config)
			for _, app := range newApps {
				out, err := appClientset.ArgoprojV1alpha1().Applications(namespace).Create(app)
				errors.CheckError(err)
				log.Println(out)
			}
			return nil
		},
	}
	clientConfig = cli.AddKubectlFlagsToCmd(&command)
	return &command
}
// NewExportCommand defines a new command for exporting Kubernetes and Argo CD resources.
// The output is four YAML documents (settings, repositories, clusters,
// applications) joined by the yamlSeparator, written to stdout or to --out.
func NewExportCommand() *cobra.Command {
	var (
		clientConfig clientcmd.ClientConfig
		out          string
	)
	var command = cobra.Command{
		Use:   "export",
		Short: "Export all Argo CD data to stdout (default) or a file",
		RunE: func(c *cobra.Command, args []string) error {
			restConfig, err := clientConfig.ClientConfig()
			errors.CheckError(err)
			namespace, _, err := clientConfig.Namespace()
			errors.CheckError(err)
			kubeClientset := kubernetes.NewForConfigOrDie(restConfig)
			settingsMgr := settings.NewSettingsManager(kubeClientset, namespace)
			acdSettings, err := settingsMgr.GetSettings()
			errors.CheckError(err)
			// certificate data is included in secrets that are exported alongside
			acdSettings.Certificate = nil
			settingsData, err := yaml.Marshal(acdSettings)
			errors.CheckError(err)
			argoDB := db.NewDB(namespace, kubeClientset)
			clusters, err := argoDB.ListClusters(context.Background())
			errors.CheckError(err)
			clusterData, err := yaml.Marshal(clusters.Items)
			errors.CheckError(err)
			repos, err := argoDB.ListRepositories(context.Background())
			errors.CheckError(err)
			repoData, err := yaml.Marshal(repos.Items)
			errors.CheckError(err)
			appClientset := appclientset.NewForConfigOrDie(restConfig)
			apps, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(metav1.ListOptions{})
			errors.CheckError(err)
			// remove extraneous cruft from output: keep only the name,
			// finalizers, and deployment history of each application.
			for idx := range apps.Items {
				app := apps.Items[idx]
				apps.Items[idx].ObjectMeta = metav1.ObjectMeta{
					Name:       app.ObjectMeta.Name,
					Finalizers: app.ObjectMeta.Finalizers,
				}
				apps.Items[idx].Status = v1alpha1.ApplicationStatus{
					History: app.Status.History,
				}
				apps.Items[idx].Operation = nil
			}
			appsData, err := yaml.Marshal(apps.Items)
			errors.CheckError(err)
			sections := []string{
				string(settingsData),
				string(repoData),
				string(clusterData),
				string(appsData),
			}
			rendered := strings.Join(sections, yamlSeparator)
			if out == "-" {
				fmt.Println(rendered)
			} else {
				err = ioutil.WriteFile(out, []byte(rendered), 0644)
				errors.CheckError(err)
			}
			return nil
		},
	}
	clientConfig = cli.AddKubectlFlagsToCmd(&command)
	command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout")
	return &command
}
// main is the entry point: it executes the root command and exits
// non-zero on any error, printing the error to stdout first.
func main() {
	err := NewCommand().Execute()
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}

View File

@@ -2,13 +2,28 @@ package commands
import (
"context"
"encoding/json"
"fmt"
"io"
"net/url"
"os"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/yudai/gojsondiff/formatter"
"golang.org/x/crypto/ssh/terminal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
@@ -16,12 +31,8 @@ import (
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/diff"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/yudai/gojsondiff/formatter"
"golang.org/x/crypto/ssh/terminal"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/util/ksonnet"
kubeutil "github.com/argoproj/argo-cd/util/kube"
)
// NewApplicationCommand returns a new instance of an `argocd app` command
@@ -38,21 +49,24 @@ func NewApplicationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
command.AddCommand(NewApplicationGetCommand(clientOpts))
command.AddCommand(NewApplicationDiffCommand(clientOpts))
command.AddCommand(NewApplicationSetCommand(clientOpts))
command.AddCommand(NewApplicationUnsetCommand(clientOpts))
command.AddCommand(NewApplicationSyncCommand(clientOpts))
command.AddCommand(NewApplicationHistoryCommand(clientOpts))
command.AddCommand(NewApplicationRollbackCommand(clientOpts))
command.AddCommand(NewApplicationListCommand(clientOpts))
command.AddCommand(NewApplicationDeleteCommand(clientOpts))
command.AddCommand(NewApplicationWaitCommand(clientOpts))
command.AddCommand(NewApplicationManifestsCommand(clientOpts))
return command
}
// NewApplicationCreateCommand returns a new instance of an `argocd app create` command
func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
appOpts appOptions
fileURL string
appName string
syncPolicy string
appOpts appOptions
fileURL string
appName string
upsert bool
)
var command = &cobra.Command{
Use: "create",
@@ -64,8 +78,8 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
}
var app argoappv1.Application
if fileURL != "" {
_, err := url.ParseRequestURI(fileURL)
if err != nil {
parsedURL, err := url.ParseRequestURI(fileURL)
if err != nil || !(parsedURL.Scheme == "http" || parsedURL.Scheme == "https") {
err = cli.UnmarshalLocalFile(fileURL, &app)
} else {
err = cli.UnmarshalRemoteFile(fileURL, &app)
@@ -75,10 +89,6 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
}
} else {
if syncPolicy != "" && syncPolicy != "Always" {
c.HelpFunc()(c, args)
os.Exit(1)
}
if appOpts.repoURL == "" || appOpts.appPath == "" || appOpts.env == "" || appName == "" {
log.Fatal("name, repo, path, env are required")
os.Exit(1)
@@ -94,88 +104,176 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
Environment: appOpts.env,
TargetRevision: appOpts.revision,
},
SyncPolicy: syncPolicy,
},
}
}
if appOpts.destServer != "" || appOpts.destNamespace != "" {
app.Spec.Destination = &argoappv1.ApplicationDestination{
Server: appOpts.destServer,
Namespace: appOpts.destNamespace,
}
if appOpts.destServer != "" {
app.Spec.Destination.Server = appOpts.destServer
}
if appOpts.destNamespace != "" {
app.Spec.Destination.Namespace = appOpts.destNamespace
}
setParameterOverrides(&app, appOpts.parameters)
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
created, err := appIf.Create(context.Background(), &app)
appCreateRequest := application.ApplicationCreateRequest{
Application: app,
Upsert: &upsert,
}
created, err := appIf.Create(context.Background(), &appCreateRequest)
errors.CheckError(err)
fmt.Printf("application '%s' created\n", created.ObjectMeta.Name)
},
}
command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the app")
command.Flags().StringVar(&appName, "name", "", "A name for the app, ignored if a file is set")
command.Flags().BoolVar(&upsert, "upsert", false, "Allows to override application with the same name even if supplied application spec is different from existing spec")
addAppFlags(command, &appOpts)
//command.Flags().StringVar(&syncPolicy, "sync-policy", "", "Synchronization policy for application (e.g., Always)")
return command
}
// NewApplicationGetCommand returns a new instance of an `argocd app get` command
func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
showParams bool
showOperation bool
refresh bool
)
var command = &cobra.Command{
Use: "get",
Use: "get APPNAME",
Short: "Get application details",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
acdClient := argocdclient.NewClientOrDie(clientOpts)
conn, appIf := acdClient.NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: appName})
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: refresh})
errors.CheckError(err)
format := "%-15s%s\n"
fmt.Printf(format, "Name:", app.Name)
fmt.Printf(format, "Environment:", app.Spec.Source.Environment)
fmt.Printf(format, "Repo:", app.Spec.Source.RepoURL)
fmt.Printf(format, "Path:", app.Spec.Source.Path)
if app.Spec.Source.TargetRevision == "" {
fmt.Printf(format, "Target:", "HEAD")
} else {
switch output {
case "yaml":
yamlBytes, err := yaml.Marshal(app)
errors.CheckError(err)
fmt.Println(string(yamlBytes))
case "json":
jsonBytes, err := json.MarshalIndent(app, "", " ")
errors.CheckError(err)
fmt.Println(string(jsonBytes))
case "":
format := "%-15s%s\n"
fmt.Printf(format, "Name:", app.Name)
fmt.Printf(format, "Server:", app.Spec.Destination.Server)
fmt.Printf(format, "Namespace:", app.Spec.Destination.Namespace)
fmt.Printf(format, "URL:", appURL(acdClient, app))
fmt.Printf(format, "Environment:", app.Spec.Source.Environment)
fmt.Printf(format, "Repo:", app.Spec.Source.RepoURL)
fmt.Printf(format, "Path:", app.Spec.Source.Path)
fmt.Printf(format, "Target:", app.Spec.Source.TargetRevision)
}
if app.Status.ComparisonResult.Error != "" {
fmt.Printf(format, "Error:", app.Status.ComparisonResult.Error)
}
if app.Status.ComparisonResult.Server != "" {
fmt.Printf(format, "Server:", app.Status.ComparisonResult.Server)
}
if app.Status.ComparisonResult.Namespace != "" {
fmt.Printf(format, "Namespace:", app.Status.ComparisonResult.Namespace)
}
if len(app.Status.ComparisonResult.Resources) > 0 {
fmt.Println()
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "KIND\tNAME\tSTATUS\n")
for _, res := range app.Status.ComparisonResult.Resources {
targetObj, err := argoappv1.UnmarshalToUnstructured(res.TargetState)
errors.CheckError(err)
fmt.Fprintf(w, "%s\t%s\t%s\n", targetObj.GetKind(), targetObj.GetName(), res.Status)
if app.Status.ComparisonResult.Error != "" {
fmt.Printf(format, "Error:", app.Status.ComparisonResult.Error)
}
_ = w.Flush()
var opState *argoappv1.OperationState
if showOperation && app.Status.OperationState != nil {
opState = app.Status.OperationState
fmt.Println()
var opName string
if opState.SyncResult != nil {
opName = "Sync"
} else if opState.RollbackResult != nil {
opName = "Rollback"
}
fmt.Printf(format, "Operation:", opName)
fmt.Printf(format, " Phase:", opState.Phase)
fmt.Printf(format, " Start:", opState.StartedAt)
fmt.Printf(format, " Finished:", opState.FinishedAt)
var duration time.Duration
if !opState.FinishedAt.IsZero() {
duration = time.Second * time.Duration(opState.FinishedAt.Unix()-opState.StartedAt.Unix())
} else {
duration = time.Second * time.Duration(time.Now().UTC().Unix()-opState.StartedAt.Unix())
}
fmt.Printf(format, " Duration:", duration)
fmt.Printf(format, " Phase:", opState.Phase)
if opState.Message != "" {
fmt.Printf(format, " Message:", opState.Message)
}
}
if showParams {
printParams(app)
}
if len(app.Status.ComparisonResult.Resources) > 0 {
fmt.Println()
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
printAppResources(w, app, opState)
_ = w.Flush()
}
default:
log.Fatalf("Unknown output format: %s", output)
}
},
}
command.Flags().StringVarP(&output, "output", "o", "", "Output format. One of: yaml, json")
command.Flags().BoolVar(&showOperation, "show-operation", false, "Show application operation")
command.Flags().BoolVar(&showParams, "show-params", false, "Show application parameters and overrides")
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
return command
}
// appURL returns the web UI URL of an application, derived from the API
// client's server address. The scheme follows the client's plaintext setting,
// and the default HTTPS port (":443") is stripped to keep URLs short.
func appURL(acdClient argocdclient.Client, app *argoappv1.Application) string {
	opts := acdClient.ClientOptions()
	server := opts.ServerAddr
	var scheme string
	if opts.PlainText {
		scheme = "http"
	} else {
		scheme = "https"
		// Drop the implied HTTPS port rather than hand-slicing the string.
		server = strings.TrimSuffix(server, ":443")
	}
	return fmt.Sprintf("%s://%s/applications/%s/%s", scheme, server, app.Namespace, app.Name)
}
// truncateString shortens str for display. When str is longer than num, it is
// cut and suffixed with "..."; for num > 3 the ellipsis is counted inside the
// limit, while for very small limits (num <= 3) the first num bytes are kept
// and the ellipsis appended on top.
func truncateString(str string, num int) string {
	if len(str) <= num {
		return str
	}
	cut := num
	if cut > 3 {
		// Reserve room for the ellipsis within the limit.
		cut -= 3
	}
	return str[:cut] + "..."
}
// printParams prints the application's parameters alongside any spec-level
// overrides in a tab-aligned table on stdout.
func printParams(app *argoappv1.Application) {
	const paramLenLimit = 80
	// Index overrides by "component/name" for O(1) lookup per parameter row.
	overrides := map[string]string{}
	for _, override := range app.Spec.Source.ComponentParameterOverrides {
		overrides[fmt.Sprintf("%s/%s", override.Component, override.Name)] = override.Value
	}
	fmt.Println()
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintf(w, "COMPONENT\tNAME\tVALUE\tOVERRIDE\n")
	for _, param := range app.Status.Parameters {
		key := fmt.Sprintf("%s/%s", param.Component, param.Name)
		fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
			param.Component, param.Name,
			truncateString(param.Value, paramLenLimit),
			truncateString(overrides[key], paramLenLimit))
	}
	_ = w.Flush()
}
// NewApplicationSetCommand returns a new instance of an `argocd app set` command
func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
appOpts appOptions
)
var command = &cobra.Command{
Use: "set",
Use: "set APPNAME",
Short: "Set application parameters",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
@@ -185,7 +283,7 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
appName := args[0]
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: appName})
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName})
errors.CheckError(err)
visited := 0
c.Flags().Visit(func(f *pflag.Flag) {
@@ -200,14 +298,8 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
case "revision":
app.Spec.Source.TargetRevision = appOpts.revision
case "dest-server":
if app.Spec.Destination == nil {
app.Spec.Destination = &argoappv1.ApplicationDestination{}
}
app.Spec.Destination.Server = appOpts.destServer
case "dest-namespace":
if app.Spec.Destination == nil {
app.Spec.Destination = &argoappv1.ApplicationDestination{}
}
app.Spec.Destination.Namespace = appOpts.destNamespace
}
})
@@ -217,7 +309,10 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
os.Exit(1)
}
setParameterOverrides(app, appOpts.parameters)
_, err = appIf.Update(context.Background(), app)
_, err = appIf.UpdateSpec(context.Background(), &application.ApplicationUpdateSpecRequest{
Name: &app.Name,
Spec: app.Spec,
})
errors.CheckError(err)
},
}
@@ -245,10 +340,63 @@ func addAppFlags(command *cobra.Command, opts *appOptions) {
command.Flags().StringArrayVarP(&opts.parameters, "parameter", "p", []string{}, "set a parameter override (e.g. -p guestbook=image=example/guestbook:latest)")
}
// NewApplicationUnsetCommand returns a new instance of an `argocd app unset` command
func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
parameters []string
)
var command = &cobra.Command{
Use: "unset APPNAME -p COMPONENT=PARAM",
Short: "Unset application parameters",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 || len(parameters) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
appName := args[0]
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName})
errors.CheckError(err)
updated := false
for _, paramStr := range parameters {
parts := strings.SplitN(paramStr, "=", 2)
if len(parts) != 2 {
log.Fatalf("Expected parameter of the form: component=param. Received: %s", paramStr)
}
overrides := app.Spec.Source.ComponentParameterOverrides
for i, override := range overrides {
if override.Component == parts[0] && override.Name == parts[1] {
app.Spec.Source.ComponentParameterOverrides = append(overrides[0:i], overrides[i+1:]...)
updated = true
break
}
}
}
if !updated {
return
}
_, err = appIf.UpdateSpec(context.Background(), &application.ApplicationUpdateSpecRequest{
Name: &app.Name,
Spec: app.Spec,
})
errors.CheckError(err)
},
}
command.Flags().StringArrayVarP(&parameters, "parameter", "p", []string{}, "unset a parameter override (e.g. -p guestbook=image)")
return command
}
// NewApplicationDiffCommand returns a new instance of an `argocd app diff` command
func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
refresh bool
local string
env string
)
var command = &cobra.Command{
Use: "diff",
Use: "diff APPNAME",
Short: "Perform a diff against the target and live state",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
@@ -258,39 +406,83 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: appName})
errors.CheckError(err)
targetObjs, err := app.Status.ComparisonResult.TargetObjects()
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: refresh})
errors.CheckError(err)
liveObjs, err := app.Status.ComparisonResult.LiveObjects()
errors.CheckError(err)
diffResults, err := diff.DiffArray(targetObjs, liveObjs)
var compareObjs []*unstructured.Unstructured
if local != "" {
if env == "" {
log.Fatal("--env required when performing local diff")
}
ksApp, err := ksonnet.NewKsonnetApp(local)
errors.CheckError(err)
compareObjs, err = ksApp.Show(env)
errors.CheckError(err)
if len(app.Spec.Source.ComponentParameterOverrides) > 0 {
log.Warnf("Unable to display parameter overrides")
}
compareObjs, liveObjs = diff.MatchObjectLists(compareObjs, liveObjs)
} else {
if env != "" {
log.Fatal("--env option invalid when performing git diff")
}
compareObjs, err = app.Status.ComparisonResult.TargetObjects()
errors.CheckError(err)
}
// In order for the diff to be clean, need to set our app labels
setAppLabels(appName, compareObjs)
diffResults, err := diff.DiffArray(compareObjs, liveObjs)
errors.CheckError(err)
for i := 0; i < len(targetObjs); i++ {
targetObj := targetObjs[i]
for i := 0; i < len(compareObjs); i++ {
kind, name := getObjKindName(compareObjs[i], liveObjs[i])
diffRes := diffResults.Diffs[i]
fmt.Printf("===== %s %s ======\n", targetObj.GetKind(), targetObj.GetName())
fmt.Printf("===== %s %s ======\n", kind, name)
if diffRes.Modified {
formatOpts := formatter.AsciiFormatterConfig{
Coloring: terminal.IsTerminal(int(os.Stdout.Fd())),
}
out, err := diffResults.Diffs[i].ASCIIFormat(targetObj, formatOpts)
out, err := diffResults.Diffs[i].ASCIIFormat(compareObjs[i], formatOpts)
errors.CheckError(err)
fmt.Println(out)
}
}
if local != "" && len(app.Spec.Source.ComponentParameterOverrides) > 0 {
log.Warnf("Unable to display parameter overrides")
}
},
}
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
command.Flags().StringVar(&local, "local", "", "Compare live app to a local ksonnet app")
command.Flags().StringVar(&env, "env", "", "Compare live app to a specific environment")
return command
}
// getObjKindName returns the kind and name of a diffed resource, preferring
// the compare (target) object and falling back to the live object. A guard for
// the both-nil case avoids a nil dereference on malformed diff entries.
func getObjKindName(compare, live *unstructured.Unstructured) (string, string) {
	if compare != nil {
		return compare.GetKind(), compare.GetName()
	}
	if live != nil {
		return live.GetKind(), live.GetName()
	}
	return "", ""
}
// setAppLabels stamps the ArgoCD application-name label onto every non-nil
// object so that a diff against live (labeled) resources comes out clean.
func setAppLabels(appName string, compareObjs []*unstructured.Unstructured) {
	for i := range compareObjs {
		obj := compareObjs[i]
		if obj == nil {
			continue
		}
		// Error deliberately ignored: a failed label only makes the diff noisier.
		_ = kubeutil.SetLabel(obj, common.LabelApplicationName, appName)
	}
}
// NewApplicationDeleteCommand returns a new instance of an `argocd app delete` command
func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
force bool
cascade bool
)
var command = &cobra.Command{
Use: "delete",
Use: "delete APPNAME",
Short: "Delete an application",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
@@ -300,21 +492,26 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
for _, appName := range args {
appDeleteReq := application.DeleteApplicationRequest{
Name: appName,
Force: force,
appDeleteReq := application.ApplicationDeleteRequest{
Name: &appName,
}
if c.Flag("cascade").Changed {
appDeleteReq.Cascade = &cascade
}
_, err := appIf.Delete(context.Background(), &appDeleteReq)
errors.CheckError(err)
}
},
}
command.Flags().BoolVar(&force, "force", false, "Force delete application even if cascaded deletion unsuccessful")
command.Flags().BoolVar(&cascade, "cascade", true, "Perform a cascaded deletion of all application resources")
return command
}
// NewApplicationListCommand returns a new instance of an `argocd app list` command
func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
)
var command = &cobra.Command{
Use: "list",
Short: "List applications",
@@ -324,27 +521,213 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
apps, err := appIf.List(context.Background(), &application.ApplicationQuery{})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tENVIRONMENT\tTARGET\tCLUSTER\tNAMESPACE\tSTATUS\n")
var fmtStr string
headers := []interface{}{"NAME", "CLUSTER", "NAMESPACE", "STATUS", "HEALTH"}
if output == "wide" {
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
headers = append(headers, "ENV", "REPO", "PATH", "TARGET")
} else {
fmtStr = "%s\t%s\t%s\t%s\t%s\n"
}
fmt.Fprintf(w, fmtStr, headers...)
for _, app := range apps.Items {
targetRev := app.Spec.Source.TargetRevision
if targetRev == "" {
targetRev = "HEAD"
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n",
vals := []interface{}{
app.Name,
app.Spec.Source.Environment,
targetRev,
app.Status.ComparisonResult.Server,
app.Status.ComparisonResult.Namespace,
app.Spec.Destination.Server,
app.Spec.Destination.Namespace,
app.Status.ComparisonResult.Status,
)
app.Status.Health.Status,
}
if output == "wide" {
vals = append(vals, app.Spec.Source.Environment, app.Spec.Source.RepoURL, app.Spec.Source.Path, app.Spec.Source.TargetRevision)
}
fmt.Fprintf(w, fmtStr, vals...)
}
_ = w.Flush()
},
}
command.Flags().StringVarP(&output, "output", "o", "", "Output format. One of: wide")
return command
}
// NewApplicationWaitCommand returns a new instance of an `argocd app wait` command.
// The command blocks until the named app is both synced and healthy (or only one
// of the two when --sync-only/--health-only is given), printing resource state
// changes as they arrive; with --timeout it fails after that many seconds.
func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
	var (
		syncOnly   bool
		healthOnly bool
		timeout    uint
	)
	// 0 means "no timeout": the AfterFunc below is only armed when timeout != 0.
	const defaultCheckTimeoutSeconds = 0
	var command = &cobra.Command{
		Use:   "wait APPNAME",
		Short: "Wait for an application to reach a synced and healthy state",
		Run: func(c *cobra.Command, args []string) {
			if len(args) != 1 {
				c.HelpFunc()(c, args)
				os.Exit(1)
			}
			// The two flags are mutually exclusive; both set is a user error.
			if syncOnly && healthOnly {
				log.Fatalln("Please specify at most one of --sync-only or --health-only.")
			}
			appName := args[0]
			conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
			defer util.Close(conn)
			ctx := context.Background()
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			if timeout != 0 {
				// Cancel the watch context when the deadline expires; the
				// event channel then closes and we fall through to Fatalf.
				time.AfterFunc(time.Duration(timeout)*time.Second, func() {
					cancel()
				})
			}
			// print the initial components to format the tabwriter columns
			app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: &appName})
			errors.CheckError(err)
			w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
			printAppResources(w, app, nil)
			_ = w.Flush()
			// Track the previous comparison result so only state *changes* print.
			prevCompRes := &app.Status.ComparisonResult
			appEventCh := watchApp(ctx, appIf, appName)
			for appEvent := range appEventCh {
				app := appEvent.Application
				printAppStateChange(w, prevCompRes, &app)
				_ = w.Flush()
				prevCompRes = &app.Status.ComparisonResult
				synced := (app.Status.ComparisonResult.Status == argoappv1.ComparisonStatusSynced)
				healthy := (app.Status.Health.Status == argoappv1.HealthStatusHealthy)
				// Success when both conditions hold, or the single requested one does.
				if (synced && healthy) || (synced && syncOnly) || (healthy && healthOnly) {
					log.Printf("App %q matches desired state", appName)
					return
				}
			}
			// Channel closed without reaching the desired state => timed out.
			log.Fatalf("Timed out (%ds) waiting for app %q match desired state", timeout, appName)
		},
	}
	command.Flags().BoolVar(&syncOnly, "sync-only", false, "Wait only for sync")
	command.Flags().BoolVar(&healthOnly, "health-only", false, "Wait only for health")
	command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
	return command
}
// isCanceledContextErr reports whether err represents context cancellation,
// either as the raw context.Canceled sentinel or as a gRPC status carrying
// codes.Canceled.
func isCanceledContextErr(err error) bool {
	if err == context.Canceled {
		return true
	}
	// gRPC wraps cancellation into a status error; unwrap and check the code.
	stat, ok := status.FromError(err)
	return ok && stat.Code() == codes.Canceled
}
// watchApp returns a channel of watch events for an app, retrying the watch upon errors. Closes
// the returned channel when the context is discovered to be canceled.
func watchApp(ctx context.Context, appIf application.ApplicationServiceClient, appName string) chan *argoappv1.ApplicationWatchEvent {
	appEventsCh := make(chan *argoappv1.ApplicationWatchEvent)
	go func() {
		// Closing the channel is the only termination signal consumers get.
		defer close(appEventsCh)
		for {
			// (Re-)establish the server-side watch stream.
			wc, err := appIf.Watch(ctx, &application.ApplicationQuery{
				Name: &appName,
			})
			if err != nil {
				// Context cancellation ends the goroutine; anything else is
				// logged (EOF excepted) and retried after a 1s pause.
				if isCanceledContextErr(err) {
					return
				}
				if err != io.EOF {
					log.Warnf("watch err: %v", err)
				}
				time.Sleep(1 * time.Second)
				continue
			}
			// Inner loop: pump events off the established stream.
			for {
				appEvent, err := wc.Recv()
				if err != nil {
					if isCanceledContextErr(err) {
						return
					}
					if err != io.EOF {
						log.Warnf("recv err: %v", err)
					}
					time.Sleep(1 * time.Second)
					// break (not continue): fall back to the outer loop to
					// re-create the watch stream.
					break
				} else {
					appEventsCh <- appEvent
				}
			}
		}
	}()
	return appEventsCh
}
// printAppResources prints the resources of an application in a tabwriter table.
// When an operation state is supplied, a per-resource operation message column
// is appended, sourced from the sync result.
func printAppResources(w io.Writer, app *argoappv1.Application, opState *argoappv1.OperationState) {
	// Index sync-result messages by "kind/name" for the OPERATIONMSG column.
	opMessages := map[string]string{}
	if opState != nil && opState.SyncResult != nil {
		for _, r := range opState.SyncResult.Resources {
			opMessages[fmt.Sprintf("%s/%s", r.Kind, r.Name)] = r.Message
		}
	}
	if opState == nil {
		fmt.Fprintf(w, "KIND\tNAME\tSTATUS\tHEALTH\n")
	} else {
		fmt.Fprintf(w, "KIND\tNAME\tSTATUS\tHEALTH\tOPERATIONMSG\n")
	}
	for _, res := range app.Status.ComparisonResult.Resources {
		// Prefer the target state; fall back to live state for pruned objects.
		obj, err := argoappv1.UnmarshalToUnstructured(res.TargetState)
		errors.CheckError(err)
		if obj == nil {
			obj, err = argoappv1.UnmarshalToUnstructured(res.LiveState)
			errors.CheckError(err)
		}
		fmt.Fprintf(w, "%s\t%s\t%s\t%s", obj.GetKind(), obj.GetName(), res.Status, res.Health.Status)
		if opState != nil {
			fmt.Fprintf(w, "\t%s", opMessages[fmt.Sprintf("%s/%s", obj.GetKind(), obj.GetName())])
		}
		fmt.Fprint(w, "\n")
	}
}
// printAppStateChange prints a component state change if it was different from the last time we saw it.
// Resources whose sync status and health both match the previous comparison
// result are suppressed, so the output only shows transitions.
func printAppStateChange(w io.Writer, prevComp *argoappv1.ComparisonResult, app *argoappv1.Application) {
	// getPrevResState looks up the prior (status, health) of a resource by
	// kind/name; returns empty values when the resource is newly seen.
	getPrevResState := func(kind, name string) (argoappv1.ComparisonStatus, argoappv1.HealthStatusCode) {
		for _, res := range prevComp.Resources {
			// Prefer target state; fall back to live state (e.g. pruned objects).
			obj, err := argoappv1.UnmarshalToUnstructured(res.TargetState)
			errors.CheckError(err)
			if obj == nil {
				obj, err = argoappv1.UnmarshalToUnstructured(res.LiveState)
				errors.CheckError(err)
			}
			if obj.GetKind() == kind && obj.GetName() == name {
				return res.Status, res.Health.Status
			}
		}
		return "", ""
	}
	if len(app.Status.ComparisonResult.Resources) > 0 {
		for _, res := range app.Status.ComparisonResult.Resources {
			obj, err := argoappv1.UnmarshalToUnstructured(res.TargetState)
			errors.CheckError(err)
			if obj == nil {
				obj, err = argoappv1.UnmarshalToUnstructured(res.LiveState)
				errors.CheckError(err)
			}
			prevSync, prevHealth := getPrevResState(obj.GetKind(), obj.GetName())
			// Only print rows whose sync status or health actually changed.
			if prevSync != res.Status || prevHealth != res.Health.Status {
				fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", obj.GetKind(), obj.GetName(), res.Status, res.Health.Status)
			}
		}
	}
}
// NewApplicationSyncCommand returns a new instance of an `argocd app sync` command
func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
@@ -353,7 +736,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
dryRun bool
)
var command = &cobra.Command{
Use: "sync",
Use: "sync APPNAME",
Short: "Sync an application to its target state",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
@@ -364,20 +747,20 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
defer util.Close(conn)
appName := args[0]
syncReq := application.ApplicationSyncRequest{
Name: appName,
Name: &appName,
DryRun: dryRun,
Revision: revision,
Prune: prune,
}
syncRes, err := appIf.Sync(context.Background(), &syncReq)
_, err := appIf.Sync(context.Background(), &syncReq)
errors.CheckError(err)
fmt.Printf("%s %s\n", appName, syncRes.Message)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tKIND\tMESSAGE\n")
for _, resDetails := range syncRes.Resources {
fmt.Fprintf(w, "%s\t%s\t%s\n", resDetails.Name, resDetails.Kind, resDetails.Message)
status, err := waitUntilOperationCompleted(appIf, appName)
errors.CheckError(err)
err = printOperationResult(appName, status)
errors.CheckError(err)
if !status.Phase.Successful() && !dryRun {
os.Exit(1)
}
_ = w.Flush()
},
}
command.Flags().BoolVar(&dryRun, "dry-run", false, "Preview apply without affecting cluster")
@@ -386,6 +769,28 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
return command
}
// waitUntilOperationCompleted blocks on the application watch stream until the
// app's operation state reaches a completed phase, returning that state. Any
// watch or receive error is returned immediately.
func waitUntilOperationCompleted(appClient application.ApplicationServiceClient, appName string) (*argoappv1.OperationState, error) {
	wc, err := appClient.Watch(context.Background(), &application.ApplicationQuery{
		Name: &appName,
	})
	if err != nil {
		return nil, err
	}
	// Consume events until the operation reports a terminal phase.
	for {
		appEvent, err := wc.Recv()
		if err != nil {
			return nil, err
		}
		opState := appEvent.Application.Status.OperationState
		if opState != nil && opState.Phase.Completed() {
			return opState, nil
		}
	}
}
// setParameterOverrides updates an existing or appends a new parameter override in the application
func setParameterOverrides(app *argoappv1.Application, parameters []string) {
if len(parameters) == 0 {
@@ -426,7 +831,7 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
// NewApplicationHistoryCommand returns a new instance of an `argocd app history` command
func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "history",
Use: "history APPNAME",
Short: "Show application deployment history",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
@@ -436,11 +841,11 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: appName})
app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ID\tDATE\tCOMMIT\tPARAMETERS\n")
for _, depInfo := range app.Status.RecentDeployments {
for _, depInfo := range app.Status.History {
paramStr := paramString(depInfo.Params)
fmt.Fprintf(w, "%d\t%s\t%s\t%s\n", depInfo.ID, depInfo.DeployedAt, depInfo.Revision, paramStr)
}
@@ -467,7 +872,7 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
prune bool
)
var command = &cobra.Command{
Use: "rollback",
Use: "rollback APPNAME",
Short: "Rollback application to a previous deployed version",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
@@ -480,10 +885,10 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
ctx := context.Background()
app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: appName})
app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: &appName})
errors.CheckError(err)
var depInfo *argoappv1.DeploymentInfo
for _, di := range app.Status.RecentDeployments {
for _, di := range app.Status.History {
if di.ID == int64(depID) {
depInfo = &di
break
@@ -492,21 +897,120 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
if depInfo == nil {
log.Fatalf("Application '%s' does not have deployment id '%d' in history\n", app.ObjectMeta.Name, depID)
}
syncRes, err := appIf.Rollback(ctx, &application.ApplicationRollbackRequest{
Name: appName,
_, err = appIf.Rollback(ctx, &application.ApplicationRollbackRequest{
Name: &appName,
ID: int64(depID),
Prune: prune,
})
errors.CheckError(err)
fmt.Printf("%s %s\n", appName, syncRes.Message)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tKIND\tMESSAGE\n")
for _, resDetails := range syncRes.Resources {
fmt.Fprintf(w, "%s\t%s\t%s\n", resDetails.Name, resDetails.Kind, resDetails.Message)
status, err := waitUntilOperationCompleted(appIf, appName)
errors.CheckError(err)
err = printOperationResult(appName, status)
errors.CheckError(err)
if !status.Phase.Successful() {
os.Exit(1)
}
_ = w.Flush()
},
}
command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources")
return command
}
// printOpFmtStr is the left-aligned "label: value" format used by printOperationResult.
const printOpFmtStr = "%-20s%s\n"

// printOperationResult prints the outcome of a sync or rollback operation and
// its per-resource messages. It returns an error when any resource still
// requires pruning, so callers can surface a non-zero exit.
func printOperationResult(appName string, opState *argoappv1.OperationState) error {
	fmt.Printf(printOpFmtStr, "Application:", appName)
	var result *argoappv1.SyncOperationResult
	switch {
	case opState.SyncResult != nil:
		fmt.Printf(printOpFmtStr, "Operation:", "Sync")
		result = opState.SyncResult
	case opState.RollbackResult != nil:
		fmt.Printf(printOpFmtStr, "Operation:", "Rollback")
		result = opState.RollbackResult
	}
	fmt.Printf(printOpFmtStr, "Phase:", opState.Phase)
	if opState.Message != "" {
		fmt.Printf(printOpFmtStr, "Message:", opState.Message)
	}
	if result == nil {
		return nil
	}
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	fmt.Printf("\n")
	fmt.Fprintf(w, "KIND\tNAME\tMESSAGE\n")
	needPruning := 0
	for _, res := range result.Resources {
		fmt.Fprintf(w, "%s\t%s\t%s\n", res.Kind, res.Name, res.Message)
		if res.Status == argoappv1.ResourceDetailsPruningRequired {
			needPruning++
		}
	}
	_ = w.Flush()
	if needPruning > 0 {
		return fmt.Errorf("Some resources (%d) require pruning", needPruning)
	}
	return nil
}
// NewApplicationManifestsCommand returns a new instance of an `argocd app manifests` command.
// It prints an app's manifests as a multi-document YAML stream, taken either
// from git (optionally at a specific --revision) or from the live cluster state.
func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
	var (
		source   string
		revision string
	)
	var command = &cobra.Command{
		Use:   "manifests APPNAME",
		Short: "Print manifests of an application",
		Run: func(c *cobra.Command, args []string) {
			if len(args) != 1 {
				c.HelpFunc()(c, args)
				os.Exit(1)
			}
			appName := args[0]
			conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
			defer util.Close(conn)
			ctx := context.Background()
			app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: &appName})
			errors.CheckError(err)
			var unstructureds []*unstructured.Unstructured
			switch source {
			case "git":
				if revision != "" {
					// Explicit revision: ask the server to render manifests
					// at that commit instead of using the cached comparison.
					q := application.ApplicationManifestQuery{
						Name:     &appName,
						Revision: revision,
					}
					res, err := appIf.GetManifests(ctx, &q)
					errors.CheckError(err)
					for _, mfst := range res.Manifests {
						obj, err := argoappv1.UnmarshalToUnstructured(mfst)
						errors.CheckError(err)
						unstructureds = append(unstructureds, obj)
					}
				} else {
					// No revision: reuse the target objects from the app's
					// last comparison result.
					targetObjs, err := app.Status.ComparisonResult.TargetObjects()
					errors.CheckError(err)
					unstructureds = targetObjs
				}
			case "live":
				liveObjs, err := app.Status.ComparisonResult.LiveObjects()
				errors.CheckError(err)
				unstructureds = liveObjs
			default:
				log.Fatalf("Unknown source type '%s'", source)
			}
			// Emit each object as its own YAML document, separated by "---".
			for _, obj := range unstructureds {
				fmt.Println("---")
				yamlBytes, err := yaml.Marshal(obj)
				errors.CheckError(err)
				fmt.Printf("%s\n", yamlBytes)
			}
		},
	}
	command.Flags().StringVar(&source, "source", "git", "Source of manifests. One of: live|git")
	command.Flags().StringVar(&revision, "revision", "", "Show manifests at a specific revision")
	return command
}

View File

@@ -42,6 +42,9 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
// NewClusterAddCommand returns a new instance of an `argocd cluster add` command
func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
var (
inCluster bool
)
var command = &cobra.Command{
Use: "add",
Short: fmt.Sprintf("%s cluster add CONTEXT", cliName),
@@ -71,12 +74,17 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
clst := NewCluster(args[0], conf, managerBearerToken)
clst, err = clusterIf.Create(context.Background(), clst)
if inCluster {
clst.Server = common.KubernetesInternalAPIServerAddr
}
clstCreateReq := cluster.ClusterCreateRequest{Cluster: clst}
clst, err = clusterIf.Create(context.Background(), &clstCreateReq)
errors.CheckError(err)
fmt.Printf("Cluster '%s' added\n", clst.Name)
},
}
command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file")
command.Flags().BoolVar(&inCluster, "in-cluster", false, "Indicates ArgoCD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
return command
}
@@ -200,9 +208,9 @@ func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
clusters, err := clusterIf.List(context.Background(), &cluster.ClusterQuery{})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "SERVER\tNAME\n")
fmt.Fprintf(w, "SERVER\tNAME\tSTATUS\tMESSAGE\n")
for _, c := range clusters.Items {
fmt.Fprintf(w, "%s\t%s\n", c.Server, c.Name)
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", c.Server, c.Name, c.ConnectionState.Status, c.ConnectionState.Message)
}
_ = w.Flush()
},

View File

@@ -33,14 +33,43 @@ func NewInstallCommand() *cobra.Command {
}
command.Flags().BoolVar(&installOpts.Upgrade, "upgrade", false, "upgrade controller/ui deployments and configmap if already installed")
command.Flags().BoolVar(&installOpts.DryRun, "dry-run", false, "print the kubernetes manifests to stdout instead of installing")
command.Flags().BoolVar(&installOpts.ConfigSuperuser, "config-superuser", false, "create or update a superuser username and password")
command.Flags().BoolVar(&installOpts.CreateSignature, "create-signature", false, "create or update the server-side token signing signature")
command.Flags().StringVar(&installOpts.ConfigMap, "config-map", "", "apply settings from a Kubernetes config map")
command.Flags().StringVar(&installOpts.SuperuserPassword, "superuser-password", "", "password for super user")
command.Flags().StringVar(&installOpts.ControllerImage, "controller-image", install.DefaultControllerImage, "use a specified controller image")
command.Flags().StringVar(&installOpts.ServerImage, "server-image", install.DefaultServerImage, "use a specified api server image")
command.Flags().StringVar(&installOpts.UIImage, "ui-image", install.DefaultUIImage, "use a specified ui image")
command.Flags().StringVar(&installOpts.RepoServerImage, "repo-server-image", install.DefaultRepoServerImage, "use a specified repo server image")
command.Flags().StringVar(&installOpts.ImagePullPolicy, "image-pull-policy", "", "set the image pull policy of the pod specs")
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.AddCommand(newSettingsCommand())
return command
}
// newSettingsCommand returns a new instance of `argocd install settings` command
func newSettingsCommand() *cobra.Command {
	var (
		clientConfig clientcmd.ClientConfig
		installOpts  install.InstallOptions
	)
	cmd := &cobra.Command{
		Use:   "settings",
		Short: "Creates or updates ArgoCD settings",
		Long:  "Creates or updates ArgoCD settings",
		Run: func(c *cobra.Command, args []string) {
			conf, err := clientConfig.ClientConfig()
			errors.CheckError(err)
			// Only override the install namespace when the user explicitly set one.
			namespace, explicit, err := clientConfig.Namespace()
			errors.CheckError(err)
			if explicit {
				installOpts.Namespace = namespace
			}
			installer, err := install.NewInstaller(conf, installOpts)
			errors.CheckError(err)
			installer.InstallSettings()
		},
	}
	cmd.Flags().BoolVar(&installOpts.UpdateSuperuser, "update-superuser", false, "force updating the superuser password")
	cmd.Flags().StringVar(&installOpts.SuperuserPassword, "superuser-password", "", "password for super user")
	cmd.Flags().BoolVar(&installOpts.UpdateSignature, "update-signature", false, "force updating the server-side token signing signature")
	clientConfig = cli.AddKubectlFlagsToCmd(cmd)
	return cmd
}

View File

@@ -1,28 +1,38 @@
package commands
import (
"bufio"
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"os"
"strings"
"strconv"
"time"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/server/session"
"github.com/argoproj/argo-cd/server/settings"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/localconfig"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
)
// NewLoginCommand returns a new instance of `argocd login` command
func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
name string
ctxName string
username string
password string
sso bool
)
var command = &cobra.Command{
Use: "login SERVER",
@@ -38,35 +48,62 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
errors.CheckError(err)
if !tlsTestResult.TLS {
if !globalClientOpts.PlainText {
askToProceed("WARNING: server is not configured with TLS. Proceed (y/n)? ")
if !cli.AskToProceed("WARNING: server is not configured with TLS. Proceed (y/n)? ") {
os.Exit(1)
}
globalClientOpts.PlainText = true
}
} else if tlsTestResult.InsecureErr != nil {
if !globalClientOpts.Insecure {
askToProceed(fmt.Sprintf("WARNING: server certificate had error: %s. Proceed insecurely (y/n)? ", tlsTestResult.InsecureErr))
if !cli.AskToProceed(fmt.Sprintf("WARNING: server certificate had error: %s. Proceed insecurely (y/n)? ", tlsTestResult.InsecureErr)) {
os.Exit(1)
}
globalClientOpts.Insecure = true
}
}
username, password = cli.PromptCredentials(username, password)
clientOpts := argocdclient.ClientOptions{
ConfigPath: "",
ServerAddr: server,
Insecure: globalClientOpts.Insecure,
PlainText: globalClientOpts.PlainText,
}
conn, sessionIf := argocdclient.NewClientOrDie(&clientOpts).NewSessionClientOrDie()
defer util.Close(conn)
acdClient := argocdclient.NewClientOrDie(&clientOpts)
setConn, setIf := acdClient.NewSettingsClientOrDie()
defer util.Close(setConn)
sessionRequest := session.SessionCreateRequest{
Username: username,
Password: password,
if ctxName == "" {
ctxName = server
}
createdSession, err := sessionIf.Create(context.Background(), &sessionRequest)
errors.CheckError(err)
fmt.Printf("user %q logged in successfully\n", username)
// Perform the login
var tokenString string
if !sso {
tokenString = passwordLogin(acdClient, username, password)
} else {
acdSet, err := setIf.Get(context.Background(), &settings.SettingsQuery{})
errors.CheckError(err)
if !ssoConfigured(acdSet) {
log.Fatalf("ArgoCD instance is not configured with SSO")
}
tokenString = oauth2Login(server, clientOpts.PlainText)
// The token which we just received from the OAuth2 flow, was from dex. ArgoCD
// currently does not back dex with any kind of persistent storage (it is run
// in-memory). As a result, this token cannot be used in any permanent capacity.
// Restarts of dex will result in a different signing key, and sessions becoming
// invalid. Instead we turn-around and ask ArgoCD to re-sign the token (who *does*
// have persistence of signing keys), and is what we store in the config. Should we
// ever decide to have a database layer for dex, the next line can be removed.
tokenString = tokenLogin(acdClient, tokenString)
}
parser := &jwt.Parser{
SkipClaimsValidation: true,
}
claims := jwt.MapClaims{}
_, _, err = parser.ParseUnverified(tokenString, &claims)
errors.CheckError(err)
fmt.Printf("'%s' logged in successfully\n", userDisplayName(claims))
// login successful. Persist the config
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
errors.CheckError(err)
@@ -79,46 +116,161 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
Insecure: globalClientOpts.Insecure,
})
localCfg.UpsertUser(localconfig.User{
Name: server,
AuthToken: createdSession.Token,
Name: ctxName,
AuthToken: tokenString,
})
if name == "" {
name = server
if ctxName == "" {
ctxName = server
}
localCfg.CurrentContext = name
localCfg.CurrentContext = ctxName
localCfg.UpsertContext(localconfig.ContextRef{
Name: name,
User: server,
Name: ctxName,
User: ctxName,
Server: server,
})
err = localconfig.WriteLocalConfig(*localCfg, globalClientOpts.ConfigPath)
errors.CheckError(err)
fmt.Printf("Context '%s' updated\n", ctxName)
},
}
command.Flags().StringVar(&name, "name", "", "name to use for the context")
command.Flags().StringVar(&ctxName, "name", "", "name to use for the context")
command.Flags().StringVar(&username, "username", "", "the username of an account to authenticate")
command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate")
command.Flags().BoolVar(&sso, "sso", false, "Perform SSO login")
return command
}
func askToProceed(message string) {
proceed := ""
acceptedAnswers := map[string]bool{
"y": true,
"yes": true,
"n": true,
"no": true,
func userDisplayName(claims jwt.MapClaims) string {
if email, ok := claims["email"]; ok && email != nil {
return email.(string)
}
for !acceptedAnswers[proceed] {
fmt.Print(message)
reader := bufio.NewReader(os.Stdin)
proceedRaw, err := reader.ReadString('\n')
errors.CheckError(err)
proceed = strings.TrimSpace(proceedRaw)
}
if proceed == "no" || proceed == "n" {
os.Exit(1)
if name, ok := claims["name"]; ok && name != nil {
return name.(string)
}
return claims["sub"].(string)
}
// ssoConfigured reports whether the ArgoCD settings carry a dex configuration
// with at least one connector, i.e. whether SSO login is possible.
func ssoConfigured(set *settings.Settings) bool {
	if set.DexConfig == nil {
		return false
	}
	return len(set.DexConfig.Connectors) > 0
}
// getFreePort asks the kernel for a free open port that is ready to use.
// It briefly binds an ephemeral TCP port, records it, and releases it again;
// the close error (if any) is propagated alongside the port.
func getFreePort() (int, error) {
	listener, err := net.Listen("tcp", "[::]:0")
	if err != nil {
		return 0, err
	}
	port := listener.Addr().(*net.TCPAddr).Port
	return port, listener.Close()
}
// oauth2Login opens a browser, runs a temporary HTTP server to delegate the
// OAuth2 login flow, and returns the resulting JWT (id_token) string.
//
// Fixes over the previous revision:
//   - the callback server is started BEFORE the browser is opened, closing the
//     race where the OAuth2 redirect could arrive before anything was listening;
//   - the success page is written with fmt.Fprint instead of fmt.Fprintf: the
//     HTML contains literal '%' characters (e.g. "width:100%!"), which Fprintf
//     would misinterpret as format verbs and mangle.
func oauth2Login(host string, plaintext bool) string {
	ctx := context.Background()
	port, err := getFreePort()
	errors.CheckError(err)
	var scheme = "https"
	if plaintext {
		scheme = "http"
	}
	conf := &oauth2.Config{
		ClientID: common.ArgoCDCLIClientAppID,
		Scopes:   []string{"openid", "profile", "email", "groups", "offline_access"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  fmt.Sprintf("%s://%s%s/auth", scheme, host, common.DexAPIEndpoint),
			TokenURL: fmt.Sprintf("%s://%s%s/token", scheme, host, common.DexAPIEndpoint),
		},
		RedirectURL: fmt.Sprintf("http://localhost:%d/auth/callback", port),
	}
	srv := &http.Server{Addr: ":" + strconv.Itoa(port)}
	var tokenString string
	loginCompleted := make(chan struct{})
	callbackHandler := func(w http.ResponseWriter, r *http.Request) {
		// Signal completion regardless of outcome so the caller unblocks.
		defer func() {
			loginCompleted <- struct{}{}
		}()
		// Authorization redirect callback from OAuth2 auth flow.
		if errMsg := r.FormValue("error"); errMsg != "" {
			http.Error(w, errMsg+": "+r.FormValue("error_description"), http.StatusBadRequest)
			log.Fatal(errMsg)
			return
		}
		code := r.FormValue("code")
		if code == "" {
			errMsg := fmt.Sprintf("no code in request: %q", r.Form)
			http.Error(w, errMsg, http.StatusBadRequest)
			log.Fatal(errMsg)
			return
		}
		// Exchange the authorization code for a token set.
		tok, err := conf.Exchange(ctx, code)
		errors.CheckError(err)
		log.Info("Authentication successful")
		var ok bool
		tokenString, ok = tok.Extra("id_token").(string)
		if !ok {
			errMsg := "no id_token in token response"
			http.Error(w, errMsg, http.StatusInternalServerError)
			log.Fatal(errMsg)
			return
		}
		log.Debugf("Token: %s", tokenString)
		successPage := `
<div style="height:100px; width:100%!; display:flex; flex-direction: column; justify-content: center; align-items:center; background-color:#2ecc71; color:white; font-size:22"><div>Authentication successful!</div></div>
<p style="margin-top:20px; font-size:18; text-align:center">Authentication was successful, you can now return to CLI. This page will close automatically</p>
<script>window.onload=function(){setTimeout(this.close, 4000)}</script>
`
		// Fprint, not Fprintf: the page body contains literal '%' characters.
		fmt.Fprint(w, successPage)
	}
	http.HandleFunc("/auth/callback", callbackHandler)
	// add transport for self-signed certificate to context
	sslcli := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	ctx = context.WithValue(ctx, oauth2.HTTPClient, sslcli)
	// Start the callback server before opening the browser, so the redirect
	// cannot arrive before we are listening.
	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Fatalf("listen: %s\n", err)
		}
	}()
	// Redirect user to login & consent page to ask for permission for the scopes specified above.
	log.Info("Opening browser for authentication")
	url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
	log.Infof("Authentication URL: %s", url)
	time.Sleep(1 * time.Second)
	err = open.Run(url)
	errors.CheckError(err)
	// Block until the callback handler has run, then shut the server down.
	<-loginCompleted
	_ = srv.Shutdown(ctx)
	return tokenString
}
// passwordLogin authenticates against the ArgoCD API server with a username and
// password (prompting interactively for any that are missing) and returns the
// session token.
func passwordLogin(acdClient argocdclient.Client, username, password string) string {
	username, password = cli.PromptCredentials(username, password)
	conn, sessionIf := acdClient.NewSessionClientOrDie()
	defer util.Close(conn)
	req := session.SessionCreateRequest{
		Username: username,
		Password: password,
	}
	sess, err := sessionIf.Create(context.Background(), &req)
	errors.CheckError(err)
	return sess.Token
}
// tokenLogin exchanges an existing JWT (e.g. one issued by dex) for an
// ArgoCD-signed session token and returns it.
func tokenLogin(acdClient argocdclient.Client, token string) string {
	conn, sessionIf := acdClient.NewSessionClientOrDie()
	defer util.Close(conn)
	req := session.SessionCreateRequest{Token: token}
	sess, err := sessionIf.Create(context.Background(), &req)
	errors.CheckError(err)
	return sess.Token
}

View File

@@ -42,7 +42,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
sshPrivateKeyPath string
)
var command = &cobra.Command{
Use: "add",
Use: "add REPO",
Short: "Add git repository credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
@@ -59,7 +59,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
}
err := git.TestRepo(repo.Repo, repo.Username, repo.Password, repo.SSHPrivateKey)
if err != nil {
if repo.Username != "" && repo.Password != "" || git.IsSshURL(repo.Repo) {
if repo.Username != "" && repo.Password != "" || git.IsSSHURL(repo.Repo) {
// if everything was supplied or repo URL is SSH url, one of the inputs was definitely bad
log.Fatal(err)
}
@@ -70,7 +70,8 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
errors.CheckError(err)
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
defer util.Close(conn)
createdRepo, err := repoIf.Create(context.Background(), &repo)
repoCreateReq := repository.RepoCreateRequest{Repo: &repo}
createdRepo, err := repoIf.Create(context.Background(), &repoCreateReq)
errors.CheckError(err)
fmt.Printf("repository '%s' added\n", createdRepo.Repo)
},
@@ -84,7 +85,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
// NewRepoRemoveCommand returns a new instance of an `argocd repo list` command
func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "rm",
Use: "rm REPO",
Short: "Remove git repository credentials",
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
@@ -113,9 +114,9 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
repos, err := repoIf.List(context.Background(), &repository.RepoQuery{})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "REPO\tUSER\n")
fmt.Fprintf(w, "REPO\tUSER\tSTATUS\tMESSAGE\n")
for _, r := range repos.Items {
fmt.Fprintf(w, "%s\t%s\n", r.Repo, r.Username)
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", r.Repo, r.Username, r.ConnectionState.Status, r.ConnectionState.Message)
}
_ = w.Flush()
},

View File

@@ -11,8 +11,10 @@ import (
// NewUninstallCommand returns a new instance of `argocd install` command
func NewUninstallCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
installOpts install.InstallOptions
clientConfig clientcmd.ClientConfig
installOpts install.InstallOptions
deleteNamespace bool
deleteCRD bool
)
var command = &cobra.Command{
Use: "uninstall",
@@ -28,9 +30,11 @@ func NewUninstallCommand() *cobra.Command {
}
installer, err := install.NewInstaller(conf, installOpts)
errors.CheckError(err)
installer.Uninstall()
installer.Uninstall(deleteNamespace, deleteCRD)
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().BoolVar(&deleteNamespace, "delete-namespace", false, "Also delete the namespace during uninstall")
command.Flags().BoolVar(&deleteCRD, "delete-crd", false, "Also delete the Application CRD during uninstall")
return command
}

View File

@@ -1,8 +1,9 @@
package common
import (
"github.com/argoproj/argo-cd/pkg/apis/application"
rbacv1 "k8s.io/api/rbac/v1"
"github.com/argoproj/argo-cd/pkg/apis/application"
)
const (
@@ -14,12 +15,42 @@ const (
// SecretTypeCluster indicates a secret type of cluster
SecretTypeCluster = "cluster"
// AuthCookieName is the HTTP cookie name where we store our auth token
AuthCookieName = "argocd.token"
// ResourcesFinalizerName is the name of the application CRD finalizer
ResourcesFinalizerName = "resources-finalizer." + MetadataPrefix
// KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster
KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc"
)
const (
ArgoCDAdminUsername = "admin"
ArgoCDSecretName = "argocd-secret"
ArgoCDConfigMapName = "argocd-cm"
ArgoCDAdminUsername = "admin"
ArgoCDSecretName = "argocd-secret"
ArgoCDConfigMapName = "argocd-cm"
ArgoCDRBACConfigMapName = "argocd-rbac-cm"
)
const (
// DexAPIEndpoint is the endpoint where we serve the Dex API server
DexAPIEndpoint = "/api/dex"
// LoginEndpoint is ArgoCD's shorthand login endpoint which redirects to dex's OAuth 2.0 provider's consent page
LoginEndpoint = "/auth/login"
// CallbackEndpoint is ArgoCD's final callback endpoint we reach after OAuth 2.0 login flow has been completed
CallbackEndpoint = "/auth/callback"
// ArgoCDClientAppName is name of the Oauth client app used when registering our web app to dex
ArgoCDClientAppName = "ArgoCD"
// ArgoCDClientAppID is the Oauth client ID we will use when registering our app to dex
ArgoCDClientAppID = "argo-cd"
// ArgoCDCLIClientAppName is name of the Oauth client app used when registering our CLI to dex
ArgoCDCLIClientAppName = "ArgoCD CLI"
// ArgoCDCLIClientAppID is the Oauth client ID we will use when registering our CLI to dex
ArgoCDCLIClientAppID = "argo-cd-cli"
// EnvVarSSODebug is an environment variable to enable additional OAuth debugging in the API server
EnvVarSSODebug = "ARGOCD_SSO_DEBUG"
// EnvVarRBACDebug is an environment variable to enable additional RBAC debugging in the API server
EnvVarRBACDebug = "ARGOCD_RBAC_DEBUG"
)
var (
@@ -29,11 +60,23 @@ var (
// LabelKeySecretType contains the type of argocd secret (either 'cluster' or 'repo')
LabelKeySecretType = MetadataPrefix + "/secret-type"
// AnnotationConnectionStatus contains connection state status
AnnotationConnectionStatus = MetadataPrefix + "/connection-status"
// AnnotationConnectionMessage contains additional information about connection status
AnnotationConnectionMessage = MetadataPrefix + "/connection-message"
// AnnotationConnectionModifiedAt contains timestamp when connection state had been modified
AnnotationConnectionModifiedAt = MetadataPrefix + "/connection-modified-at"
// LabelKeyApplicationControllerInstanceID is the label which allows to separate application among multiple running application controllers.
LabelKeyApplicationControllerInstanceID = application.ApplicationFullName + "/controller-instanceid"
// LabelApplicationName is the label which indicates that resource belongs to application with the specified name
LabelApplicationName = application.ApplicationFullName + "/app-name"
// AnnotationKeyRefresh is the annotation key in the application which is updated with an
// arbitrary value (i.e. timestamp) on a git event, to force the controller to wake up and
// re-evaluate the application
AnnotationKeyRefresh = application.ApplicationFullName + "/refresh"
)
// ArgoCDManagerServiceAccount is the name of the service account for managing a cluster

560
controller/appcontroller.go Normal file
View File

@@ -0,0 +1,560 @@
package controller
import (
"context"
"encoding/json"
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/argoproj/argo-cd/common"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
appinformers "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
	// watchResourcesRetryTimeout is the delay between attempts to (re)establish
	// cluster/resource watches after a failure.
	watchResourcesRetryTimeout = 10 * time.Second
	// updateOperationStateTimeout is the retry delay when persisting an
	// application's operation state fails.
	updateOperationStateTimeout = 1 * time.Second
)
// ApplicationController is the controller for application resources.
type ApplicationController struct {
	// namespace is the namespace in which Application resources are managed.
	namespace            string
	kubeClientset        kubernetes.Interface
	applicationClientset appclientset.Interface
	// appRefreshQueue holds "<namespace>/<name>" keys of apps needing a status refresh.
	appRefreshQueue workqueue.RateLimitingInterface
	// appOperationQueue holds keys of apps with a pending operation or deletion.
	appOperationQueue workqueue.RateLimitingInterface
	appInformer       cache.SharedIndexInformer
	appStateManager   AppStateManager
	appHealthManager  AppHealthManager
	// statusRefreshTimeout is set from the informer resync period at construction.
	statusRefreshTimeout time.Duration
	db                   db.ArgoDB
	// forceRefreshApps is the set of app names flagged for a forced refresh;
	// written by forceAppRefresh and consumed by isRefreshForced.
	// Guarded by forceRefreshAppsMutex.
	forceRefreshApps      map[string]bool
	forceRefreshAppsMutex *sync.Mutex
}
// ApplicationControllerConfig holds controller-instance settings; it is consumed
// by newApplicationInformer when the controller is constructed.
type ApplicationControllerConfig struct {
	// InstanceID separates applications among multiple running controllers
	// — presumably matched against the app's controller-instanceid label; the
	// informer construction is not visible here, so confirm against it.
	InstanceID string
	// Namespace is the namespace the controller instance operates in.
	Namespace string
}
// NewApplicationController creates new instance of ApplicationController.
func NewApplicationController(
	namespace string,
	kubeClientset kubernetes.Interface,
	applicationClientset appclientset.Interface,
	db db.ArgoDB,
	appStateManager AppStateManager,
	appHealthManager AppHealthManager,
	appResyncPeriod time.Duration,
	config *ApplicationControllerConfig,
) *ApplicationController {
	// Both work queues use the default (exponential backoff) rate limiter.
	refreshQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	operationQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	ctrl := ApplicationController{
		namespace:             namespace,
		kubeClientset:         kubeClientset,
		applicationClientset:  applicationClientset,
		db:                    db,
		appRefreshQueue:       refreshQueue,
		appOperationQueue:     operationQueue,
		appStateManager:       appStateManager,
		appHealthManager:      appHealthManager,
		appInformer:           newApplicationInformer(applicationClientset, refreshQueue, operationQueue, appResyncPeriod, config),
		statusRefreshTimeout:  appResyncPeriod,
		forceRefreshApps:      map[string]bool{},
		forceRefreshAppsMutex: &sync.Mutex{},
	}
	return &ctrl
}
// Run starts the Application CRD controller.
func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int, operationProcessors int) {
	defer runtime.HandleCrash()
	// NOTE(review): only appRefreshQueue is shut down here; appOperationQueue is
	// never shut down — confirm this is intentional.
	defer ctrl.appRefreshQueue.ShutDown()
	// Start the shared app informer and the per-cluster resource watches.
	go ctrl.appInformer.Run(ctx.Done())
	go ctrl.watchAppsResources()
	if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced) {
		log.Error("Timed out waiting for caches to sync")
		return
	}
	// Fan out workers: statusProcessors goroutines drain the refresh queue and
	// operationProcessors goroutines drain the operation queue, each restarting
	// every second via wait.Until until the context is done.
	for i := 0; i < statusProcessors; i++ {
		go wait.Until(func() {
			for ctrl.processAppRefreshQueueItem() {
			}
		}, time.Second, ctx.Done())
	}
	for i := 0; i < operationProcessors; i++ {
		go wait.Until(func() {
			for ctrl.processAppOperationQueueItem() {
			}
		}, time.Second, ctx.Done())
	}
	// Block until the controller context is cancelled.
	<-ctx.Done()
}
// forceAppRefresh marks the named application for a forced refresh. The flag
// is consumed (and cleared) by isRefreshForced.
func (ctrl *ApplicationController) forceAppRefresh(appName string) {
	ctrl.forceRefreshAppsMutex.Lock()
	defer ctrl.forceRefreshAppsMutex.Unlock()
	ctrl.forceRefreshApps[appName] = true
}
// isRefreshForced reports whether a forced refresh was requested for the named
// application. The flag is one-shot: checking it also clears it.
func (ctrl *ApplicationController) isRefreshForced(appName string) bool {
	ctrl.forceRefreshAppsMutex.Lock()
	defer ctrl.forceRefreshAppsMutex.Unlock()
	if _, forced := ctrl.forceRefreshApps[appName]; forced {
		delete(ctrl.forceRefreshApps, appName)
		return true
	}
	return false
}
// watchClusterResources watches for resource changes annotated with application label on specified cluster and schedule corresponding app refresh.
func (ctrl *ApplicationController) watchClusterResources(ctx context.Context, item appv1.Cluster) {
	config := item.RESTConfig()
	retryUntilSucceed(func() error {
		// Watch every resource in the cluster carrying the app-name label.
		ch, err := kube.WatchResourcesWithLabel(ctx, config, "", common.LabelApplicationName)
		if err != nil {
			return err
		}
		for event := range ch {
			eventObj := event.Object.(*unstructured.Unstructured)
			objLabels := eventObj.GetLabels()
			if objLabels == nil {
				objLabels = make(map[string]string)
			}
			if appName, ok := objLabels[common.LabelApplicationName]; ok {
				// Schedule a forced refresh of the application that owns this resource.
				ctrl.forceAppRefresh(appName)
				ctrl.appRefreshQueue.Add(ctrl.namespace + "/" + appName)
			}
		}
		// The channel closing means the watch ended; return an error so
		// retryUntilSucceed re-establishes it (until ctx is done).
		return fmt.Errorf("resource updates channel has closed")
	}, fmt.Sprintf("watch app resources on %s", config.Host), ctx, watchResourcesRetryTimeout)
}
// WatchAppsResources watches for resource changes annotated with application label on all registered clusters and schedule corresponding app refresh.
func (ctrl *ApplicationController) watchAppsResources() {
	// Cluster server URL -> cancel func for that cluster's watch goroutine.
	// Only mutated from the WatchClusters callback.
	watchingClusters := make(map[string]context.CancelFunc)
	retryUntilSucceed(func() error {
		return ctrl.db.WatchClusters(context.Background(), func(event *db.ClusterEvent) {
			cancel, ok := watchingClusters[event.Cluster.Server]
			if event.Type == watch.Deleted && ok {
				// Cluster was removed: stop watching its resources.
				cancel()
				delete(watchingClusters, event.Cluster.Server)
			} else if event.Type != watch.Deleted && !ok {
				// New (or not-yet-watched) cluster: start watching its resources.
				ctx, cancel := context.WithCancel(context.Background())
				watchingClusters[event.Cluster.Server] = cancel
				go ctrl.watchClusterResources(ctx, *event.Cluster)
			}
		})
	}, "watch clusters", context.Background(), watchResourcesRetryTimeout)
	// context.Background() is never done, so this blocks forever, keeping the
	// goroutine (and its retry loop above) alive for the process lifetime.
	<-context.Background().Done()
}
// retryUntilSucceed keeps retrying the given action with the specified delay
// until the action succeeds or the provided context is done.
//
// Fix over the previous revision: cancellation was tracked with a plain bool
// written from a watcher goroutine and read by this loop without any
// synchronization — a data race. The context is now consulted directly, and the
// inter-retry wait is cancellation-aware instead of an unconditional sleep.
func retryUntilSucceed(action func() error, desc string, ctx context.Context, timeout time.Duration) {
	for {
		err := action()
		if err == nil {
			return
		}
		// Stop immediately if the context was cancelled while the action ran.
		if ctx.Err() != nil {
			log.Infof("Stop retrying %s", desc)
			return
		}
		log.Warnf("Failed to %s: %v, retrying in %v", desc, err, timeout)
		// Wait out the retry delay, but abort promptly on cancellation.
		select {
		case <-ctx.Done():
			log.Infof("Stop retrying %s", desc)
			return
		case <-time.After(timeout):
		}
	}
}
// processAppOperationQueueItem pops one key off the operation queue and, if the
// corresponding application has a requested operation or a cascaded deletion in
// progress, processes it. The return value tells the worker loop whether to
// keep processing (false only when the queue has been shut down).
func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext bool) {
	appKey, shutdown := ctrl.appOperationQueue.Get()
	if shutdown {
		processNext = false
		return
	} else {
		processNext = true
	}
	// Always mark the item Done and survive panics from the handlers below.
	defer func() {
		if r := recover(); r != nil {
			log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
		}
		ctrl.appOperationQueue.Done(appKey)
	}()
	obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
	if err != nil {
		log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
		return
	}
	if !exists {
		// This happens after app was deleted, but the work queue still had an entry for it.
		return
	}
	app, ok := obj.(*appv1.Application)
	if !ok {
		log.Warnf("Key '%s' in index is not an application", appKey)
		return
	}
	// A requested operation takes precedence; otherwise finalize a cascaded delete.
	if app.Operation != nil {
		ctrl.processRequestedAppOperation(app)
	} else if app.DeletionTimestamp != nil && app.CascadedDeletion() {
		ctrl.finalizeApplicationDeletion(app)
	}
	return
}
// finalizeApplicationDeletion deletes the cluster resources labeled with the
// application's name and then removes the cascaded-deletion finalizer so
// Kubernetes can complete deletion of the Application object. Any failure is
// logged and surfaced as a DeletionError condition on the application.
func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application) {
	log.Infof("Deleting resources for application %s", app.Name)
	// Get refreshed application info, since informer app copy might be stale
	app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, metav1.GetOptions{})
	if err != nil {
		if !errors.IsNotFound(err) {
			log.Errorf("Unable to get refreshed application info prior deleting resources: %v", err)
		}
		// NotFound means the app is already gone; nothing to finalize.
		return
	}
	clst, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
	if err == nil {
		config := clst.RESTConfig()
		// Delete every resource in the destination namespace labeled with this app.
		err = kube.DeleteResourceWithLabel(config, app.Spec.Destination.Namespace, common.LabelApplicationName, app.Name)
		if err == nil {
			// Resources gone: drop the finalizer and patch only the finalizer list.
			app.SetCascadedDeletion(false)
			var patch []byte
			patch, err = json.Marshal(map[string]interface{}{
				"metadata": map[string]interface{}{
					"finalizers": app.Finalizers,
				},
			})
			if err == nil {
				_, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(app.Name, types.MergePatchType, patch)
			}
		}
	}
	// err carries the first failure from any of the steps above.
	if err != nil {
		log.Errorf("Unable to delete application resources: %v", err)
		ctrl.setAppCondition(app, appv1.ApplicationCondition{
			Type:    appv1.ApplicationConditionDeletionError,
			Message: err.Error(),
		})
	} else {
		log.Infof("Successfully deleted resources for application %s", app.Name)
	}
}
// setAppCondition upserts the given condition into the application's status
// (replacing any existing condition of the same type) and persists the updated
// condition list with a merge patch. Failures are logged, not returned.
func (ctrl *ApplicationController) setAppCondition(app *appv1.Application, condition appv1.ApplicationCondition) {
	// Replace an existing condition of the same type in place, if present.
	replaced := false
	for i := range app.Status.Conditions {
		if app.Status.Conditions[i].Type == condition.Type {
			app.Status.Conditions[i] = condition
			replaced = true
			break
		}
	}
	if !replaced {
		app.Status.Conditions = append(app.Status.Conditions, condition)
	}
	patch, err := json.Marshal(map[string]interface{}{
		"status": map[string]interface{}{
			"conditions": app.Status.Conditions,
		},
	})
	if err == nil {
		_, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(app.Name, types.MergePatchType, patch)
	}
	if err != nil {
		log.Errorf("Unable to set application condition: %v", err)
	}
}
// processRequestedAppOperation executes the operation requested on the
// application (sync or rollback) and records the resulting OperationState.
// A panic anywhere in the operation is recovered and recorded as OperationError.
//
// Fix over the previous revision: the rollback branch took the address of the
// range variable (`&info`); it now indexes into the history slice so the pointer
// refers to the slice element itself rather than the shared loop variable.
func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Application) {
	state := appv1.OperationState{Phase: appv1.OperationRunning, Operation: *app.Operation, StartedAt: metav1.Now()}
	// Recover from any unexpected panics and automatically set the status to be failed
	defer func() {
		if r := recover(); r != nil {
			log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
			// TODO: consider adding Error OperationStatus in addition to Failed
			state.Phase = appv1.OperationError
			if rerr, ok := r.(error); ok {
				state.Message = rerr.Error()
			} else {
				state.Message = fmt.Sprintf("%v", r)
			}
			ctrl.setOperationState(app.Name, state, app.Operation)
		}
	}()
	if app.Status.OperationState != nil && !app.Status.OperationState.Phase.Completed() {
		// If we get here, we are about process an operation but we notice it is already Running.
		// We need to detect if the controller crashed before completing the operation, or if the
		// the app object we pulled off the informer is simply stale and doesn't reflect the fact
		// that the operation is completed. We don't want to perform the operation again. To detect
		// this, always retrieve the latest version to ensure it is not stale.
		freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
		if err != nil {
			log.Errorf("Failed to retrieve latest application state: %v", err)
			return
		}
		if freshApp.Status.OperationState == nil || freshApp.Status.OperationState.Phase.Completed() {
			log.Infof("Skipping operation on stale application state (%s)", app.ObjectMeta.Name)
			return
		}
		log.Warnf("Found interrupted application operation %s %v", app.ObjectMeta.Name, app.Status.OperationState)
	} else {
		// Mark the operation as Running before starting work.
		ctrl.setOperationState(app.Name, state, app.Operation)
	}
	if app.Operation.Sync != nil {
		opRes := ctrl.appStateManager.SyncAppState(app, app.Operation.Sync.Revision, nil, app.Operation.Sync.DryRun, app.Operation.Sync.Prune)
		state.Phase = opRes.Phase
		state.Message = opRes.Message
		state.SyncResult = opRes.SyncResult
	} else if app.Operation.Rollback != nil {
		// Look up the historical deployment by ID. Index into the slice rather
		// than taking the address of the range variable.
		var deploymentInfo *appv1.DeploymentInfo
		for i := range app.Status.History {
			if app.Status.History[i].ID == app.Operation.Rollback.ID {
				deploymentInfo = &app.Status.History[i]
				break
			}
		}
		if deploymentInfo == nil {
			state.Phase = appv1.OperationFailed
			state.Message = fmt.Sprintf("application %s does not have deployment with id %v", app.Name, app.Operation.Rollback.ID)
		} else {
			opRes := ctrl.appStateManager.SyncAppState(app, deploymentInfo.Revision, &deploymentInfo.ComponentParameterOverrides, app.Operation.Rollback.DryRun, app.Operation.Rollback.Prune)
			state.Phase = opRes.Phase
			state.Message = opRes.Message
			state.RollbackResult = opRes.SyncResult
		}
	} else {
		state.Phase = appv1.OperationFailed
		state.Message = "Invalid operation request"
	}
	// Record the terminal (or failed) state of the operation.
	ctrl.setOperationState(app.Name, state, app.Operation)
}
// setOperationState persists the given operation state onto the application via
// a JSON merge patch, retrying until the update succeeds or the timeout expires.
// While the operation is incomplete, app.operation stays populated to block any
// concurrent operation; on completion it is cleared and FinishedAt is stamped.
func (ctrl *ApplicationController) setOperationState(appName string, state appv1.OperationState, operation *appv1.Operation) {
	retryUntilSucceed(func() error {
		if state.Phase == "" {
			// expose any bugs where we neglect to set phase
			panic("no phase was set")
		}
		var pendingOp *appv1.Operation
		if state.Phase.Completed() {
			finished := metav1.Now()
			state.FinishedAt = &finished
		} else {
			// Operation still running: keep app.operation populated so nothing
			// else can start; a nil value in the patch clears it.
			pendingOp = operation
		}
		patch, err := json.Marshal(map[string]interface{}{
			"status": map[string]interface{}{
				"operationState": state,
			},
			"operation": pendingOp,
		})
		if err != nil {
			return err
		}
		appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace)
		if _, err = appClient.Patch(appName, types.MergePatchType, patch); err != nil {
			return err
		}
		log.Infof("updated '%s' operation (phase: %s)", appName, state.Phase)
		return nil
	}, "Update application operation state", context.Background(), updateOperationStateTimeout)
}
// processAppRefreshQueueItem pops one key off the app refresh queue and, if the
// app is due for a refresh (forced, or stale per statusRefreshTimeout),
// recomputes its comparison result, parameters, and health, then persists them.
// Returns false only when the queue has been shut down, signalling the worker
// loop to exit.
//
// Fix: removed the `else` after the early return (idiomatic early-return form;
// behavior unchanged).
func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {
	appKey, shutdown := ctrl.appRefreshQueue.Get()
	if shutdown {
		return false
	}
	processNext = true
	defer func() {
		if r := recover(); r != nil {
			log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
		}
		// Mark the key done so the workqueue allows it to be re-queued later.
		ctrl.appRefreshQueue.Done(appKey)
	}()
	obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
	if err != nil {
		log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
		return
	}
	if !exists {
		// This happens after app was deleted, but the work queue still had an entry for it.
		return
	}
	app, ok := obj.(*appv1.Application)
	if !ok {
		log.Warnf("Key '%s' in index is not an application", appKey)
		return
	}
	isForceRefreshed := ctrl.isRefreshForced(app.Name)
	if isForceRefreshed || app.NeedRefreshAppStatus(ctrl.statusRefreshTimeout) {
		log.Infof("Refreshing application '%s' status (force refreshed: %v)", app.Name, isForceRefreshed)
		comparisonResult, parameters, healthState, err := ctrl.tryRefreshAppStatus(app.DeepCopy())
		if err != nil {
			// Record the refresh failure as an Error comparison so it is visible
			// in the app status rather than silently dropped.
			comparisonResult = &appv1.ComparisonResult{
				Status:     appv1.ComparisonStatusError,
				Error:      fmt.Sprintf("Failed to get application status for application '%s': %v", app.Name, err),
				ComparedTo: app.Spec.Source,
				ComparedAt: metav1.Time{Time: time.Now().UTC()},
			}
			parameters = nil
			healthState = &appv1.HealthStatus{Status: appv1.HealthStatusUnknown}
		}
		ctrl.updateAppStatus(app.Name, app.Namespace, comparisonResult, parameters, *healthState)
	}
	return
}
// tryRefreshAppStatus recomputes an application's comparison result, component
// parameters, and health by delegating to the state and health managers.
// Any failure aborts the refresh and is returned to the caller.
func (ctrl *ApplicationController) tryRefreshAppStatus(app *appv1.Application) (*appv1.ComparisonResult, *[]appv1.ComponentParameter, *appv1.HealthStatus, error) {
	compRes, manifestInfo, err := ctrl.appStateManager.CompareAppState(app)
	if err != nil {
		return nil, nil, nil, err
	}
	log.Infof("App %s comparison result: prev: %s. current: %s", app.Name, app.Status.ComparisonResult.Status, compRes.Status)
	// Flatten the pointer slice returned by the manifest service into values.
	params := make([]appv1.ComponentParameter, 0, len(manifestInfo.Params))
	for _, p := range manifestInfo.Params {
		params = append(params, *p)
	}
	health, err := ctrl.appHealthManager.GetAppHealth(app.Spec.Destination.Server, app.Spec.Destination.Namespace, compRes)
	if err != nil {
		return nil, nil, nil, err
	}
	return compRes, &params, health, nil
}
// updateAppStatus merge-patches the application's status subfields (comparison
// result, parameters, health). Failures are logged but not propagated — the
// next refresh cycle will retry naturally.
func (ctrl *ApplicationController) updateAppStatus(
	appName string, namespace string, comparisonResult *appv1.ComparisonResult, parameters *[]appv1.ComponentParameter, healthState appv1.HealthStatus) {
	patch, err := json.Marshal(map[string]interface{}{
		"status": map[string]interface{}{
			"comparisonResult": comparisonResult,
			"parameters":       parameters,
			"health":           healthState,
		},
	})
	if err == nil {
		appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(namespace)
		_, err = appClient.Patch(appName, types.MergePatchType, patch)
	}
	if err != nil {
		log.Warnf("Error updating application: %v", err)
	} else {
		log.Info("Application update successful")
	}
}
// newApplicationInformer builds a shared informer over Application resources in
// the configured namespace, filtered by the controller instance-ID label, and
// enqueues keys on add/update (both queues) and delete (refresh queue only).
func newApplicationInformer(
	appClientset appclientset.Interface,
	appQueue workqueue.RateLimitingInterface,
	appOperationQueue workqueue.RateLimitingInterface,
	appResyncPeriod time.Duration,
	config *ApplicationControllerConfig) cache.SharedIndexInformer {
	// Restrict the watch to apps owned by this controller instance: either the
	// instance-ID label equals config.InstanceID, or (for the default instance)
	// the label must be absent.
	tweakOptions := func(options *metav1.ListOptions) {
		var req *labels.Requirement
		var err error
		if config.InstanceID == "" {
			req, err = labels.NewRequirement(common.LabelKeyApplicationControllerInstanceID, selection.DoesNotExist, nil)
		} else {
			req, err = labels.NewRequirement(common.LabelKeyApplicationControllerInstanceID, selection.Equals, []string{config.InstanceID})
		}
		if err != nil {
			panic(err)
		}
		options.FieldSelector = fields.Everything().String()
		options.LabelSelector = labels.NewSelector().Add(*req).String()
	}
	factory := appinformers.NewFilteredSharedInformerFactory(appClientset, appResyncPeriod, config.Namespace, tweakOptions)
	informer := factory.Argoproj().V1alpha1().Applications().Informer()
	enqueueBoth := func(obj interface{}) {
		if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
			appQueue.Add(key)
			appOperationQueue.Add(key)
		}
	}
	informer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: enqueueBoth,
			UpdateFunc: func(old, new interface{}) {
				enqueueBoth(new)
			},
			DeleteFunc: func(obj interface{}) {
				// IndexerInformer uses a delta queue, therefore for deletes we have to use this
				// key function.
				if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
					appQueue.Add(key)
				}
			},
		},
	)
	return informer
}

View File

@@ -1,253 +0,0 @@
package controller
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/util/diff"
kubeutil "github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
)
// AppComparator defines methods which allow to compare application spec and actual application state.
type AppComparator interface {
	// CompareAppState compares the given target (expected) manifests against the
	// live state of the app's resources in the specified cluster and namespace.
	CompareAppState(server string, namespace string, targetObjs []*unstructured.Unstructured, app *v1alpha1.Application) (*v1alpha1.ComparisonResult, error)
}
// KsonnetAppComparator allows to compare application using KSonnet CLI
type KsonnetAppComparator struct {
	// clusterService resolves cluster credentials (REST config) by server URL.
	clusterService cluster.ClusterServiceServer
}
// groupLiveObjects deduplicates the list of live kubernetes resources, keyed by
// "Kind:name". When a live resource has a corresponding target (expected)
// resource, the live version whose apiVersion matches the target's is chosen;
// otherwise an arbitrary single live version is kept.
//
// Fix: removed the redundant explicit empty-slice initialization before append
// (append handles a nil slice), and repaired the garbled doc comment.
func (ks *KsonnetAppComparator) groupLiveObjects(liveObjs []*unstructured.Unstructured, targetObjs []*unstructured.Unstructured) map[string]*unstructured.Unstructured {
	targetByFullName := make(map[string]*unstructured.Unstructured)
	for _, obj := range targetObjs {
		targetByFullName[getResourceFullName(obj)] = obj
	}
	// Bucket live objects by full name; multiple apiVersions of the same
	// resource may appear in the list.
	liveListByFullName := make(map[string][]*unstructured.Unstructured)
	for _, obj := range liveObjs {
		fullName := getResourceFullName(obj)
		liveListByFullName[fullName] = append(liveListByFullName[fullName], obj)
	}
	liveByFullName := make(map[string]*unstructured.Unstructured)
	for fullName, list := range liveListByFullName {
		var liveObj *unstructured.Unstructured
		if targetObj := targetByFullName[fullName]; targetObj != nil {
			// Prefer the live version matching the target's apiVersion.
			for i := range list {
				if list[i].GetAPIVersion() == targetObj.GetAPIVersion() {
					liveObj = list[i]
					break
				}
			}
		} else {
			liveObj = list[0]
		}
		if liveObj != nil {
			liveByFullName[getResourceFullName(liveObj)] = liveObj
		}
	}
	return liveByFullName
}
// CompareAppState compares application spec and real app state using KSonnet
func (ks *KsonnetAppComparator) CompareAppState(
	server string,
	namespace string,
	targetObjs []*unstructured.Unstructured,
	app *v1alpha1.Application) (*v1alpha1.ComparisonResult, error) {
	log.Infof("Comparing app %s state in cluster %s (namespace: %s)", app.ObjectMeta.Name, server, namespace)
	// Get the REST config for the cluster corresponding to the environment
	clst, err := ks.clusterService.Get(context.Background(), &cluster.ClusterQuery{Server: server})
	if err != nil {
		return nil, err
	}
	restConfig := clst.RESTConfig()
	// Retrieve the live versions of the objects
	liveObjs, err := kubeutil.GetResourcesWithLabel(restConfig, namespace, common.LabelApplicationName, app.Name)
	if err != nil {
		return nil, err
	}
	liveObjByFullName := ks.groupLiveObjects(liveObjs, targetObjs)
	// controlledLiveObj[i] is the live counterpart of targetObjs[i] (nil if missing).
	controlledLiveObj := make([]*unstructured.Unstructured, len(targetObjs))
	// Move live resources which have corresponding target object to controlledLiveObj
	dynClientPool := dynamic.NewDynamicClientPool(restConfig)
	disco, err := discovery.NewDiscoveryClientForConfig(restConfig)
	if err != nil {
		return nil, err
	}
	for i, targetObj := range targetObjs {
		fullName := getResourceFullName(targetObj)
		liveObj := liveObjByFullName[fullName]
		if liveObj == nil {
			// If we get here, it indicates we did not find the live resource when querying using
			// our app label. However, it is possible that the resource was created/modified outside
			// of ArgoCD. In order to determine that it is truly missing, we fall back to perform a
			// direct lookup of the resource by name. See issue #141
			gvk := targetObj.GroupVersionKind()
			dclient, err := dynClientPool.ClientForGroupVersionKind(gvk)
			if err != nil {
				return nil, err
			}
			apiResource, err := kubeutil.ServerResourceForGroupVersionKind(disco, gvk)
			if err != nil {
				return nil, err
			}
			liveObj, err = kubeutil.GetLiveResource(dclient, targetObj, apiResource, namespace)
			if err != nil {
				return nil, err
			}
		}
		controlledLiveObj[i] = liveObj
		// Remove claimed resources; whatever remains afterwards is unexpected.
		delete(liveObjByFullName, fullName)
	}
	// Move root level live resources to controlledLiveObj and add nil to targetObjs to indicate that target object is missing
	for fullName := range liveObjByFullName {
		liveObj := liveObjByFullName[fullName]
		if !hasParent(liveObj) {
			targetObjs = append(targetObjs, nil)
			controlledLiveObj = append(controlledLiveObj, liveObj)
		}
	}
	// Do the actual comparison
	diffResults, err := diff.DiffArray(targetObjs, controlledLiveObj)
	if err != nil {
		return nil, err
	}
	// Aggregate per-resource status; any OutOfSync resource marks the whole app OutOfSync.
	comparisonStatus := v1alpha1.ComparisonStatusSynced
	resources := make([]v1alpha1.ResourceState, len(targetObjs))
	for i := 0; i < len(targetObjs); i++ {
		resState := v1alpha1.ResourceState{
			ChildLiveResources: make([]v1alpha1.ResourceNode, 0),
		}
		diffResult := diffResults.Diffs[i]
		if diffResult.Modified {
			// Set resource state to 'OutOfSync' since target and corresponding live resource are different
			resState.Status = v1alpha1.ComparisonStatusOutOfSync
			comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
		} else {
			resState.Status = v1alpha1.ComparisonStatusSynced
		}
		if targetObjs[i] == nil {
			resState.TargetState = "null"
			// Set resource state to 'OutOfSync' since target resource is missing and live resource is unexpected
			resState.Status = v1alpha1.ComparisonStatusOutOfSync
			comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
		} else {
			targetObjBytes, err := json.Marshal(targetObjs[i].Object)
			if err != nil {
				return nil, err
			}
			resState.TargetState = string(targetObjBytes)
		}
		if controlledLiveObj[i] == nil {
			resState.LiveState = "null"
			// Set resource state to 'OutOfSync' since target resource present but corresponding live resource is missing
			resState.Status = v1alpha1.ComparisonStatusOutOfSync
			comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
		} else {
			liveObjBytes, err := json.Marshal(controlledLiveObj[i].Object)
			if err != nil {
				return nil, err
			}
			resState.LiveState = string(liveObjBytes)
		}
		resources[i] = resState
	}
	// Attach child resource trees (e.g. ReplicaSets/Pods under a Deployment) to
	// each controlled live resource; getChildren consumes entries from liveObjByFullName.
	for i, resource := range resources {
		liveResource := controlledLiveObj[i]
		if liveResource != nil {
			childResources, err := getChildren(liveResource, liveObjByFullName)
			if err != nil {
				return nil, err
			}
			resource.ChildLiveResources = childResources
			resources[i] = resource
		}
	}
	compResult := v1alpha1.ComparisonResult{
		ComparedTo: app.Spec.Source,
		ComparedAt: metav1.Time{Time: time.Now().UTC()},
		Server:     clst.Server,
		Namespace:  namespace,
		Resources:  resources,
		Status:     comparisonStatus,
	}
	return &compResult, nil
}
// hasParent reports whether the live object is owned by another resource
// (Endpoints are always treated as owned by their Service).
func hasParent(obj *unstructured.Unstructured) bool {
	// TODO: remove special case after Service and Endpoint get explicit relationship ( https://github.com/kubernetes/kubernetes/issues/28483 )
	if obj.GetKind() == kubeutil.EndpointsKind {
		return true
	}
	return metav1.GetControllerOf(obj) != nil
}
// isControlledBy reports whether obj is owned by parent, treating an Endpoints
// object as owned by the Service of the same name.
func isControlledBy(obj *unstructured.Unstructured, parent *unstructured.Unstructured) bool {
	// TODO: remove special case after Service and Endpoint get explicit relationship ( https://github.com/kubernetes/kubernetes/issues/28483 )
	endpointsOfService := obj.GetKind() == kubeutil.EndpointsKind && parent.GetKind() == kubeutil.ServiceKind
	if endpointsOfService {
		return obj.GetName() == parent.GetName()
	}
	return metav1.IsControlledBy(obj, parent)
}
// getChildren recursively collects the resource tree rooted at parent from
// liveObjByFullName, removing each matched child from the map so that a
// resource is claimed by at most one parent.
// NOTE: deleting map entries while ranging over the same map is well-defined
// in Go; entries deleted during iteration are simply not visited.
func getChildren(parent *unstructured.Unstructured, liveObjByFullName map[string]*unstructured.Unstructured) ([]v1alpha1.ResourceNode, error) {
	children := make([]v1alpha1.ResourceNode, 0)
	for fullName, obj := range liveObjByFullName {
		if isControlledBy(obj, parent) {
			delete(liveObjByFullName, fullName)
			childResource := v1alpha1.ResourceNode{}
			json, err := json.Marshal(obj)
			if err != nil {
				return nil, err
			}
			childResource.State = string(json)
			// Recurse to attach this child's own children (grandchildren of parent).
			childResourceChildren, err := getChildren(obj, liveObjByFullName)
			if err != nil {
				return nil, err
			}
			childResource.Children = childResourceChildren
			children = append(children, childResource)
		}
	}
	return children, nil
}
// getResourceFullName returns the "Kind:name" key used to match live and target resources.
func getResourceFullName(obj *unstructured.Unstructured) string {
	return obj.GetKind() + ":" + obj.GetName()
}
// NewKsonnetAppComparator creates new instance of Ksonnet app comparator
func NewKsonnetAppComparator(clusterService cluster.ClusterServiceServer) AppComparator {
	comparator := KsonnetAppComparator{clusterService: clusterService}
	return &comparator
}

View File

@@ -1,387 +0,0 @@
package controller
import (
"context"
"encoding/json"
"fmt"
"time"
"sync"
"github.com/argoproj/argo-cd/common"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
appinformers "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/server/cluster"
apireposerver "github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/util"
argoutil "github.com/argoproj/argo-cd/util/argo"
"github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
	// watchResourcesRetryTimeout is the delay between retries when a cluster
	// resource watch fails or its update channel closes.
	watchResourcesRetryTimeout = 10 * time.Second
)
// ApplicationController is the controller for application resources.
type ApplicationController struct {
	namespace            string
	repoClientset        reposerver.Clientset
	kubeClientset        kubernetes.Interface
	applicationClientset appclientset.Interface
	appQueue             workqueue.RateLimitingInterface
	appInformer          cache.SharedIndexInformer
	appComparator        AppComparator
	// statusRefreshTimeout is how stale an app status may be before it is recomputed.
	statusRefreshTimeout time.Duration
	apiRepoService       apireposerver.RepositoryServiceServer
	apiClusterService    *cluster.Server
	// forceRefreshApps tracks apps whose next refresh must bypass the staleness
	// check; guarded by forceRefreshAppsMutex.
	forceRefreshApps      map[string]bool
	forceRefreshAppsMutex *sync.Mutex
}
// ApplicationControllerConfig holds the namespace and controller instance ID
// used to scope which Application resources this controller manages.
type ApplicationControllerConfig struct {
	InstanceID string
	Namespace  string
}
// NewApplicationController creates new instance of ApplicationController.
func NewApplicationController(
	namespace string,
	kubeClientset kubernetes.Interface,
	applicationClientset appclientset.Interface,
	repoClientset reposerver.Clientset,
	apiRepoService apireposerver.RepositoryServiceServer,
	apiClusterService *cluster.Server,
	appComparator AppComparator,
	appResyncPeriod time.Duration,
	config *ApplicationControllerConfig,
) *ApplicationController {
	// The queue is shared between the controller and the informer event handlers.
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	ctrl := ApplicationController{
		namespace:             namespace,
		kubeClientset:         kubeClientset,
		applicationClientset:  applicationClientset,
		repoClientset:         repoClientset,
		appQueue:              queue,
		apiRepoService:        apiRepoService,
		apiClusterService:     apiClusterService,
		appComparator:         appComparator,
		appInformer:           newApplicationInformer(applicationClientset, queue, appResyncPeriod, config),
		statusRefreshTimeout:  appResyncPeriod,
		forceRefreshApps:      map[string]bool{},
		forceRefreshAppsMutex: &sync.Mutex{},
	}
	return &ctrl
}
// Run starts the Application CRD controller.
func (ctrl *ApplicationController) Run(ctx context.Context, appWorkers int) {
	defer runtime.HandleCrash()
	defer ctrl.appQueue.ShutDown()
	go ctrl.appInformer.Run(ctx.Done())
	go ctrl.watchAppsResources()
	// Workers must not start until the informer cache is primed; otherwise they
	// would operate on an empty index.
	if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced) {
		log.Error("Timed out waiting for caches to sync")
		return
	}
	for i := 0; i < appWorkers; i++ {
		go wait.Until(ctrl.runWorker, time.Second, ctx.Done())
	}
	// Block until the context is cancelled; the deferred ShutDown then stops workers.
	<-ctx.Done()
}
// forceAppRefresh marks the named app so its next queue pass refreshes status
// unconditionally, bypassing the staleness check.
func (ctrl *ApplicationController) forceAppRefresh(appName string) {
	ctrl.forceRefreshAppsMutex.Lock()
	ctrl.forceRefreshApps[appName] = true
	ctrl.forceRefreshAppsMutex.Unlock()
}
// isRefreshForced reports whether a forced refresh was requested for the app,
// consuming (clearing) the request if present.
func (ctrl *ApplicationController) isRefreshForced(appName string) bool {
	ctrl.forceRefreshAppsMutex.Lock()
	defer ctrl.forceRefreshAppsMutex.Unlock()
	if _, forced := ctrl.forceRefreshApps[appName]; forced {
		delete(ctrl.forceRefreshApps, appName)
		return true
	}
	return false
}
// watchClusterResources watches for resource changes annotated with application label on specified cluster and schedule corresponding app refresh.
func (ctrl *ApplicationController) watchClusterResources(ctx context.Context, item appv1.Cluster) {
	config := item.RESTConfig()
	retryUntilSucceed(func() error {
		ch, err := kube.WatchResourcesWithLabel(ctx, config, "", common.LabelApplicationName)
		if err != nil {
			return err
		}
		for event := range ch {
			eventObj := event.Object.(*unstructured.Unstructured)
			objLabels := eventObj.GetLabels()
			if objLabels == nil {
				objLabels = make(map[string]string)
			}
			if appName, ok := objLabels[common.LabelApplicationName]; ok {
				// Schedule an unconditional status refresh of the owning app.
				ctrl.forceAppRefresh(appName)
				ctrl.appQueue.Add(ctrl.namespace + "/" + appName)
			}
		}
		// A closed channel means the watch dropped; returning an error makes
		// retryUntilSucceed re-establish it after the retry timeout.
		return fmt.Errorf("resource updates channel has closed")
	}, fmt.Sprintf("watch app resources on %s", config.Host), ctx, watchResourcesRetryTimeout)
}
// watchAppsResources watches for resource changes annotated with application label on all registered clusters and schedule corresponding app refresh.
func (ctrl *ApplicationController) watchAppsResources() {
	// One cancellable watch goroutine per registered cluster, keyed by server URL.
	watchingClusters := make(map[string]context.CancelFunc)
	retryUntilSucceed(func() error {
		return ctrl.apiClusterService.WatchClusters(context.Background(), func(event *cluster.ClusterEvent) {
			cancel, ok := watchingClusters[event.Cluster.Server]
			if event.Type == watch.Deleted && ok {
				// Cluster removed: stop watching its resources.
				cancel()
				delete(watchingClusters, event.Cluster.Server)
			} else if event.Type != watch.Deleted && !ok {
				// Newly seen cluster: start watching its resources.
				ctx, cancel := context.WithCancel(context.Background())
				watchingClusters[event.Cluster.Server] = cancel
				go ctrl.watchClusterResources(ctx, *event.Cluster)
			}
		})
	}, "watch clusters", context.Background(), watchResourcesRetryTimeout)
	// Background's Done channel is nil, so this receive blocks forever,
	// keeping the goroutine alive for the life of the process.
	<-context.Background().Done()
}
// retryUntilSucceed keeps retrying the given action with the specified delay
// between attempts until the action succeeds or the context is done.
//
// Fixes over the previous version:
//   - Removed the unsynchronized `ctxCompleted` flag written from a helper
//     goroutine and read from the loop (a data race); cancellation is now
//     observed via ctx.Err() / ctx.Done() directly.
//   - Removed the helper goroutine itself, which leaked for contexts that are
//     never cancelled (e.g. context.Background()).
//   - Removed the redundant `if err != nil` nested after the early return.
//   - The wait between retries now also aborts promptly on context
//     cancellation instead of always sleeping the full timeout.
func retryUntilSucceed(action func() error, desc string, ctx context.Context, timeout time.Duration) {
	for {
		err := action()
		if err == nil {
			return
		}
		if ctx.Err() != nil {
			log.Infof("Stop retrying %s", desc)
			return
		}
		log.Warnf("Failed to %s: %v, retrying in %v", desc, err, timeout)
		select {
		case <-time.After(timeout):
		case <-ctx.Done():
			log.Infof("Stop retrying %s", desc)
			return
		}
	}
}
// processNextItem pops one key off the app queue, refreshes the app's status if
// it is forced or stale, and reports whether the worker should keep processing
// (false only when the queue has been shut down).
func (ctrl *ApplicationController) processNextItem() bool {
	appKey, shutdown := ctrl.appQueue.Get()
	if shutdown {
		return false
	}
	defer ctrl.appQueue.Done(appKey)
	obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
	if err != nil {
		log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
		return true
	}
	if !exists {
		// This happens after app was deleted, but the work queue still had an entry for it.
		return true
	}
	app, ok := obj.(*appv1.Application)
	if !ok {
		log.Warnf("Key '%s' in index is not an application", appKey)
		return true
	}
	isForceRefreshed := ctrl.isRefreshForced(app.Name)
	if isForceRefreshed || app.NeedRefreshAppStatus(ctrl.statusRefreshTimeout) {
		log.Infof("Refreshing application '%s' status (force refreshed: %v)", app.Name, isForceRefreshed)
		status, err := ctrl.tryRefreshAppStatus(app.DeepCopy())
		if err != nil {
			// On refresh failure, keep the previous status but mark the
			// comparison as Error so the failure is visible.
			status = app.Status.DeepCopy()
			status.ComparisonResult = appv1.ComparisonResult{
				Status:     appv1.ComparisonStatusError,
				Error:      fmt.Sprintf("Failed to get application status for application '%s': %v", app.Name, err),
				ComparedTo: app.Spec.Source,
				ComparedAt: metav1.Time{Time: time.Now().UTC()},
			}
		}
		ctrl.updateAppStatus(app.Name, app.Namespace, status)
	}
	return true
}
// tryRefreshAppStatus regenerates the application's manifests from the repo
// server, compares them against live cluster state, and returns a new
// ApplicationStatus containing the comparison result and environment
// parameters. It does not persist the status; the caller does that.
//
// Fixes: removed a redundant nested `if err != nil` around the manifest
// json.Unmarshal error check, and the unnecessary nil-slice guard before
// ranging over ComponentParameterOverrides (ranging a nil slice is a no-op).
func (ctrl *ApplicationController) tryRefreshAppStatus(app *appv1.Application) (*appv1.ApplicationStatus, error) {
	conn, client, err := ctrl.repoClientset.NewRepositoryClient()
	if err != nil {
		return nil, err
	}
	defer util.Close(conn)
	repo, err := ctrl.apiRepoService.Get(context.Background(), &apireposerver.RepoQuery{Repo: app.Spec.Source.RepoURL})
	if err != nil {
		// If we couldn't retrieve from the repo service, assume public repositories
		repo = &appv1.Repository{
			Repo:     app.Spec.Source.RepoURL,
			Username: "",
			Password: "",
		}
	}
	overrides := make([]*appv1.ComponentParameter, len(app.Spec.Source.ComponentParameterOverrides))
	for i := range app.Spec.Source.ComponentParameterOverrides {
		// Copy the element before taking its address so each pointer is distinct.
		item := app.Spec.Source.ComponentParameterOverrides[i]
		overrides[i] = &item
	}
	revision := app.Spec.Source.TargetRevision
	manifestInfo, err := client.GenerateManifest(context.Background(), &repository.ManifestRequest{
		Repo:                        repo,
		Revision:                    revision,
		Path:                        app.Spec.Source.Path,
		Environment:                 app.Spec.Source.Environment,
		AppLabel:                    app.Name,
		ComponentParameterOverrides: overrides,
	})
	if err != nil {
		log.Errorf("Failed to load application manifest %v", err)
		return nil, err
	}
	targetObjs := make([]*unstructured.Unstructured, len(manifestInfo.Manifests))
	for i, manifestStr := range manifestInfo.Manifests {
		var obj unstructured.Unstructured
		if err := json.Unmarshal([]byte(manifestStr), &obj); err != nil {
			return nil, err
		}
		targetObjs[i] = &obj
	}
	server, namespace := argoutil.ResolveServerNamespace(app.Spec.Destination, manifestInfo)
	comparisonResult, err := ctrl.appComparator.CompareAppState(server, namespace, targetObjs, app)
	if err != nil {
		return nil, err
	}
	log.Infof("App %s comparison result: prev: %s. current: %s", app.Name, app.Status.ComparisonResult.Status, comparisonResult.Status)
	newStatus := app.Status
	newStatus.ComparisonResult = *comparisonResult
	paramsReq := repository.EnvParamsRequest{
		Repo:        repo,
		Revision:    revision,
		Path:        app.Spec.Source.Path,
		Environment: app.Spec.Source.Environment,
	}
	params, err := client.GetEnvParams(context.Background(), &paramsReq)
	if err != nil {
		return nil, err
	}
	newStatus.Parameters = make([]appv1.ComponentParameter, len(params.Params))
	for i := range params.Params {
		newStatus.Parameters[i] = *params.Params[i]
	}
	return &newStatus, nil
}
// runWorker drains the app queue until processNextItem signals shutdown.
func (ctrl *ApplicationController) runWorker() {
	for {
		if !ctrl.processNextItem() {
			return
		}
	}
}
// updateAppStatus writes the given status onto the app's live object (looked up
// from the informer index). Failures are logged but not propagated.
func (ctrl *ApplicationController) updateAppStatus(appName string, namespace string, status *appv1.ApplicationStatus) {
	appKey := fmt.Sprintf("%s/%s", namespace, appName)
	obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey)
	if err != nil {
		log.Warnf("Failed to get application '%s' from informer index: %+v", appKey, err)
		return
	}
	if !exists {
		// App no longer in the index (likely deleted); nothing to update.
		return
	}
	// Deep-copy before mutating: objects from the informer cache are shared.
	app := obj.(*appv1.Application).DeepCopy()
	app.Status = *status
	appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(namespace)
	if _, err := appClient.Update(app); err != nil {
		log.Warnf("Error updating application: %v", err)
	} else {
		log.Info("Application update successful")
	}
}
// newApplicationInformer builds a shared informer over Application resources in
// the configured namespace, filtered by the controller instance-ID label, and
// enqueues a work-queue key for every add, update, and delete event.
func newApplicationInformer(
	appClientset appclientset.Interface, appQueue workqueue.RateLimitingInterface, appResyncPeriod time.Duration, config *ApplicationControllerConfig) cache.SharedIndexInformer {
	// Restrict list/watch to apps owned by this controller instance: either the
	// instance-ID label equals config.InstanceID, or (default instance) the
	// label must be absent.
	tweakOptions := func(options *metav1.ListOptions) {
		var req *labels.Requirement
		var err error
		if config.InstanceID == "" {
			req, err = labels.NewRequirement(common.LabelKeyApplicationControllerInstanceID, selection.DoesNotExist, nil)
		} else {
			req, err = labels.NewRequirement(common.LabelKeyApplicationControllerInstanceID, selection.Equals, []string{config.InstanceID})
		}
		if err != nil {
			panic(err)
		}
		options.FieldSelector = fields.Everything().String()
		options.LabelSelector = labels.NewSelector().Add(*req).String()
	}
	factory := appinformers.NewFilteredSharedInformerFactory(appClientset, appResyncPeriod, config.Namespace, tweakOptions)
	informer := factory.Argoproj().V1alpha1().Applications().Informer()
	enqueue := func(obj interface{}) {
		if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
			appQueue.Add(key)
		}
	}
	informer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: enqueue,
			UpdateFunc: func(old, new interface{}) {
				enqueue(new)
			},
			DeleteFunc: func(obj interface{}) {
				// IndexerInformer uses a delta queue, therefore for deletes we have to use this
				// key function.
				if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
					appQueue.Add(key)
				}
			},
		},
	)
	return informer
}

161
controller/health.go Normal file
View File

@@ -0,0 +1,161 @@
package controller
import (
"context"
"encoding/json"
"fmt"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/kube"
"k8s.io/api/apps/v1"
coreV1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
	// maxHistoryCnt caps the number of deployment history entries retained.
	// NOTE(review): not referenced anywhere in this chunk — confirm its usage
	// elsewhere in the package.
	maxHistoryCnt = 5
)
// AppHealthManager produces an aggregate health status for an application from
// its comparison result.
type AppHealthManager interface {
	GetAppHealth(server string, namespace string, comparisonResult *appv1.ComparisonResult) (*appv1.HealthStatus, error)
}
// kubeAppHealthManager derives app health by querying live Kubernetes objects.
type kubeAppHealthManager struct {
	// db resolves cluster connection configuration by server URL.
	db        db.ArgoDB
	namespace string
}
// NewAppHealthManager returns an AppHealthManager backed by the given Argo DB.
func NewAppHealthManager(db db.ArgoDB, namespace string) AppHealthManager {
	mgr := kubeAppHealthManager{
		db:        db,
		namespace: namespace,
	}
	return &mgr
}
// getServiceHealth reports a Service as Healthy, except LoadBalancer services,
// which remain Progressing until at least one ingress hostname or IP is assigned.
func (ctrl *kubeAppHealthManager) getServiceHealth(config *rest.Config, namespace string, name string) (*appv1.HealthStatus, error) {
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	svc, err := clientSet.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	status := appv1.HealthStatusHealthy
	if svc.Spec.Type == coreV1.ServiceTypeLoadBalancer {
		status = appv1.HealthStatusProgressing
		for _, ingress := range svc.Status.LoadBalancer.Ingress {
			if ingress.Hostname != "" || ingress.IP != "" {
				status = appv1.HealthStatusHealthy
				break
			}
		}
	}
	return &appv1.HealthStatus{Status: status}, nil
}
// getDeploymentHealth classifies a Deployment's rollout state: Degraded when
// the progress deadline is exceeded, Progressing while replicas are still
// being updated/terminated/made available or the controller has not observed
// the latest generation, and Healthy otherwise. The condition chain checks
// updated, then terminating, then available replica counts in that order.
func (ctrl *kubeAppHealthManager) getDeploymentHealth(config *rest.Config, namespace string, name string) (*appv1.HealthStatus, error) {
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	deployment, err := clientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Only evaluate rollout progress once the controller has observed this generation.
	if deployment.Generation <= deployment.Status.ObservedGeneration {
		cond := getDeploymentCondition(deployment.Status, v1.DeploymentProgressing)
		if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
			return &appv1.HealthStatus{
				Status:        appv1.HealthStatusDegraded,
				StatusDetails: fmt.Sprintf("Deployment %q exceeded its progress deadline", name),
			}, nil
		} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
			return &appv1.HealthStatus{
				Status:        appv1.HealthStatusProgressing,
				StatusDetails: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
			}, nil
		} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
			return &appv1.HealthStatus{
				Status:        appv1.HealthStatusProgressing,
				StatusDetails: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...\n", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
			}, nil
		} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
			return &appv1.HealthStatus{
				Status:        appv1.HealthStatusProgressing,
				StatusDetails: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...\n", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),
			}, nil
		}
	} else {
		// NOTE(review): "less then" is a typo ("less than") in a user-facing
		// status string; fixing it changes runtime output, so it is only
		// flagged here.
		return &appv1.HealthStatus{
			Status:        appv1.HealthStatusProgressing,
			StatusDetails: "Waiting for rollout to finish: observed deployment generation less then desired generation",
		}, nil
	}
	return &appv1.HealthStatus{
		Status: appv1.HealthStatusHealthy,
	}, nil
}
// getDeploymentCondition returns a copy of the condition with the given type,
// or nil when the status does not contain it.
func getDeploymentCondition(status v1.DeploymentStatus, condType v1.DeploymentConditionType) *v1.DeploymentCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			cond := status.Conditions[i]
			return &cond
		}
	}
	return nil
}
// GetAppHealth computes per-resource health for every resource in the
// comparison result (writing it back into comparisonResult.Resources) and
// returns the aggregate app health. Aggregation precedence:
// Degraded > Progressing > Healthy; Unknown (missing live state) does not
// affect the aggregate.
func (ctrl *kubeAppHealthManager) GetAppHealth(server string, namespace string, comparisonResult *appv1.ComparisonResult) (*appv1.HealthStatus, error) {
	clst, err := ctrl.db.GetCluster(context.Background(), server)
	if err != nil {
		return nil, err
	}
	restConfig := clst.RESTConfig()
	appHealth := appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
	for i := range comparisonResult.Resources {
		resource := comparisonResult.Resources[i]
		if resource.LiveState == "null" {
			// No live object to inspect.
			resource.Health = appv1.HealthStatus{Status: appv1.HealthStatusUnknown}
		} else {
			var obj unstructured.Unstructured
			err := json.Unmarshal([]byte(resource.LiveState), &obj)
			if err != nil {
				return nil, err
			}
			// Only Deployments and Services have kind-specific health checks;
			// everything else is considered Healthy.
			switch obj.GetKind() {
			case kube.DeploymentKind:
				state, err := ctrl.getDeploymentHealth(restConfig, namespace, obj.GetName())
				if err != nil {
					return nil, err
				}
				resource.Health = *state
			case kube.ServiceKind:
				state, err := ctrl.getServiceHealth(restConfig, namespace, obj.GetName())
				if err != nil {
					return nil, err
				}
				resource.Health = *state
			default:
				resource.Health = appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
			}
			// Escalate the aggregate status; Degraded dominates Progressing.
			if resource.Health.Status == appv1.HealthStatusProgressing {
				if appHealth.Status == appv1.HealthStatusHealthy {
					appHealth.Status = appv1.HealthStatusProgressing
				}
			} else if resource.Health.Status == appv1.HealthStatusDegraded {
				if appHealth.Status == appv1.HealthStatusHealthy || appHealth.Status == appv1.HealthStatusProgressing {
					appHealth.Status = appv1.HealthStatusDegraded
				}
			}
		}
		// Write the per-resource health back into the result slice.
		comparisonResult.Resources[i] = resource
	}
	return &appHealth, nil
}

View File

@@ -1,33 +0,0 @@
// Code generated by mockery v1.0.0
package mocks
import mock "github.com/stretchr/testify/mock"
import v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
// AppComparator is an autogenerated mock type for the AppComparator type
type AppComparator struct {
	// Embeds testify's mock recorder, which tracks expected and actual calls.
	mock.Mock
}
// CompareAppState provides a mock function with given fields: appRepoPath, app
// It returns whatever the test configured via On("CompareAppState", ...):
// either the values produced by a registered callback, or the statically
// registered return values.
func (_m *AppComparator) CompareAppState(appRepoPath string, app *v1alpha1.Application) (*v1alpha1.ComparisonResult, error) {
	ret := _m.Called(appRepoPath, app)
	var r0 *v1alpha1.ComparisonResult
	// Prefer a registered function stub; otherwise fall back to a static value.
	if rf, ok := ret.Get(0).(func(string, *v1alpha1.Application) *v1alpha1.ComparisonResult); ok {
		r0 = rf(appRepoPath, app)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*v1alpha1.ComparisonResult)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(string, *v1alpha1.Application) error); ok {
		r1 = rf(appRepoPath, app)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

View File

@@ -0,0 +1,205 @@
package controller
import (
"context"
"encoding/json"
"time"
"runtime/debug"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
// SecretController watches cluster and repository secrets and keeps their
// connection-state annotations up to date.
type SecretController struct {
	kubeClient     kubernetes.Interface                  // client used to patch secret annotations
	secretQueue    workqueue.RateLimitingInterface       // work queue of secret keys to process
	secretInformer cache.SharedIndexInformer             // informer feeding the work queue
	repoClientset  reposerver.Clientset                  // used to probe repository connectivity
	namespace      string                                // namespace whose secrets are watched
}
// Run starts the secret informer and, once its cache has synced, launches a
// worker that drains the queue until ctx is cancelled.
func (ctrl *SecretController) Run(ctx context.Context) {
	go ctrl.secretInformer.Run(ctx.Done())
	synced := cache.WaitForCacheSync(ctx.Done(), ctrl.secretInformer.HasSynced)
	if !synced {
		log.Error("Timed out waiting for caches to sync")
		return
	}
	worker := func() {
		for ctrl.processSecret() {
		}
	}
	go wait.Until(worker, time.Second, ctx.Done())
}
// processSecret pops one key off the work queue and refreshes the connection
// state of the corresponding cluster or repository secret. It reports whether
// the caller should keep processing (false once the queue has shut down).
func (ctrl *SecretController) processSecret() (processNext bool) {
	secretKey, shutdown := ctrl.secretQueue.Get()
	if shutdown {
		// Queue shut down before registering the deferred Done, matching the
		// original control flow.
		return false
	}
	processNext = true
	defer func() {
		if r := recover(); r != nil {
			log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
		}
		ctrl.secretQueue.Done(secretKey)
	}()
	obj, exists, err := ctrl.secretInformer.GetIndexer().GetByKey(secretKey.(string))
	if err != nil {
		log.Errorf("Failed to get secret '%s' from informer index: %+v", secretKey, err)
		return
	}
	if !exists {
		// This happens after secret was deleted, but the work queue still had an entry for it.
		return
	}
	secret, ok := obj.(*corev1.Secret)
	if !ok {
		log.Warnf("Key '%s' in index is not an secret", secretKey)
		return
	}
	switch secret.Labels[common.LabelKeySecretType] {
	case common.SecretTypeCluster:
		ctrl.updateState(secret, ctrl.getClusterState(db.SecretToCluster(secret)))
	case common.SecretTypeRepository:
		ctrl.updateState(secret, ctrl.getRepoConnectionState(db.SecretToRepo(secret)))
	}
	return
}
// getRepoConnectionState probes repository connectivity by listing a path via
// the repo server and maps the outcome onto a ConnectionState.
func (ctrl *SecretController) getRepoConnectionState(repo *v1alpha1.Repository) v1alpha1.ConnectionState {
	state := v1alpha1.ConnectionState{
		ModifiedAt: repo.ConnectionState.ModifiedAt,
		Status:     v1alpha1.ConnectionStatusUnknown,
	}
	closer, client, err := ctrl.repoClientset.NewRepositoryClient()
	if err != nil {
		log.Errorf("Unable to create repository client: %v", err)
		return state
	}
	defer util.Close(closer)
	if _, err = client.ListDir(context.Background(), &repository.ListDirRequest{Repo: repo, Path: ".gitignore"}); err != nil {
		state.Status = v1alpha1.ConnectionStatusFailed
		state.Message = err.Error()
	} else {
		state.Status = v1alpha1.ConnectionStatusSuccessful
	}
	return state
}
// getClusterState checks cluster reachability by requesting the API server
// version and maps the outcome onto a ConnectionState.
func (ctrl *SecretController) getClusterState(cluster *v1alpha1.Cluster) v1alpha1.ConnectionState {
	state := v1alpha1.ConnectionState{
		ModifiedAt: cluster.ConnectionState.ModifiedAt,
		Status:     v1alpha1.ConnectionStatusUnknown,
	}
	kubeClientset, err := kubernetes.NewForConfig(cluster.RESTConfig())
	if err == nil {
		_, err = kubeClientset.Discovery().ServerVersion()
	}
	if err != nil {
		state.Status = v1alpha1.ConnectionStatusFailed
		state.Message = err.Error()
		return state
	}
	state.Status = v1alpha1.ConnectionStatusSuccessful
	return state
}
// updateState patches the secret's annotations so they reflect the given
// connection state. No API call is made when nothing has changed.
func (ctrl *SecretController) updateState(secret *corev1.Secret, state v1alpha1.ConnectionState) {
	changed := map[string]string{}
	for key, value := range db.AnnotationsFromConnectionState(&state) {
		if secret.Annotations[key] != value {
			changed[key] = value
		}
	}
	if len(changed) == 0 {
		return
	}
	// Record when the connection state last changed.
	changed[common.AnnotationConnectionModifiedAt] = metav1.Now().Format(time.RFC3339)
	patchData, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": changed,
		},
	})
	if err != nil {
		log.Warnf("Unable to prepare secret state annotation patch: %v", err)
		return
	}
	if _, err := ctrl.kubeClient.CoreV1().Secrets(secret.Namespace).Patch(secret.Name, types.MergePatchType, patchData); err != nil {
		log.Warnf("Unable to patch secret state annotation: %v", err)
	}
}
// newSecretInformer builds a shared informer over secrets in the given
// namespace that carry the argocd secret-type label (cluster or repository),
// enqueueing the secret's key on every add and update.
func newSecretInformer(client kubernetes.Interface, resyncPeriod time.Duration, namespace string, secretQueue workqueue.RateLimitingInterface) cache.SharedIndexInformer {
	informerFactory := informers.NewFilteredSharedInformerFactory(
		client,
		resyncPeriod,
		namespace,
		func(options *metav1.ListOptions) {
			// Only watch secrets labeled as cluster or repository credentials.
			// (Fixed: the redundant `var req *labels.Requirement` pre-declaration
			// before the short variable declaration has been removed.)
			req, err := labels.NewRequirement(common.LabelKeySecretType, selection.In, []string{common.SecretTypeCluster, common.SecretTypeRepository})
			if err != nil {
				// The requirement is built from constants, so failure here is a
				// programmer error.
				panic(err)
			}
			options.FieldSelector = fields.Everything().String()
			options.LabelSelector = labels.NewSelector().Add(*req).String()
		},
	)
	informer := informerFactory.Core().V1().Secrets().Informer()
	informer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				key, err := cache.MetaNamespaceKeyFunc(obj)
				if err == nil {
					secretQueue.Add(key)
				}
			},
			UpdateFunc: func(old, new interface{}) {
				key, err := cache.MetaNamespaceKeyFunc(new)
				if err == nil {
					secretQueue.Add(key)
				}
			},
		},
	)
	return informer
}
// NewSecretController constructs a SecretController that watches cluster and
// repository secrets in the given namespace.
func NewSecretController(kubeClient kubernetes.Interface, repoClientset reposerver.Clientset, resyncPeriod time.Duration, namespace string) *SecretController {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	ctrl := &SecretController{
		kubeClient:     kubeClient,
		repoClientset:  repoClientset,
		namespace:      namespace,
		secretQueue:    queue,
		secretInformer: newSecretInformer(kubeClient, resyncPeriod, namespace, queue),
	}
	return ctrl
}

500
controller/state.go Normal file
View File

@@ -0,0 +1,500 @@
package controller
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/diff"
"github.com/argoproj/argo-cd/util/kube"
kubeutil "github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
)
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
	// CompareAppState compares the application's target state (rendered
	// manifests) against the live cluster state and returns the comparison
	// result together with the generated manifest response.
	CompareAppState(app *v1alpha1.Application) (*v1alpha1.ComparisonResult, *repository.ManifestResponse, error)
	// SyncAppState applies the target manifests to the destination cluster,
	// optionally at a specific revision, with parameter overrides, as a dry
	// run, and/or pruning resources that are no longer in git.
	SyncAppState(app *v1alpha1.Application, revision string, overrides *[]v1alpha1.ComponentParameter, dryRun bool, prune bool) *v1alpha1.OperationState
}
// KsonnetAppStateManager allows to compare application using KSonnet CLI
type KsonnetAppStateManager struct {
	db            db.ArgoDB              // access to cluster and repository credentials
	appclientset  appclientset.Interface // used to patch application status/history
	repoClientset reposerver.Clientset   // repo server client for manifest generation
	namespace     string                 // namespace in which Application CRs live
}
// groupLiveObjects deduplicates the list of kubernetes resources and chooses the correct version of
// each resource: if a resource has a corresponding expected (target) resource, the live resource
// with the matching API version is picked; otherwise a single live resource with any version is
// chosen.
func (ks *KsonnetAppStateManager) groupLiveObjects(liveObjs []*unstructured.Unstructured, targetObjs []*unstructured.Unstructured) map[string]*unstructured.Unstructured {
	targetByFullName := make(map[string]*unstructured.Unstructured, len(targetObjs))
	for _, obj := range targetObjs {
		targetByFullName[getResourceFullName(obj)] = obj
	}
	// Bucket live resources by full name; multiple entries arise when the same
	// object is returned under more than one API version.
	liveListByFullName := make(map[string][]*unstructured.Unstructured)
	for _, obj := range liveObjs {
		fullName := getResourceFullName(obj)
		// append handles a nil slice, so no explicit initialization is needed.
		liveListByFullName[fullName] = append(liveListByFullName[fullName], obj)
	}
	liveByFullName := make(map[string]*unstructured.Unstructured, len(liveListByFullName))
	for fullName, list := range liveListByFullName {
		var liveObj *unstructured.Unstructured
		if targetObj := targetByFullName[fullName]; targetObj != nil {
			// Prefer the live object whose API version matches the target's.
			for i := range list {
				if list[i].GetAPIVersion() == targetObj.GetAPIVersion() {
					liveObj = list[i]
					break
				}
			}
		} else {
			liveObj = list[0]
		}
		if liveObj != nil {
			// All objects in this bucket share fullName as their key.
			liveByFullName[fullName] = liveObj
		}
	}
	return liveByFullName
}
// CompareAppState compares application spec and real app state using KSonnet
//
// It renders target manifests via the repo server, fetches live resources
// labeled with the application name from the destination cluster, pairs
// target and live objects by "<Kind>:<name>", diffs the pairs, and returns
// the comparison result along with the manifest generation response.
func (ks *KsonnetAppStateManager) CompareAppState(app *v1alpha1.Application) (*v1alpha1.ComparisonResult, *repository.ManifestResponse, error) {
	repo := ks.getRepo(app.Spec.Source.RepoURL)
	conn, repoClient, err := ks.repoClientset.NewRepositoryClient()
	if err != nil {
		return nil, nil, err
	}
	defer util.Close(conn)
	// Convert parameter overrides into the pointer form the manifest request expects.
	overrides := make([]*v1alpha1.ComponentParameter, len(app.Spec.Source.ComponentParameterOverrides))
	if app.Spec.Source.ComponentParameterOverrides != nil {
		for i := range app.Spec.Source.ComponentParameterOverrides {
			// Copy the element so each pointer refers to a distinct value.
			item := app.Spec.Source.ComponentParameterOverrides[i]
			overrides[i] = &item
		}
	}
	// Render the target manifests on the repo server.
	manifestInfo, err := repoClient.GenerateManifest(context.Background(), &repository.ManifestRequest{
		Repo:                        repo,
		Environment:                 app.Spec.Source.Environment,
		Path:                        app.Spec.Source.Path,
		Revision:                    app.Spec.Source.TargetRevision,
		ComponentParameterOverrides: overrides,
		AppLabel:                    app.Name,
	})
	if err != nil {
		return nil, nil, err
	}
	targetObjs := make([]*unstructured.Unstructured, len(manifestInfo.Manifests))
	for i, manifest := range manifestInfo.Manifests {
		obj, err := v1alpha1.UnmarshalToUnstructured(manifest)
		if err != nil {
			return nil, nil, err
		}
		targetObjs[i] = obj
	}
	server, namespace := app.Spec.Destination.Server, app.Spec.Destination.Namespace
	log.Infof("Comparing app %s state in cluster %s (namespace: %s)", app.ObjectMeta.Name, server, namespace)
	// Get the REST config for the cluster corresponding to the environment
	clst, err := ks.db.GetCluster(context.Background(), server)
	if err != nil {
		return nil, nil, err
	}
	restConfig := clst.RESTConfig()
	// Retrieve the live versions of the objects
	liveObjs, err := kubeutil.GetResourcesWithLabel(restConfig, namespace, common.LabelApplicationName, app.Name)
	if err != nil {
		return nil, nil, err
	}
	liveObjByFullName := ks.groupLiveObjects(liveObjs, targetObjs)
	// controlledLiveObj[i] is the live counterpart of targetObjs[i] (nil if missing).
	controlledLiveObj := make([]*unstructured.Unstructured, len(targetObjs))
	// Move live resources which have corresponding target object to controlledLiveObj
	dynClientPool := dynamic.NewDynamicClientPool(restConfig)
	disco, err := discovery.NewDiscoveryClientForConfig(restConfig)
	if err != nil {
		return nil, nil, err
	}
	for i, targetObj := range targetObjs {
		fullName := getResourceFullName(targetObj)
		liveObj := liveObjByFullName[fullName]
		if liveObj == nil && targetObj.GetName() != "" {
			// If we get here, it indicates we did not find the live resource when querying using
			// our app label. However, it is possible that the resource was created/modified outside
			// of ArgoCD. In order to determine that it is truly missing, we fall back to perform a
			// direct lookup of the resource by name. See issue #141
			gvk := targetObj.GroupVersionKind()
			dclient, err := dynClientPool.ClientForGroupVersionKind(gvk)
			if err != nil {
				return nil, nil, err
			}
			apiResource, err := kubeutil.ServerResourceForGroupVersionKind(disco, gvk)
			if err != nil {
				return nil, nil, err
			}
			liveObj, err = kubeutil.GetLiveResource(dclient, targetObj, apiResource, namespace)
			if err != nil {
				return nil, nil, err
			}
		}
		controlledLiveObj[i] = liveObj
		// Claimed: remove so only unmatched live resources remain in the map.
		delete(liveObjByFullName, fullName)
	}
	// Move root level live resources to controlledLiveObj and add nil to targetObjs to indicate that target object is missing
	for fullName := range liveObjByFullName {
		liveObj := liveObjByFullName[fullName]
		if !hasParent(liveObj) {
			targetObjs = append(targetObjs, nil)
			controlledLiveObj = append(controlledLiveObj, liveObj)
		}
	}
	// Do the actual comparison
	diffResults, err := diff.DiffArray(targetObjs, controlledLiveObj)
	if err != nil {
		return nil, nil, err
	}
	comparisonStatus := v1alpha1.ComparisonStatusSynced
	resources := make([]v1alpha1.ResourceState, len(targetObjs))
	for i := 0; i < len(targetObjs); i++ {
		resState := v1alpha1.ResourceState{
			ChildLiveResources: make([]v1alpha1.ResourceNode, 0),
		}
		diffResult := diffResults.Diffs[i]
		if diffResult.Modified {
			// Set resource state to 'OutOfSync' since target and corresponding live resource are different
			resState.Status = v1alpha1.ComparisonStatusOutOfSync
			comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
		} else {
			resState.Status = v1alpha1.ComparisonStatusSynced
		}
		if targetObjs[i] == nil {
			// "null" is the sentinel for a missing object on either side.
			resState.TargetState = "null"
			// Set resource state to 'OutOfSync' since target resource is missing and live resource is unexpected
			resState.Status = v1alpha1.ComparisonStatusOutOfSync
			comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
		} else {
			targetObjBytes, err := json.Marshal(targetObjs[i].Object)
			if err != nil {
				return nil, nil, err
			}
			resState.TargetState = string(targetObjBytes)
		}
		if controlledLiveObj[i] == nil {
			resState.LiveState = "null"
			// Set resource state to 'OutOfSync' since target resource present but corresponding live resource is missing
			resState.Status = v1alpha1.ComparisonStatusOutOfSync
			comparisonStatus = v1alpha1.ComparisonStatusOutOfSync
		} else {
			liveObjBytes, err := json.Marshal(controlledLiveObj[i].Object)
			if err != nil {
				return nil, nil, err
			}
			resState.LiveState = string(liveObjBytes)
		}
		resources[i] = resState
	}
	// Attach remaining (child) live resources under their controlling parents.
	for i, resource := range resources {
		liveResource := controlledLiveObj[i]
		if liveResource != nil {
			childResources, err := getChildren(liveResource, liveObjByFullName)
			if err != nil {
				return nil, nil, err
			}
			resource.ChildLiveResources = childResources
			resources[i] = resource
		}
	}
	compResult := v1alpha1.ComparisonResult{
		ComparedTo: app.Spec.Source,
		ComparedAt: metav1.Time{Time: time.Now().UTC()},
		Resources:  resources,
		Status:     comparisonStatus,
	}
	return &compResult, manifestInfo, nil
}
// hasParent reports whether the live object is owned by another resource.
func hasParent(obj *unstructured.Unstructured) bool {
	// TODO: remove special case after Service and Endpoint get explicit relationship ( https://github.com/kubernetes/kubernetes/issues/28483 )
	if obj.GetKind() == kubeutil.EndpointsKind {
		return true
	}
	return metav1.GetControllerOf(obj) != nil
}
// isControlledBy reports whether obj is owned/controlled by parent.
func isControlledBy(obj *unstructured.Unstructured, parent *unstructured.Unstructured) bool {
	// TODO: remove special case after Service and Endpoint get explicit relationship ( https://github.com/kubernetes/kubernetes/issues/28483 )
	endpointsOfService := obj.GetKind() == kubeutil.EndpointsKind && parent.GetKind() == kubeutil.ServiceKind
	if endpointsOfService {
		// Endpoints are matched to their Service by name.
		return obj.GetName() == parent.GetName()
	}
	return metav1.IsControlledBy(obj, parent)
}
// getChildren recursively collects the resources in liveObjByFullName that are
// controlled by parent, removing each claimed resource from the map so it is
// attached to at most one parent in the resulting tree.
func getChildren(parent *unstructured.Unstructured, liveObjByFullName map[string]*unstructured.Unstructured) ([]v1alpha1.ResourceNode, error) {
	children := make([]v1alpha1.ResourceNode, 0)
	for fullName, obj := range liveObjByFullName {
		if !isControlledBy(obj, parent) {
			continue
		}
		// Deleting during range is legal in Go and ensures the recursive calls
		// below cannot claim the same object twice.
		delete(liveObjByFullName, fullName)
		// Fixed: the local variable was previously named `json`, shadowing the
		// encoding/json package within this scope.
		stateData, err := json.Marshal(obj)
		if err != nil {
			return nil, err
		}
		childResource := v1alpha1.ResourceNode{State: string(stateData)}
		grandChildren, err := getChildren(obj, liveObjByFullName)
		if err != nil {
			return nil, err
		}
		childResource.Children = grandChildren
		children = append(children, childResource)
	}
	return children, nil
}
// getResourceFullName builds the "<Kind>:<name>" key used to correlate target
// and live resources.
func getResourceFullName(obj *unstructured.Unstructured) string {
	return obj.GetKind() + ":" + obj.GetName()
}
// SyncAppState deploys the application's target manifests to its destination
// cluster and, after a successful non-dry-run sync, records the deployment in
// the application's history.
func (s *KsonnetAppStateManager) SyncAppState(
	app *v1alpha1.Application, revision string, overrides *[]v1alpha1.ComponentParameter, dryRun bool, prune bool) *v1alpha1.OperationState {
	if revision != "" {
		app.Spec.Source.TargetRevision = revision
	}
	if overrides != nil {
		app.Spec.Source.ComponentParameterOverrides = *overrides
	}
	opRes, manifest := s.syncAppResources(app, dryRun, prune)
	if dryRun || !opRes.Phase.Successful() {
		return opRes
	}
	if err := s.persistDeploymentInfo(app, manifest.Revision, manifest.Params, nil); err != nil {
		opRes.Phase = v1alpha1.OperationError
		opRes.Message = fmt.Sprintf("failed to record sync to history: %v", err)
	}
	return opRes
}
// getRepo resolves repository credentials for repoURL, falling back to an
// anonymous (public) repository when none are configured.
func (s *KsonnetAppStateManager) getRepo(repoURL string) *v1alpha1.Repository {
	if repo, err := s.db.GetRepository(context.Background(), repoURL); err == nil {
		return repo
	}
	// If we couldn't retrieve from the repo service, assume public repositories
	return &v1alpha1.Repository{Repo: repoURL}
}
// persistDeploymentInfo appends a deployment record to the application's
// history (trimming it to maxHistoryCnt entries) and patches the Application
// status with the updated history.
func (s *KsonnetAppStateManager) persistDeploymentInfo(
	app *v1alpha1.Application, revision string, envParams []*v1alpha1.ComponentParameter, overrides *[]v1alpha1.ComponentParameter) error {
	params := make([]v1alpha1.ComponentParameter, len(envParams))
	for i, p := range envParams {
		params[i] = *p
	}
	// IDs are monotonically increasing; continue from the last history entry.
	var nextID int64
	if n := len(app.Status.History); n > 0 {
		nextID = app.Status.History[n-1].ID + 1
	}
	entry := v1alpha1.DeploymentInfo{
		ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
		Revision:                    revision,
		Params:                      params,
		DeployedAt:                  metav1.NewTime(time.Now()),
		ID:                          nextID,
	}
	history := append(app.Status.History, entry)
	if len(history) > maxHistoryCnt {
		// Drop the oldest entry so the history stays bounded.
		history = history[1 : maxHistoryCnt+1]
	}
	patch, err := json.Marshal(map[string]map[string][]v1alpha1.DeploymentInfo{
		"status": {
			"history": history,
		},
	})
	if err != nil {
		return err
	}
	_, err = s.appclientset.ArgoprojV1alpha1().Applications(s.namespace).Patch(app.Name, types.MergePatchType, patch)
	return err
}
// syncAppResources performs the two-phase sync of all application resources:
// first a dry-run apply of every manifest to catch validation errors, then
// (unless dryRun was requested) the real apply/prune pass. It returns the
// operation state and the manifest response used for the comparison.
func (s *KsonnetAppStateManager) syncAppResources(
	app *v1alpha1.Application,
	dryRun bool,
	prune bool) (*v1alpha1.OperationState, *repository.ManifestResponse) {
	opRes := v1alpha1.OperationState{
		SyncResult: &v1alpha1.SyncOperationResult{},
	}
	// Compare first: the comparison result supplies the target/live pairs to sync.
	comparison, manifestInfo, err := s.CompareAppState(app)
	if err != nil {
		opRes.Phase = v1alpha1.OperationError
		opRes.Message = err.Error()
		return &opRes, manifestInfo
	}
	clst, err := s.db.GetCluster(context.Background(), app.Spec.Destination.Server)
	if err != nil {
		opRes.Phase = v1alpha1.OperationError
		opRes.Message = err.Error()
		return &opRes, manifestInfo
	}
	config := clst.RESTConfig()
	opRes.SyncResult.Resources = make([]*v1alpha1.ResourceDetails, len(comparison.Resources))
	// Cache the unmarshalled objects so the second (real) pass can reuse them.
	liveObjs := make([]*unstructured.Unstructured, len(comparison.Resources))
	targetObjs := make([]*unstructured.Unstructured, len(comparison.Resources))
	// First perform a `kubectl apply --dry-run` against all the manifests. This will detect most
	// (but not all) validation issues with the users' manifests (e.g. will detect syntax issues,
	// but will not not detect if they are mutating immutable fields). If anything fails, we will
	// refuse to perform the sync.
	dryRunSuccessful := true
	for i, resourceState := range comparison.Resources {
		liveObj, err := resourceState.LiveObject()
		if err != nil {
			opRes.Phase = v1alpha1.OperationError
			opRes.Message = fmt.Sprintf("Failed to unmarshal live object: %v", err)
			return &opRes, manifestInfo
		}
		targetObj, err := resourceState.TargetObject()
		if err != nil {
			opRes.Phase = v1alpha1.OperationError
			opRes.Message = fmt.Sprintf("Failed to unmarshal target object: %v", err)
			return &opRes, manifestInfo
		}
		liveObjs[i] = liveObj
		targetObjs[i] = targetObj
		// Last argument (dryRun) is true for this validation pass.
		resDetails, successful := syncObject(config, app.Spec.Destination.Namespace, targetObj, liveObj, prune, true)
		if !successful {
			dryRunSuccessful = false
		}
		opRes.SyncResult.Resources[i] = &resDetails
	}
	if !dryRunSuccessful {
		opRes.Phase = v1alpha1.OperationFailed
		opRes.Message = "one or more objects failed to apply (dry run)"
		return &opRes, manifestInfo
	}
	if dryRun {
		opRes.Phase = v1alpha1.OperationSucceeded
		opRes.Message = "successfully synced (dry run)"
		return &opRes, manifestInfo
	}
	// If we get here, all objects passed their dry-run, so we are now ready to actually perform the
	// `kubectl apply`. Loop through the resources again, this time without dry-run.
	syncSuccessful := true
	for i := range comparison.Resources {
		// The dry-run results recorded above are overwritten by the real results.
		resDetails, successful := syncObject(config, app.Spec.Destination.Namespace, targetObjs[i], liveObjs[i], prune, false)
		if !successful {
			syncSuccessful = false
		}
		opRes.SyncResult.Resources[i] = &resDetails
	}
	if !syncSuccessful {
		opRes.Message = "one or more objects failed to apply"
		opRes.Phase = v1alpha1.OperationFailed
	} else {
		opRes.Message = "successfully synced"
		opRes.Phase = v1alpha1.OperationSucceeded
	}
	return &opRes, manifestInfo
}
// syncObject performs a sync of a single resource. A nil targetObj means the
// resource exists only in the cluster and is pruned (if allowed); otherwise
// the target manifest is applied. The boolean result reports success.
func syncObject(config *rest.Config, namespace string, targetObj, liveObj *unstructured.Unstructured, prune, dryRun bool) (v1alpha1.ResourceDetails, bool) {
	obj := targetObj
	if obj == nil {
		obj = liveObj
	}
	resDetails := v1alpha1.ResourceDetails{
		Name:      obj.GetName(),
		Kind:      obj.GetKind(),
		Namespace: namespace,
	}
	successful := true
	switch {
	case targetObj != nil:
		// Resource exists in git: apply it.
		message, err := kube.ApplyResource(config, targetObj, namespace, dryRun)
		if err != nil {
			resDetails.Message = err.Error()
			resDetails.Status = v1alpha1.ResourceDetailsSyncFailed
			successful = false
		} else {
			resDetails.Message = message
			resDetails.Status = v1alpha1.ResourceDetailsSynced
		}
	case !prune:
		// Live-only resource, but pruning was not requested.
		resDetails.Message = "ignored (requires pruning)"
		resDetails.Status = v1alpha1.ResourceDetailsPruningRequired
	case dryRun:
		resDetails.Message = "pruned (dry run)"
		resDetails.Status = v1alpha1.ResourceDetailsSyncedAndPruned
	default:
		if err := kubeutil.DeleteResource(config, liveObj, namespace); err != nil {
			resDetails.Message = err.Error()
			resDetails.Status = v1alpha1.ResourceDetailsSyncFailed
			successful = false
		} else {
			resDetails.Message = "pruned"
			resDetails.Status = v1alpha1.ResourceDetailsSyncedAndPruned
		}
	}
	return resDetails, successful
}
// NewAppStateManager creates new instance of Ksonnet app comparator
func NewAppStateManager(
	db db.ArgoDB,
	appclientset appclientset.Interface,
	repoClientset reposerver.Clientset,
	namespace string,
) AppStateManager {
	mgr := KsonnetAppStateManager{
		db:            db,
		appclientset:  appclientset,
		repoClientset: repoClientset,
		namespace:     namespace,
	}
	return &mgr
}

View File

@@ -9,9 +9,10 @@
The API server is a gRPC/REST server which exposes the API consumed by the Web UI, CLI, and CI/CD
systems. It has the following responsibilities:
* application management and status reporting
* invoking of application actions (e.g. manual sync, user-defined actions)
* invoking of application operations (e.g. sync, rollback, user-defined actions)
* repository and cluster credential management (stored as K8s secrets)
* authentication and RBAC enforcement, with eventual integration with external identity providers
* authentication and auth delegation to external identity providers
* RBAC enforcement
* listener/forwarder for git webhook events
### Repository Server

Binary file not shown.

After

Width:  |  Height:  |  Size: 60 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

View File

@@ -11,7 +11,7 @@ An example Ksonnet guestbook application is provided to demonstrates how Argo CD
Download the latest Argo CD version
```
curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v0.3.1/argocd-darwin-amd64
curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v0.5.1/argocd-darwin-amd64
chmod +x /usr/local/bin/argocd
```
@@ -31,7 +31,7 @@ change service type to `LoadBalancer`:
kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
```
# 4. Login to the server from the CLI
## 4. Login to the server from the CLI
```
argocd login $(minikube service argocd-server -n argocd --url | cut -d'/' -f 3)

72
docs/sso.md Normal file
View File

@@ -0,0 +1,72 @@
# SSO Configuration
## Overview
ArgoCD embeds and bundles [Dex](https://github.com/coreos/dex) as part of its installation, for the
purposes of delegating authentication to an external identity provider. Multiple types of identity
providers are supported (OIDC, SAML, LDAP, GitHub, etc...). SSO configuration of ArgoCD requires
editing the `argocd-cm` ConfigMap with a
[Dex connector](https://github.com/coreos/dex/tree/master/Documentation/connectors) settings.
This document describes how to configure ArgoCD SSO using GitHub (OAuth2) as an example, but the
steps should be similar for other identity providers.
### 1. Register the application in the identity provider
In GitHub, register a new application. The callback address should be the `/api/dex/callback`
endpoint of your ArgoCD URL (e.g. https://argocd.example.com/api/dex/callback).
![Register OAuth App](assets/register-app.png "Register OAuth App")
After registering the app, you will receive an OAuth2 client ID and secret. These values will be
inputted into the ArgoCD configmap.
![OAuth2 Client Config](assets/oauth2-config.png "OAuth2 Client Config")
### 2. Configure ArgoCD for SSO
Edit the argocd-cm configmap:
```
kubectl edit configmap argocd-cm
```
* In the `url` key, input the base URL of ArgoCD. In this example, it is https://argocd.example.com
* In the `dex.config` key, add the `github` connector to the `connectors` sub field. See Dex's
[GitHub connector](https://github.com/coreos/dex/blob/master/Documentation/connectors/github.md)
documentation for explanation of the fields. A minimal config should populate the clientID,
clientSecret generated in Step 1.
* You will very likely want to restrict logins to one or more GitHub organizations. In the
`connectors.config.orgs` list, add one or more GitHub organizations. Any member of the org will
then be able to login to ArgoCD to perform management tasks.
```
data:
url: https://argocd.example.com
dex.config: |
connectors:
- type: github
id: github
name: GitHub
config:
clientID: 5aae0fcec2c11634be8c
clientSecret: c6fcb18177869174bd09be2c51259fb049c9d4e5
orgs:
- name: your-github-org
```
NOTES:
* Any values which start with '$' will look to a key in argocd-secret of the same name (minus the $),
to obtain the actual value. This allows you to store the `clientSecret` as a kubernetes secret.
* There is no need to set `redirectURI` in the `connectors.config` as shown in the dex documentation.
ArgoCD will automatically use the correct `redirectURI` for any OAuth2 connectors, to match the
correct external callback URL (e.g. https://argocd.example.com/api/dex/callback)
### 3. Restart ArgoCD for changes to take effect
Any changes to the `argocd-cm` ConfigMap or `argocd-secret` Secret, currently require a restart of
the ArgoCD API server for the settings to take effect. Delete the `argocd-server` pod to force a
restart. [Issue #174](https://github.com/argoproj/argo-cd/issues/174) will address this limitation.
```
kubectl delete pod -l app=argocd-server
```

View File

@@ -2,10 +2,6 @@
An ArgoCD application spec provides several different ways of tracking Kubernetes resource manifests in git. This document describes the different techniques and the means of deploying those manifests to the target environment.
## Auto-Sync
In all tracking strategies described below, the application has the option to sync automatically. If auto-sync is configured, the new resources manifests will be applied automatically -- as soon as a difference is detected between the target state (git) and live state. If auto-sync is disabled, a manual sync will be needed using the Argo UI, CLI, or API.
## Branch Tracking
If a branch name is specified, ArgoCD will continually compare live state against the resource manifests defined at the tip of the specified branch.
@@ -23,12 +19,13 @@ To redeploy an application, the user uses git to change the meaning of a tag by
If a git commit SHA is specified, the application is effectively pinned to the manifests defined at the specified commit. This is the most restrictive of the techniques and is typically used to control production environments.
Since commit SHAs cannot change meaning, the only way to change the live state of an application which is pinned to a commit, is by updating the tracking revision in the application to a different commit containing the new manifests.
Note that parameter overrides can still be made against an application which is pinned to a revision.
## Parameter Overrides
ArgoCD provides means to override the parameters of a ksonnet app. This gives some extra flexibility in having *some* parts of the k8s manifests determined dynamically. It also serves as an alternative way of redeploying an application by changing application parameters via ArgoCD, instead of making the changes to the manifests in git.
The following is an example of where this would be useful: A team maintains a "dev" environment, which needs to be continually updated with the latest version of their guestbook application after every build in the tip of master. To solve this, the ksonnet application would expose an parameter named `image`, whose value used in the `dev` environment contains a placeholder value (e.g. `example/guestbook:replaceme`) intended to be set externally (outside of git) such as by build systems. As part of the build pipeline, the parameter value of the `image` would be continually updated to the freshly built image (e.g. `example/guestbook:abcd123`). A sync operation would result in the application being redeployed with the new image.
The following is an example of where this would be useful: A team maintains a "dev" environment, which needs to be continually updated with the latest version of their guestbook application after every build in the tip of master. To address this use case, the ksonnet application should expose a parameter named `image`, whose value used in the `dev` environment contains a placeholder value (e.g. `example/guestbook:replaceme`), intended to be set externally (outside of git) such as by a build system. As part of the build pipeline, the parameter value of the `image` would be continually updated to the freshly built image (e.g. `example/guestbook:abcd123`). A sync operation would result in the application being redeployed with the new image.
ArgoCD provides these operations conveniently via the CLI, or alternatively via the gRPC/REST API.
```
@@ -38,3 +35,7 @@ $ argocd app sync guestbook
Note that in all tracking strategies, any parameter overrides set in the application instance will be honored.
## [Auto-Sync](https://github.com/argoproj/argo-cd/issues/79) (Not Yet Implemented)
In all tracking strategies, the application will have the option to sync automatically. If auto-sync is configured, the new resources manifests will be applied automatically -- as soon as a difference is detected between the target state (git) and live state. If auto-sync is disabled, a manual sync will be needed using the Argo UI, CLI, or API.

70
docs/webhook.md Normal file
View File

@@ -0,0 +1,70 @@
# Git Webhook Configuration
## Overview
ArgoCD will poll git repositories every three minutes for changes to the manifests. To eliminate
this delay from polling, the API server can be configured to receive webhook events. ArgoCD supports
git webhook notifications from GitHub, GitLab, and BitBucket. The following explains how to configure
a git webhook for GitHub, but the same process should be applicable to other providers.
### 1. Create the webhook in the git provider
In your git provider, navigate to the settings page where webhooks can be configured. The payload
URL configured in the git provider should use the /api/webhook endpoint of your ArgoCD instance
(e.g. https://argocd.example.com/api/webhook). Input an arbitrary value in the secret. The same
value will be used when configuring the webhook in step 2.
![Add Webhook](assets/webhook-config.png "Add Webhook")
### 2. Configure ArgoCD with the webhook secret
In the `argocd-secret` kubernetes secret, configure one of the following keys with the git provider
webhook secret configured in step 1.
| Provider | K8s Secret Key |
|---------- | ------------------------ |
| GitHub | `github.webhook.secret` |
| GitLab | `gitlab.webhook.secret` |
| BitBucket | `bitbucket.webhook.uuid` |
Edit the ArgoCD kubernetes secret:
```
kubectl edit secret argocd-secret
```
TIP: for ease of entering secrets, kubernetes supports inputting secrets in the `stringData` field,
which saves you the trouble of base64 encoding the values and copying it to the `data` field.
Simply copy the shared webhook secret created in step 1, to the corresponding
GitHub/GitLab/BitBucket key under the `stringData` field:
```
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
namespace: argocd
type: Opaque
data:
...
stringData:
# github webhook secret
github.webhook.secret: shhhh! it's a github secret
# gitlab webhook secret
gitlab.webhook.secret: shhhh! it's a gitlab secret
# bitbucket webhook secret
bitbucket.webhook.uuid: your-bitbucket-uuid
```
### 3. Restart ArgoCD for changes to take effect
Any changes to the `argocd-cm` ConfigMap or `argocd-secret` Secret, currently require a restart of
the ArgoCD API server for the settings to take effect. Delete the `argocd-server` pod to force a
restart. [Issue #174](https://github.com/argoproj/argo-cd/issues/174) will address this limitation.
```
kubectl delete pod -l app=argocd-server
```

View File

@@ -23,7 +23,7 @@ local appDeployment = deployment
params.replicas,
container
.new(params.name, params.image)
.withPorts(containerPort.new(targetPort)),
labels);
.withPorts(containerPort.new(targetPort)) + if params.command != null then { command: [ params.command ] } else {},
labels).withProgressDeadlineSeconds(5);
k.core.v1.list.new([appService, appDeployment])

View File

@@ -12,7 +12,8 @@
name: "guestbook-ui",
replicas: 1,
servicePort: 80,
type: "LoadBalancer",
type: "ClusterIP",
command: null,
},
},
}

View File

@@ -1,7 +1,7 @@
{
"Vendor": true,
"DisableAll": true,
"Deadline": "3m",
"Deadline": "8m",
"Enable": [
"vet",
"gofmt",

View File

@@ -0,0 +1,168 @@
package main
import (
"context"
"fmt"
"hash/fnv"
"log"
"os"
"strings"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/git"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// origRepoURLToSecretName hashes repo URL to the secret name using a formula.
// Part of the original repo name is incorporated for debugging purposes.
//
// NOTE: this is the legacy naming scheme, kept only so the migration in
// renameRepositorySecrets can reconstruct names of secrets created by older
// versions. Unlike repoURLToSecretName, the FNV hash here is computed over
// the URL *before* lowercasing (only the final path component in the printed
// name is lowercased), so the two functions can produce different names for
// the same repo URL. Do not alter the hashing order.
func origRepoURLToSecretName(repo string) string {
	repo = git.NormalizeGitURL(repo)
	h := fnv.New32a()
	// Write on an FNV hash never returns an error, hence the blank assigns.
	_, _ = h.Write([]byte(repo))
	parts := strings.Split(strings.TrimSuffix(repo, ".git"), "/")
	return fmt.Sprintf("repo-%s-%v", strings.ToLower(parts[len(parts)-1]), h.Sum32())
}
// repoURLToSecretName hashes repo URL to the secret name using a formula.
// Part of the original repo name is incorporated for debugging purposes.
func repoURLToSecretName(repo string) string {
	// Normalize and lowercase first so equivalent URLs map to the same name.
	normalized := strings.ToLower(git.NormalizeGitURL(repo))
	hasher := fnv.New32a()
	// Write on an FNV hash never returns an error.
	_, _ = hasher.Write([]byte(normalized))
	// Use the final path component (sans ".git") as a readable hint.
	trimmed := strings.TrimSuffix(normalized, ".git")
	base := trimmed[strings.LastIndex(trimmed, "/")+1:]
	return fmt.Sprintf("repo-%s-%v", base, hasher.Sum32())
}
// renameSecret copies the Kubernetes secret oldName in the given namespace to
// newName, then deletes the old secret. Errors are logged and swallowed so
// that a failure on one secret does not abort the overall migration run.
func renameSecret(namespace, oldName, newName string) {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
	overrides := clientcmd.ConfigOverrides{}
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &overrides)
	log.Printf("Renaming secret %q to %q in namespace %q\n", oldName, newName, namespace)
	config, err := clientConfig.ClientConfig()
	if err != nil {
		log.Println("Could not retrieve client config: ", err)
		return
	}
	kubeclientset := kubernetes.NewForConfigOrDie(config)
	repoSecret, err := kubeclientset.CoreV1().Secrets(namespace).Get(oldName, metav1.GetOptions{})
	if err != nil {
		log.Println("Could not retrieve old secret: ", err)
		return
	}
	repoSecret.ObjectMeta.Name = newName
	// ResourceVersion must be cleared when creating a new object.
	// NOTE(review): metadata.uid is carried over from the old secret; the API
	// server assigns a fresh one on create, but clearing it too would be safer.
	repoSecret.ObjectMeta.ResourceVersion = ""
	// The created object is not needed afterwards; only check the error.
	_, err = kubeclientset.CoreV1().Secrets(namespace).Create(repoSecret)
	if err != nil {
		log.Println("Could not create new secret: ", err)
		return
	}
	err = kubeclientset.CoreV1().Secrets(namespace).Delete(oldName, &metav1.DeleteOptions{})
	if err != nil {
		log.Println("Could not remove old secret: ", err)
	}
}
// renameRepositorySecrets lists all configured repositories via the ArgoCD
// API and, for every repository whose secret name differs between the legacy
// and current naming formats, renames the backing Kubernetes secret so that
// it uses the new format. List failures are logged and the migration skipped.
func renameRepositorySecrets(clientOpts argocdclient.ClientOptions, namespace string) {
	conn, repoIf := argocdclient.NewClientOrDie(&clientOpts).NewRepoClientOrDie()
	defer util.Close(conn)
	repos, err := repoIf.List(context.Background(), &repository.RepoQuery{})
	if err != nil {
		log.Println("An error occurred, so skipping secret renaming: ", err)
		return
	}
	log.Println("Renaming repository secrets...")
	for _, repo := range repos.Items {
		oldSecretName := origRepoURLToSecretName(repo.Repo)
		newSecretName := repoURLToSecretName(repo.Repo)
		if oldSecretName != newSecretName {
			log.Printf("Repo %q had its secret name change, so updating\n", repo.Repo)
			renameSecret(namespace, oldSecretName, newSecretName)
		}
	}
}
/*
// PopulateAppDestinations ensures that apps have a Server and Namespace set explicitly.
func populateAppDestinations(clientOpts argocdclient.ClientOptions) {
conn, appIf := argocdclient.NewClientOrDie(&clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
apps, err := appIf.List(context.Background(), &application.ApplicationQuery{})
if err != nil {
log.Println("An error occurred, so skipping destination population: ", err)
return
}
log.Println("Populating app Destination fields")
for _, app := range apps.Items {
changed := false
log.Printf("Ensuring destination field is populated on app %q\n", app.ObjectMeta.Name)
if app.Spec.Destination.Server == "" {
if app.Status.ComparisonResult.Status == appv1.ComparisonStatusUnknown || app.Status.ComparisonResult.Status == appv1.ComparisonStatusError {
log.Printf("App %q was missing Destination.Server, but could not fill it in: %s", app.ObjectMeta.Name, app.Status.ComparisonResult.Status)
} else {
log.Printf("App %q was missing Destination.Server, so setting to %q\n", app.ObjectMeta.Name, app.Status.ComparisonResult.Server)
app.Spec.Destination.Server = app.Status.ComparisonResult.Server
changed = true
}
}
if app.Spec.Destination.Namespace == "" {
if app.Status.ComparisonResult.Status == appv1.ComparisonStatusUnknown || app.Status.ComparisonResult.Status == appv1.ComparisonStatusError {
log.Printf("App %q was missing Destination.Namespace, but could not fill it in: %s", app.ObjectMeta.Name, app.Status.ComparisonResult.Status)
} else {
log.Printf("App %q was missing Destination.Namespace, so setting to %q\n", app.ObjectMeta.Name, app.Status.ComparisonResult.Namespace)
app.Spec.Destination.Namespace = app.Status.ComparisonResult.Namespace
changed = true
}
}
if changed {
_, err = appIf.UpdateSpec(context.Background(), &application.ApplicationSpecRequest{
AppName: app.Name,
Spec: &app.Spec,
})
if err != nil {
log.Println("An error occurred (but continuing anyway): ", err)
}
}
}
}
*/
// main migrates pre-existing ArgoCD state to the current naming conventions.
// Usage: <binary> SERVER NAMESPACE
func main() {
	if len(os.Args) < 3 {
		log.Fatalf("USAGE: %s SERVER NAMESPACE\n", os.Args[0])
	}
	server, namespace := os.Args[1], os.Args[2]
	log.Printf("Using argocd server %q and namespace %q\n", server, namespace)
	// Connections to a server on the local host can use plaintext gRPC.
	isLocalhost := strings.HasPrefix(server, "localhost:") ||
		strings.HasPrefix(server, "127.0.0.1:")
	clientOpts := argocdclient.ClientOptions{
		ServerAddr: server,
		Insecure:   true,
		PlainText:  isLocalhost,
	}
	renameRepositorySecrets(clientOpts, namespace)
	//populateAppDestinations(clientOpts)
}

View File

@@ -5,14 +5,6 @@ import (
"strings"
"syscall"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/util/config"
"github.com/argoproj/argo-cd/util/diff"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/password"
"github.com/argoproj/argo-cd/util/session"
tlsutil "github.com/argoproj/argo-cd/util/tls"
"github.com/ghodss/yaml"
"github.com/gobuffalo/packr"
log "github.com/sirupsen/logrus"
@@ -29,6 +21,15 @@ import (
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/util/diff"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/password"
"github.com/argoproj/argo-cd/util/session"
"github.com/argoproj/argo-cd/util/settings"
tlsutil "github.com/argoproj/argo-cd/util/tls"
)
var (
@@ -47,17 +48,17 @@ var (
// InstallOptions stores a collection of installation settings.
type InstallOptions struct {
DryRun bool
Upgrade bool
ConfigSuperuser bool
CreateSignature bool
ConfigMap string
Namespace string
ControllerImage string
UIImage string
ServerImage string
RepoServerImage string
ImagePullPolicy string
DryRun bool
Upgrade bool
UpdateSuperuser bool
UpdateSignature bool
SuperuserPassword string
Namespace string
ControllerImage string
UIImage string
ServerImage string
RepoServerImage string
ImagePullPolicy string
}
type Installer struct {
@@ -91,20 +92,29 @@ func (i *Installer) Install() {
i.InstallNamespace()
i.InstallApplicationCRD()
i.InstallSettings()
i.InstallRBACConfigMap()
i.InstallApplicationController()
i.InstallArgoCDServer()
i.InstallArgoCDRepoServer()
}
func (i *Installer) Uninstall() {
func (i *Installer) Uninstall(deleteNamespace, deleteCRD bool) {
manifests := i.box.List()
for _, manifestPath := range manifests {
if strings.HasSuffix(manifestPath, ".yaml") || strings.HasSuffix(manifestPath, ".yml") {
var obj unstructured.Unstructured
i.unmarshalManifest(manifestPath, &obj)
if obj.GetKind() == "Namespace" {
// Don't delete namespaces
continue
switch strings.ToLower(obj.GetKind()) {
case "namespace":
if !deleteNamespace {
log.Infof("Skipped deletion of Namespace: '%s'", obj.GetName())
continue
}
case "customresourcedefinition":
if !deleteCRD {
log.Infof("Skipped deletion of CustomResourceDefinition: '%s'", obj.GetName())
continue
}
}
i.MustUninstallResource(&obj)
}
@@ -131,52 +141,70 @@ func (i *Installer) InstallApplicationCRD() {
func (i *Installer) InstallSettings() {
kubeclientset, err := kubernetes.NewForConfig(i.config)
errors.CheckError(err)
configManager := config.NewConfigManager(kubeclientset, i.Namespace)
_, err = configManager.GetSettings()
if err == nil {
log.Infof("Settings already exists. Skipping creation")
return
}
if !apierr.IsNotFound(err) {
log.Fatal(err)
}
// configmap/secret not yet created
var newSettings config.ArgoCDSettings
// set JWT signature
signature, err := session.MakeSignature(32)
errors.CheckError(err)
newSettings.ServerSignature = signature
// generate admin password
passwordRaw := readAndConfirmPassword()
hashedPassword, err := password.HashPassword(passwordRaw)
errors.CheckError(err)
newSettings.LocalUsers = map[string]string{
common.ArgoCDAdminUsername: hashedPassword,
settingsMgr := settings.NewSettingsManager(kubeclientset, i.Namespace)
cdSettings, err := settingsMgr.GetSettings()
if err != nil {
if apierr.IsNotFound(err) {
cdSettings = &settings.ArgoCDSettings{}
} else {
log.Fatal(err)
}
}
// generate TLS cert
hosts := []string{
"localhost",
"argocd-server",
fmt.Sprintf("argocd-server.%s", i.Namespace),
fmt.Sprintf("argocd-server.%s.svc", i.Namespace),
fmt.Sprintf("argocd-server.%s.svc.cluster.local", i.Namespace),
if cdSettings.ServerSignature == nil || i.UpdateSignature {
// set JWT signature
signature, err := session.MakeSignature(32)
errors.CheckError(err)
cdSettings.ServerSignature = signature
}
certOpts := tlsutil.CertOptions{
Hosts: hosts,
Organization: "Argo CD",
IsCA: true,
}
cert, err := tlsutil.GenerateX509KeyPair(certOpts)
errors.CheckError(err)
newSettings.Certificate = cert
err = configManager.SaveSettings(&newSettings)
if cdSettings.LocalUsers == nil {
cdSettings.LocalUsers = make(map[string]string)
}
if _, ok := cdSettings.LocalUsers[common.ArgoCDAdminUsername]; !ok || i.UpdateSuperuser {
passwordRaw := i.SuperuserPassword
if passwordRaw == "" {
passwordRaw = readAndConfirmPassword()
}
hashedPassword, err := password.HashPassword(passwordRaw)
errors.CheckError(err)
cdSettings.LocalUsers = map[string]string{
common.ArgoCDAdminUsername: hashedPassword,
}
}
if cdSettings.Certificate == nil {
// generate TLS cert
hosts := []string{
"localhost",
"argocd-server",
fmt.Sprintf("argocd-server.%s", i.Namespace),
fmt.Sprintf("argocd-server.%s.svc", i.Namespace),
fmt.Sprintf("argocd-server.%s.svc.cluster.local", i.Namespace),
}
certOpts := tlsutil.CertOptions{
Hosts: hosts,
Organization: "Argo CD",
IsCA: true,
}
cert, err := tlsutil.GenerateX509KeyPair(certOpts)
errors.CheckError(err)
cdSettings.Certificate = cert
}
err = settingsMgr.SaveSettings(cdSettings)
errors.CheckError(err)
}
func (i *Installer) InstallRBACConfigMap() {
var rbacCM apiv1.ConfigMap
i.unmarshalManifest("02c_argocd-rbac-cm.yaml", &rbacCM)
_, err := i.InstallResource(kube.MustToUnstructured(&rbacCM))
if err != nil && !apierr.IsAlreadyExists(err) {
errors.CheckError(err)
}
}
func readAndConfirmPassword() string {
for {
fmt.Print("*** Enter an admin password: ")
@@ -222,8 +250,10 @@ func (i *Installer) InstallArgoCDServer() {
i.unmarshalManifest("04c_argocd-server-rolebinding.yaml", &argoCDServerControllerRoleBinding)
i.unmarshalManifest("04d_argocd-server-deployment.yaml", &argoCDServerControllerDeployment)
i.unmarshalManifest("04e_argocd-server-service.yaml", &argoCDServerService)
argoCDServerControllerDeployment.Spec.Template.Spec.InitContainers[0].Image = i.UIImage
argoCDServerControllerDeployment.Spec.Template.Spec.InitContainers[0].Image = i.ServerImage
argoCDServerControllerDeployment.Spec.Template.Spec.InitContainers[0].ImagePullPolicy = apiv1.PullPolicy(i.ImagePullPolicy)
argoCDServerControllerDeployment.Spec.Template.Spec.InitContainers[1].Image = i.UIImage
argoCDServerControllerDeployment.Spec.Template.Spec.InitContainers[1].ImagePullPolicy = apiv1.PullPolicy(i.ImagePullPolicy)
argoCDServerControllerDeployment.Spec.Template.Spec.Containers[0].Image = i.ServerImage
argoCDServerControllerDeployment.Spec.Template.Spec.Containers[0].ImagePullPolicy = apiv1.PullPolicy(i.ImagePullPolicy)
i.MustInstallResource(kube.MustToUnstructured(&argoCDServerServiceAccount))

View File

@@ -1,7 +1,56 @@
# NOTE: the values here are just an example and are not the values used during an install.
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
namespace: argocd
# TODO: future argocd tuning keys go here (e.g. resync period)
data: {}
data:
# url is the externally facing base URL of ArgoCD.
# This field is required when configuring SSO, which ArgoCD uses as part the redirectURI for the
# dex connectors. When configuring the application in the SSO provider (e.g. github, okta), the
# authorization callback URL will be url + /api/dex/callback. For example, if ArgoCD's url is
# https://example.com, then the auth callback to set in the SSO provider should be:
# https://example.com/api/dex/callback
url: http://localhost:8080
# dex.config holds the contents of the configuration yaml for the dex OIDC/Oauth2 provider sidecar.
# Only a subset of a full dex config is required, namely the connectors list. ArgoCD will generate
# the complete dex config based on the configured URL, and the known callback endpoint which the
# ArgoCD API server exposes (i.e. /api/dex/callback).
dex.config: |
# connectors is a list of dex connector configurations. For details on available connectors and
# how to configure them, see: https://github.com/coreos/dex/tree/master/Documentation/connectors
# NOTE:
# * Any values which start with '$' will look to a key in argocd-secret of the same name, to
# obtain the actual value.
# * ArgoCD will automatically set the 'redirectURI' field in any OAuth2 connectors, to match the
# external callback URL (e.g. https://example.com/api/dex/callback)
connectors:
# GitHub example
- type: github
id: github
name: GitHub
config:
clientID: aabbccddeeff00112233
clientSecret: $dex.github.clientSecret
orgs:
- name: your-github-org
# GitHub enterprise example
- type: github
id: acme-github
name: Acme GitHub
config:
hostName: github.acme.com
clientID: abcdefghijklmnopqrst
clientSecret: $dex.acme.clientSecret
orgs:
- name: your-github-org
# OIDC example (e.g. Okta)
- type: oidc
id: okta
name: Okta
config:
issuer: https://dev-123456.oktapreview.com
clientID: aaaabbbbccccddddeee
clientSecret: $dex.okta.clientSecret

View File

@@ -4,10 +4,23 @@ apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
namespace: argocd
type: Opaque
data:
# bcrypt hash of 'password'
admin.password: JDJhJDEwJGVYYkZmOEt3NUMzTDJVbE9FRDNqUU9QMC5reVNBamVLUXY0N3NqaFFpWlZwTkkyU2dMTzd1
stringData:
# bcrypt hash of the string "password"
admin.password: $2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W
# random server signature key for session validation
server.secretkey: aEDvv73vv70F77+9CRBSNu+/vTYQ77+9EUFh77+9LzFyJ++/vXfLsO+/vWRbeu+/ve+/vQ==
# The following keys hold the shared secret for authenticating GitHub/GitLab/BitBucket webhook
# events. To enable webhooks, configure one or more of the following keys with the shared git
# provider webhook secret. The payload URL configured in the git provider should use the
# /api/webhook endpoint of your ArgoCD instance (e.g. https://argocd.example.com/api/webhook)
github.webhook.secret: shhhh! it's a github secret
gitlab.webhook.secret: shhhh! it's a gitlab secret
bitbucket.webhook.uuid: your-bitbucket-uuid
# the following are user-defined keys which are referenced in the example argocd-cm configmap
# as part of SSO configuration.
dex.github.clientSecret: nv1vx8w4gw5byrflujfkxww6ykh85yq818aorvwy
dex.acme.clientSecret: 5pp7dyre3d5nyk0ree1tr0gd68k18xn94x8lfae9
dex.okta.clientSecret: x41ztv6ufyf07oyoopc6f62p222c00mox2ciquvt

View File

@@ -0,0 +1,26 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
data:
# policy.csv holds the policy CSV file which contains additional policy and role definitions.
# ArgoCD defines two built-in roles:
# * role:readonly - readonly access to all objects
# * role:admin - admin access to all objects
# The built-in policy can be seen under util/rbac/builtin-policy.csv
#
# The policy definition format is:
# p, <user/group>, <resource>, <action>, <project>/<object>
# For example, the following rule gives all members of 'my-org:team1' the ability to sync
# applications in the project named: my-project
# p, my-org:team1, applications, sync, my-project/*
#
# The role definition format is:
# g, <user/group>, <group>
# For example, the following rule makes all members of 'my-org:team2' have the role:admin role:
# g, my-org:team2, role:admin
policy.csv: ""
# policy.default holds the default policy which ArgoCD will fall back to when authorizing
# a user for API requests.
policy.default: role:readonly

View File

@@ -2,4 +2,3 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: application-controller
namespace: argocd

View File

@@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: application-controller-role
namespace: argocd
rules:
- apiGroups:
- ""
@@ -11,6 +10,9 @@ rules:
verbs:
- get
- watch
- list
- patch
- update
- apiGroups:
- argoproj.io
resources:

View File

@@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-controller-role-binding
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role

View File

@@ -2,7 +2,6 @@ apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: application-controller
namespace: argocd
spec:
selector:
matchLabels:

View File

@@ -2,4 +2,3 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: argocd-server
namespace: argocd

View File

@@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argocd-server-role
namespace: argocd
rules:
- apiGroups:
- ""

View File

@@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argocd-server-role-binding
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role

View File

@@ -2,7 +2,6 @@ apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: argocd-server
namespace: argocd
spec:
selector:
matchLabels:
@@ -14,16 +13,28 @@ spec:
spec:
serviceAccountName: argocd-server
initContainers:
- command: [cp, -r, /app, /shared]
- name: copyutil
image: argoproj/argocd-server:latest
command: [cp, /argocd-util, /shared]
volumeMounts:
- mountPath: /shared
name: static-files
- name: ui
image: argoproj/argocd-ui:latest
name: argocd-server-ui
command: [cp, -r, /app, /shared]
volumeMounts:
- mountPath: /shared
name: static-files
containers:
- command: [/argocd-server, --staticassets, /shared/app, --repo-server, 'argocd-repo-server:8081']
- name: argocd-server
image: argoproj/argocd-server:latest
name: argocd-server
command: [/argocd-server, --staticassets, /shared/app, --repo-server, 'argocd-repo-server:8081']
volumeMounts:
- mountPath: /shared
name: static-files
- name: dex
image: quay.io/coreos/dex:v2.10.0
command: [/shared/argocd-util, rundex]
volumeMounts:
- mountPath: /shared
name: static-files

View File

@@ -2,7 +2,6 @@ apiVersion: v1
kind: Service
metadata:
name: argocd-server
namespace: argocd
spec:
ports:
- name: http

View File

@@ -2,7 +2,6 @@ apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: argocd-repo-server
namespace: argocd
spec:
selector:
matchLabels:
@@ -13,8 +12,12 @@ spec:
app: argocd-repo-server
spec:
containers:
- command: [/argocd-repo-server]
- name: argocd-repo-server
image: argoproj/argocd-repo-server:latest
name: argocd-repo-server
command: [/argocd-repo-server]
ports:
- containerPort: 8081
- name: redis
image: redis:3.2.11
ports:
- containerPort: 6379

View File

@@ -2,7 +2,6 @@ apiVersion: v1
kind: Service
metadata:
name: argocd-repo-server
namespace: argocd
spec:
ports:
- port: 8081

View File

@@ -15,6 +15,7 @@ import (
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/server/session"
"github.com/argoproj/argo-cd/server/settings"
"github.com/argoproj/argo-cd/server/version"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/localconfig"
@@ -24,14 +25,16 @@ import (
)
const (
MetaDataTokenKey = "token"
// EnvArgoCDServer is the environment variable to look for an ArgoCD server address
EnvArgoCDServer = "ARGOCD_SERVER"
// EnvArgoCDAuthToken is the environment variable to look for an ArgoCD auth token
EnvArgoCDAuthToken = "ARGOCD_AUTH_TOKEN"
)
// ServerClient defines an interface for interaction with an Argo CD server.
type ServerClient interface {
// Client defines an interface for interaction with an Argo CD server.
type Client interface {
ClientOptions() ClientOptions
NewConn() (*grpc.ClientConn, error)
NewRepoClient() (*grpc.ClientConn, repository.RepositoryServiceClient, error)
NewRepoClientOrDie() (*grpc.ClientConn, repository.RepositoryServiceClient)
@@ -41,6 +44,8 @@ type ServerClient interface {
NewApplicationClientOrDie() (*grpc.ClientConn, application.ApplicationServiceClient)
NewSessionClient() (*grpc.ClientConn, session.SessionServiceClient, error)
NewSessionClientOrDie() (*grpc.ClientConn, session.SessionServiceClient)
NewSettingsClient() (*grpc.ClientConn, settings.SettingsServiceClient, error)
NewSettingsClientOrDie() (*grpc.ClientConn, settings.SettingsServiceClient)
NewVersionClient() (*grpc.ClientConn, version.VersionServiceClient, error)
NewVersionClientOrDie() (*grpc.ClientConn, version.VersionServiceClient)
}
@@ -65,7 +70,7 @@ type client struct {
}
// NewClient creates a new API client from a set of config options.
func NewClient(opts *ClientOptions) (ServerClient, error) {
func NewClient(opts *ClientOptions) (Client, error) {
var c client
localCfg, err := localconfig.ReadLocalConfig(opts.ConfigPath)
if err != nil {
@@ -130,7 +135,7 @@ func NewClient(opts *ClientOptions) (ServerClient, error) {
}
// NewClientOrDie creates a new API client from a set of config options, or fails fatally if the new client creation fails.
func NewClientOrDie(opts *ClientOptions) ServerClient {
func NewClientOrDie(opts *ClientOptions) Client {
client, err := NewClient(opts)
if err != nil {
log.Fatal(err)
@@ -138,7 +143,8 @@ func NewClientOrDie(opts *ClientOptions) ServerClient {
return client
}
// JwtCredentials holds a token for authentication.
// jwtCredentials implements the gRPC per-RPC credentials interface, which is
// used via grpc.WithPerRPCCredentials() for authentication
type jwtCredentials struct {
Token string
}
@@ -149,7 +155,8 @@ func (c jwtCredentials) RequireTransportSecurity() bool {
func (c jwtCredentials) GetRequestMetadata(context.Context, ...string) (map[string]string, error) {
return map[string]string{
"tokens": c.Token,
MetaDataTokenKey: c.Token,
"tokens": c.Token, // legacy key. delete eventually
}, nil
}
@@ -175,6 +182,15 @@ func (c *client) NewConn() (*grpc.ClientConn, error) {
return grpc_util.BlockingDial(context.Background(), "tcp", c.ServerAddr, creds, grpc.WithPerRPCCredentials(endpointCredentials))
}
func (c *client) ClientOptions() ClientOptions {
return ClientOptions{
ServerAddr: c.ServerAddr,
PlainText: c.PlainText,
Insecure: c.Insecure,
AuthToken: c.AuthToken,
}
}
func (c *client) NewRepoClient() (*grpc.ClientConn, repository.RepositoryServiceClient, error) {
conn, err := c.NewConn()
if err != nil {
@@ -243,6 +259,23 @@ func (c *client) NewSessionClientOrDie() (*grpc.ClientConn, session.SessionServi
return conn, sessionIf
}
func (c *client) NewSettingsClient() (*grpc.ClientConn, settings.SettingsServiceClient, error) {
conn, err := c.NewConn()
if err != nil {
return nil, nil, err
}
setIf := settings.NewSettingsServiceClient(conn)
return conn, setIf, nil
}
func (c *client) NewSettingsClientOrDie() (*grpc.ClientConn, settings.SettingsServiceClient) {
conn, setIf, err := c.NewSettingsClient()
if err != nil {
log.Fatalf("Failed to establish connection to %s: %v", c.ServerAddr, err)
}
return conn, setIf
}
func (c *client) NewVersionClient() (*grpc.ClientConn, version.VersionServiceClient, error) {
conn, err := c.NewConn()
if err != nil {

File diff suppressed because it is too large Load Diff

View File

@@ -23,6 +23,17 @@ message Application {
optional ApplicationSpec spec = 2;
optional ApplicationStatus status = 3;
optional Operation operation = 4;
}
// ApplicationCondition contains details about current application condition
message ApplicationCondition {
// Type is an application condition type
optional string type = 1;
// Message contains human-readable message indicating details about condition
optional string message = 2;
}
// ApplicationDestination contains deployment destination information
@@ -67,20 +78,22 @@ message ApplicationSpec {
optional ApplicationSource source = 1;
// Destination overrides the kubernetes server and namespace defined in the environment ksonnet app.yaml
// This field is optional. If omitted, uses the server and namespace defined in the environment
optional ApplicationDestination destination = 2;
// SyncPolicy dictates whether we auto-sync based on the delta between the tracked branch and live state
optional string syncPolicy = 3;
}
// ApplicationStatus contains information about application status in target environment.
message ApplicationStatus {
optional ComparisonResult comparisonResult = 1;
repeated DeploymentInfo recentDeployment = 2;
repeated DeploymentInfo history = 2;
repeated ComponentParameter parameters = 3;
optional HealthStatus health = 4;
optional OperationState operationState = 5;
repeated ApplicationCondition conditions = 6;
}
// ApplicationWatchEvent contains information about application change.
@@ -105,6 +118,9 @@ message Cluster {
// Config holds cluster information for connecting to a cluster
optional ClusterConfig config = 3;
// ConnectionState contains information about cluster connection state
optional ConnectionState connectionState = 4;
}
// ClusterConfig is the configuration attributes. This structure is subset of the go-client
@@ -137,10 +153,6 @@ message ComparisonResult {
optional ApplicationSource comparedTo = 2;
optional string server = 3;
optional string namespace = 4;
optional string status = 5;
repeated ResourceState resources = 6;
@@ -157,6 +169,15 @@ message ComponentParameter {
optional string value = 3;
}
// ConnectionState contains information about remote resource connection state
message ConnectionState {
optional string status = 1;
optional string message = 2;
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time attemptedAt = 3;
}
// DeploymentInfo contains information relevant to an application deployment
message DeploymentInfo {
repeated ComponentParameter params = 1;
@@ -170,6 +191,43 @@ message DeploymentInfo {
optional int64 id = 5;
}
message HealthStatus {
optional string status = 1;
optional string statusDetails = 2;
}
// Operation contains requested operation parameters.
message Operation {
optional SyncOperation sync = 1;
optional RollbackOperation rollback = 2;
}
// OperationState contains information about state of currently performing operation on application.
message OperationState {
// Operation is the original requested operation
optional Operation operation = 1;
// Phase is the current phase of the operation
optional string phase = 2;
// Message hold any pertinent messages when attempting to perform operation (typically errors).
optional string message = 3;
// SyncResult is the result of a Sync operation
optional SyncOperationResult syncResult = 4;
// RollbackResult is the result of a Rollback operation
optional SyncOperationResult rollbackResult = 5;
// StartedAt contains time of operation start
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 6;
// FinishedAt contains time of operation completion
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 7;
}
// Repository is a Git repository holding application configurations
message Repository {
optional string repo = 1;
@@ -179,6 +237,8 @@ message Repository {
optional string password = 3;
optional string sshPrivateKey = 4;
optional ConnectionState connectionState = 5;
}
// RepositoryList is a collection of Repositories.
@@ -188,6 +248,18 @@ message RepositoryList {
repeated Repository items = 2;
}
message ResourceDetails {
optional string name = 1;
optional string kind = 2;
optional string namespace = 3;
optional string message = 4;
optional string status = 5;
}
// ResourceNode contains information about live resource and its children
message ResourceNode {
optional string state = 1;
@@ -204,6 +276,30 @@ message ResourceState {
optional string status = 3;
repeated ResourceNode childLiveResources = 4;
optional HealthStatus health = 5;
}
// RollbackOperation requests a rollback to a previous deployment, identified by id.
message RollbackOperation {
	optional int64 id = 1;
	optional bool prune = 2;
	optional bool dryRun = 3;
}
// SyncOperation contains sync operation details.
message SyncOperation {
	// Revision is the git revision to sync to.
	optional string revision = 1;
	optional bool prune = 2;
	optional bool dryRun = 3;
}
// SyncOperationResult represent result of sync operation
message SyncOperationResult {
	// Resources lists the per-resource sync outcomes.
	repeated ResourceDetails resources = 1;
}
// TLSClientConfig contains settings to enable transport layer security

View File

@@ -11,7 +11,9 @@ type objectMeta struct {
}
func (a *Application) GetMetadata() *objectMeta {
return &objectMeta{
Name: &a.Name,
var om objectMeta
if a != nil {
om.Name = &a.Name
}
return &om
}

View File

@@ -2,15 +2,95 @@ package v1alpha1
import (
"encoding/json"
"time"
"github.com/argoproj/argo-cd/common"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/rest"
)
// SyncOperation contains sync operation details.
type SyncOperation struct {
	// Revision is the git revision (branch, tag, or commit SHA) to sync to.
	Revision string `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
	// Prune requests deletion of live resources no longer defined in git.
	// NOTE: bools use the "varint" protobuf wire type, not "bytes".
	Prune bool `json:"prune,omitempty" protobuf:"varint,2,opt,name=prune"`
	// DryRun previews the sync without applying changes.
	DryRun bool `json:"dryRun,omitempty" protobuf:"varint,3,opt,name=dryRun"`
}
// RollbackOperation requests a rollback to a previous deployment.
type RollbackOperation struct {
	// ID identifies the deployment history entry to roll back to.
	// NOTE: int64 and bool fields use the "varint" protobuf wire type, not "bytes".
	ID int64 `json:"id" protobuf:"varint,1,opt,name=id"`
	// Prune requests deletion of live resources not part of the target deployment.
	Prune bool `json:"prune,omitempty" protobuf:"varint,2,opt,name=prune"`
	// DryRun previews the rollback without applying changes.
	DryRun bool `json:"dryRun,omitempty" protobuf:"varint,3,opt,name=dryRun"`
}
// Operation contains requested operation parameters.
type Operation struct {
	// Sync, when non-nil, requests a sync of the application to a git revision.
	Sync *SyncOperation `json:"sync,omitempty" protobuf:"bytes,1,opt,name=sync"`
	// Rollback, when non-nil, requests a rollback to a previous deployment.
	Rollback *RollbackOperation `json:"rollback,omitempty" protobuf:"bytes,2,opt,name=rollback"`
}
// OperationPhase is the lifecycle phase of an application operation.
type OperationPhase string

const (
	OperationRunning   OperationPhase = "Running"
	OperationFailed    OperationPhase = "Failed"
	OperationError     OperationPhase = "Error"
	OperationSucceeded OperationPhase = "Succeeded"
)

// Completed reports whether the operation has reached a terminal phase
// (Failed, Error, or Succeeded).
func (os OperationPhase) Completed() bool {
	return os == OperationFailed || os == OperationError || os == OperationSucceeded
}

// Successful reports whether the operation finished successfully.
func (os OperationPhase) Successful() bool {
	return os == OperationSucceeded
}
// OperationState contains information about state of currently performing operation on application.
// It records the original request, the current phase, and the result of the
// sync or rollback once one is available.
type OperationState struct {
	// Operation is the original requested operation
	Operation Operation `json:"operation" protobuf:"bytes,1,opt,name=operation"`
	// Phase is the current phase of the operation
	Phase OperationPhase `json:"phase" protobuf:"bytes,2,opt,name=phase"`
	// Message hold any pertinent messages when attempting to perform operation (typically errors).
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// SyncResult is the result of a Sync operation
	SyncResult *SyncOperationResult `json:"syncResult,omitempty" protobuf:"bytes,4,opt,name=syncResult"`
	// RollbackResult is the result of a Rollback operation
	RollbackResult *SyncOperationResult `json:"rollbackResult,omitempty" protobuf:"bytes,5,opt,name=rollbackResult"`
	// StartedAt contains time of operation start
	StartedAt metav1.Time `json:"startedAt" protobuf:"bytes,6,opt,name=startedAt"`
	// FinishedAt contains time of operation completion (nil while still running)
	FinishedAt *metav1.Time `json:"finishedAt" protobuf:"bytes,7,opt,name=finishedAt"`
}
// SyncOperationResult represent result of sync operation
type SyncOperationResult struct {
	// Resources holds the per-resource outcome of the sync.
	// NOTE: repeated fields use the "rep" protobuf label (was incorrectly "opt").
	Resources []*ResourceDetails `json:"resources" protobuf:"bytes,1,rep,name=resources"`
}
// ResourceSyncStatus is the sync outcome for a single resource.
type ResourceSyncStatus string

const (
	// ResourceDetailsSynced indicates the resource was synced successfully.
	ResourceDetailsSynced ResourceSyncStatus = "Synced"
	// ResourceDetailsSyncFailed indicates the resource failed to sync.
	ResourceDetailsSyncFailed ResourceSyncStatus = "SyncFailed"
	// ResourceDetailsSyncedAndPruned indicates the resource was pruned (deleted) during sync.
	ResourceDetailsSyncedAndPruned ResourceSyncStatus = "SyncedAndPruned"
	// ResourceDetailsPruningRequired indicates the resource needs pruning
	// (presumably pruning was not requested — confirm against controller logic).
	ResourceDetailsPruningRequired ResourceSyncStatus = "PruningRequired"
)
// ResourceDetails reports the outcome of syncing a single Kubernetes resource.
type ResourceDetails struct {
	// Name, Kind, and Namespace identify the resource that was synced.
	Name      string `json:"name" protobuf:"bytes,1,opt,name=name"`
	Kind      string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
	Namespace string `json:"namespace" protobuf:"bytes,3,opt,name=namespace"`
	// Message carries a human-readable result message for the resource.
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// Status is the sync outcome (see ResourceSyncStatus constants).
	Status ResourceSyncStatus `json:"status,omitempty" protobuf:"bytes,5,opt,name=status"`
}
// DeploymentInfo contains information relevant to an application deployment
type DeploymentInfo struct {
Params []ComponentParameter `json:"params" protobuf:"bytes,1,name=params"`
@@ -29,6 +109,7 @@ type Application struct {
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
Spec ApplicationSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
Status ApplicationStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
Operation *Operation `json:"operation,omitempty" protobuf:"bytes,4,opt,name=operation"`
}
// ApplicationWatchEvent contains information about application change.
@@ -56,10 +137,7 @@ type ApplicationSpec struct {
// Source is a reference to the location ksonnet application definition
Source ApplicationSource `json:"source" protobuf:"bytes,1,opt,name=source"`
// Destination overrides the kubernetes server and namespace defined in the environment ksonnet app.yaml
// This field is optional. If omitted, uses the server and namespace defined in the environment
Destination *ApplicationDestination `json:"destination,omitempty" protobuf:"bytes,2,opt,name=destination"`
// SyncPolicy dictates whether we auto-sync based on the delta between the tracked branch and live state
SyncPolicy string `json:"syncPolicy,omitempty" protobuf:"bytes,3,opt,name=syncPolicy"`
Destination ApplicationDestination `json:"destination" protobuf:"bytes,2,name=destination"`
}
// ComponentParameter contains information about component parameter value
@@ -105,22 +183,52 @@ const (
// ApplicationStatus contains information about application status in target environment.
type ApplicationStatus struct {
ComparisonResult ComparisonResult `json:"comparisonResult" protobuf:"bytes,1,opt,name=comparisonResult"`
RecentDeployments []DeploymentInfo `json:"recentDeployments" protobuf:"bytes,2,opt,name=recentDeployment"`
Parameters []ComponentParameter `json:"parameters,omitempty" protobuf:"bytes,3,opt,name=parameters"`
ComparisonResult ComparisonResult `json:"comparisonResult" protobuf:"bytes,1,opt,name=comparisonResult"`
History []DeploymentInfo `json:"history" protobuf:"bytes,2,opt,name=history"`
Parameters []ComponentParameter `json:"parameters,omitempty" protobuf:"bytes,3,opt,name=parameters"`
Health HealthStatus `json:"health,omitempty" protobuf:"bytes,4,opt,name=health"`
OperationState *OperationState `json:"operationState,omitempty" protobuf:"bytes,5,opt,name=operationState"`
Conditions []ApplicationCondition `json:"conditions,omitempty" protobuf:"bytes,6,opt,name=conditions"`
}
// ApplicationConditionType names a kind of application condition (string alias).
type ApplicationConditionType = string

const (
	// ApplicationConditionDeletionError indicates that controller failed to delete application
	ApplicationConditionDeletionError = "DeletionError"
)

// ApplicationCondition contains details about current application condition
type ApplicationCondition struct {
	// Type is an application condition type
	Type ApplicationConditionType `json:"type" protobuf:"bytes,1,opt,name=type"`
	// Message contains human-readable message indicating details about condition
	Message string `json:"message" protobuf:"bytes,2,opt,name=message"`
}
// ComparisonResult is a comparison result of application spec and deployed application.
type ComparisonResult struct {
ComparedAt metav1.Time `json:"comparedAt" protobuf:"bytes,1,opt,name=comparedAt"`
ComparedTo ApplicationSource `json:"comparedTo" protobuf:"bytes,2,opt,name=comparedTo"`
Server string `json:"server" protobuf:"bytes,3,opt,name=server"`
Namespace string `json:"namespace" protobuf:"bytes,4,opt,name=namespace"`
Status ComparisonStatus `json:"status" protobuf:"bytes,5,opt,name=status,casttype=ComparisonStatus"`
Resources []ResourceState `json:"resources" protobuf:"bytes,6,opt,name=resources"`
Error string `json:"error,omitempty" protobuf:"bytes,7,opt,name=error"`
Error string `json:"error" protobuf:"bytes,7,opt,name=error"`
}
// HealthStatus reports the observed health of a resource or application.
type HealthStatus struct {
	// Status is one of the HealthStatusCode constants below.
	Status HealthStatusCode `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"`
	// StatusDetails optionally carries additional detail about Status.
	StatusDetails string `json:"statusDetails,omitempty" protobuf:"bytes,2,opt,name=statusDetails"`
}

// HealthStatusCode is a string alias for the health states below.
type HealthStatusCode = string

const (
	// HealthStatusUnknown is the zero value: health has not been determined.
	HealthStatusUnknown     = ""
	HealthStatusProgressing = "Progressing"
	HealthStatusHealthy     = "Healthy"
	HealthStatusDegraded    = "Degraded"
)
// ResourceNode contains information about live resource and its children
type ResourceNode struct {
State string `json:"state,omitempty" protobuf:"bytes,1,opt,name=state"`
@@ -133,6 +241,23 @@ type ResourceState struct {
LiveState string `json:"liveState,omitempty" protobuf:"bytes,2,opt,name=liveState"`
Status ComparisonStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
ChildLiveResources []ResourceNode `json:"childLiveResources,omitempty" protobuf:"bytes,4,opt,name=childLiveResources"`
Health HealthStatus `json:"health,omitempty" protobuf:"bytes,5,opt,name=health"`
}
// ConnectionStatus represents connection status
// ConnectionStatus represents connection status
type ConnectionStatus = string

const (
	ConnectionStatusUnknown    = "Unknown"
	ConnectionStatusSuccessful = "Successful"
	ConnectionStatusFailed     = "Failed"
)

// ConnectionState contains information about remote resource connection state
type ConnectionState struct {
	// Status is one of the ConnectionStatus constants above.
	Status ConnectionStatus `json:"status" protobuf:"bytes,1,opt,name=status"`
	// Message carries a human-readable description of the connection state.
	Message string `json:"message" protobuf:"bytes,2,opt,name=message"`
	// ModifiedAt records when the state was last updated.
	// NOTE(review): the JSON key is "attemptedAt" while the field is named
	// ModifiedAt — confirm the mismatch is intentional before changing either.
	ModifiedAt *metav1.Time `json:"attemptedAt" protobuf:"bytes,3,opt,name=attemptedAt"`
}
// Cluster is the definition of a cluster resource
@@ -145,6 +270,9 @@ type Cluster struct {
// Config holds cluster information for connecting to a cluster
Config ClusterConfig `json:"config" protobuf:"bytes,3,opt,name=config"`
// ConnectionState contains information about cluster connection state
ConnectionState ConnectionState `json:"connectionState,omitempty" protobuf:"bytes,4,opt,name=connectionState"`
}
// ClusterList is a collection of Clusters.
@@ -191,10 +319,11 @@ type TLSClientConfig struct {
// Repository is a Git repository holding application configurations
type Repository struct {
Repo string `json:"repo" protobuf:"bytes,1,opt,name=repo"`
Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"`
Password string `json:"password,omitempty" protobuf:"bytes,3,opt,name=password"`
SSHPrivateKey string `json:"sshPrivateKey,omitempty" protobuf:"bytes,4,opt,name=sshPrivateKey"`
Repo string `json:"repo" protobuf:"bytes,1,opt,name=repo"`
Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"`
Password string `json:"password,omitempty" protobuf:"bytes,3,opt,name=password"`
SSHPrivateKey string `json:"sshPrivateKey,omitempty" protobuf:"bytes,4,opt,name=sshPrivateKey"`
ConnectionState ConnectionState `json:"connectionState,omitempty" protobuf:"bytes,5,opt,name=connectionState"`
}
// RepositoryList is a collection of Repositories.
@@ -203,6 +332,33 @@ type RepositoryList struct {
Items []Repository `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// getFinalizerIndex returns the position of the named finalizer in the
// application's metadata, or -1 when it is not present.
func (app *Application) getFinalizerIndex(name string) int {
	for i := range app.Finalizers {
		if app.Finalizers[i] == name {
			return i
		}
	}
	return -1
}
// CascadedDeletion indicates if resources finalizer is set and controller
// should delete app resources before deleting app
func (app *Application) CascadedDeletion() bool {
	return app.getFinalizerIndex(common.ResourcesFinalizerName) >= 0
}
// SetCascadedDeletion adds or removes the resources finalizer so that the
// controller prunes application resources before the app itself is deleted.
func (app *Application) SetCascadedDeletion(prune bool) {
	index := app.getFinalizerIndex(common.ResourcesFinalizerName)
	present := index >= 0
	if prune == present {
		// Already in the desired state; nothing to do.
		return
	}
	if present {
		// Remove: swap with the last element and truncate (order is not preserved).
		last := len(app.Finalizers) - 1
		app.Finalizers[index] = app.Finalizers[last]
		app.Finalizers = app.Finalizers[:last]
	} else {
		app.Finalizers = append(app.Finalizers, common.ResourcesFinalizerName)
	}
}
// NeedRefreshAppStatus answers if application status needs to be refreshed. Returns true if application never been compared, has changed or comparison result has expired.
func (app *Application) NeedRefreshAppStatus(statusRefreshTimeout time.Duration) bool {
return app.Status.ComparisonResult.Status == ComparisonStatusUnknown ||
@@ -239,7 +395,7 @@ func (c *Cluster) RESTConfig() *rest.Config {
func (cr *ComparisonResult) TargetObjects() ([]*unstructured.Unstructured, error) {
objs := make([]*unstructured.Unstructured, len(cr.Resources))
for i, resState := range cr.Resources {
obj, err := UnmarshalToUnstructured(resState.TargetState)
obj, err := resState.TargetObject()
if err != nil {
return nil, err
}
@@ -252,7 +408,7 @@ func (cr *ComparisonResult) TargetObjects() ([]*unstructured.Unstructured, error
func (cr *ComparisonResult) LiveObjects() ([]*unstructured.Unstructured, error) {
objs := make([]*unstructured.Unstructured, len(cr.Resources))
for i, resState := range cr.Resources {
obj, err := UnmarshalToUnstructured(resState.LiveState)
obj, err := resState.LiveObject()
if err != nil {
return nil, err
}
@@ -272,3 +428,11 @@ func UnmarshalToUnstructured(resource string) (*unstructured.Unstructured, error
}
return &obj, nil
}
// LiveObject deserializes the resource's live (cluster) state into an
// unstructured object.
func (r ResourceState) LiveObject() (*unstructured.Unstructured, error) {
	return UnmarshalToUnstructured(r.LiveState)
}

// TargetObject deserializes the resource's target (git) state into an
// unstructured object.
func (r ResourceState) TargetObject() (*unstructured.Unstructured, error) {
	return UnmarshalToUnstructured(r.TargetState)
}

View File

@@ -5,6 +5,7 @@
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -15,6 +16,15 @@ func (in *Application) DeepCopyInto(out *Application) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
if in.Operation != nil {
in, out := &in.Operation, &out.Operation
if *in == nil {
*out = nil
} else {
*out = new(Operation)
(*in).DeepCopyInto(*out)
}
}
return
}
@@ -36,6 +46,22 @@ func (in *Application) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationCondition) DeepCopyInto(out *ApplicationCondition) {
	// Plain value copy suffices: ApplicationCondition has only string fields.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationCondition.
func (in *ApplicationCondition) DeepCopy() *ApplicationCondition {
	if in == nil {
		return nil
	}
	out := new(ApplicationCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDestination) DeepCopyInto(out *ApplicationDestination) {
*out = *in
@@ -110,15 +136,7 @@ func (in *ApplicationSource) DeepCopy() *ApplicationSource {
func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) {
*out = *in
in.Source.DeepCopyInto(&out.Source)
if in.Destination != nil {
in, out := &in.Destination, &out.Destination
if *in == nil {
*out = nil
} else {
*out = new(ApplicationDestination)
**out = **in
}
}
out.Destination = in.Destination
return
}
@@ -136,8 +154,8 @@ func (in *ApplicationSpec) DeepCopy() *ApplicationSpec {
func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) {
*out = *in
in.ComparisonResult.DeepCopyInto(&out.ComparisonResult)
if in.RecentDeployments != nil {
in, out := &in.RecentDeployments, &out.RecentDeployments
if in.History != nil {
in, out := &in.History, &out.History
*out = make([]DeploymentInfo, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
@@ -148,6 +166,21 @@ func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) {
*out = make([]ComponentParameter, len(*in))
copy(*out, *in)
}
out.Health = in.Health
if in.OperationState != nil {
in, out := &in.OperationState, &out.OperationState
if *in == nil {
*out = nil
} else {
*out = new(OperationState)
(*in).DeepCopyInto(*out)
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ApplicationCondition, len(*in))
copy(*out, *in)
}
return
}
@@ -182,6 +215,7 @@ func (in *ApplicationWatchEvent) DeepCopy() *ApplicationWatchEvent {
func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = *in
in.Config.DeepCopyInto(&out.Config)
in.ConnectionState.DeepCopyInto(&out.ConnectionState)
return
}
@@ -277,6 +311,31 @@ func (in *ComponentParameter) DeepCopy() *ComponentParameter {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectionState) DeepCopyInto(out *ConnectionState) {
	*out = *in
	if in.ModifiedAt != nil {
		// ModifiedAt is a *metav1.Time; allocate a fresh value so the copy
		// does not alias the source timestamp.
		in, out := &in.ModifiedAt, &out.ModifiedAt
		if *in == nil {
			*out = nil
		} else {
			*out = new(v1.Time)
			(*in).DeepCopyInto(*out)
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionState.
func (in *ConnectionState) DeepCopy() *ConnectionState {
	if in == nil {
		return nil
	}
	out := new(ConnectionState)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentInfo) DeepCopyInto(out *DeploymentInfo) {
*out = *in
@@ -304,9 +363,105 @@ func (in *DeploymentInfo) DeepCopy() *DeploymentInfo {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthStatus) DeepCopyInto(out *HealthStatus) {
	// Plain value copy suffices: HealthStatus has only string fields.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthStatus.
func (in *HealthStatus) DeepCopy() *HealthStatus {
	if in == nil {
		return nil
	}
	out := new(HealthStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Operation) DeepCopyInto(out *Operation) {
	*out = *in
	if in.Sync != nil {
		// Sync is a pointer; allocate a fresh SyncOperation so the copy
		// does not alias the source.
		in, out := &in.Sync, &out.Sync
		if *in == nil {
			*out = nil
		} else {
			*out = new(SyncOperation)
			**out = **in
		}
	}
	if in.Rollback != nil {
		// Same treatment for the optional rollback request.
		in, out := &in.Rollback, &out.Rollback
		if *in == nil {
			*out = nil
		} else {
			*out = new(RollbackOperation)
			**out = **in
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Operation.
func (in *Operation) DeepCopy() *Operation {
	if in == nil {
		return nil
	}
	out := new(Operation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperationState) DeepCopyInto(out *OperationState) {
	*out = *in
	in.Operation.DeepCopyInto(&out.Operation)
	if in.SyncResult != nil {
		// Deep-copy the optional sync result so its resource slice is not shared.
		in, out := &in.SyncResult, &out.SyncResult
		if *in == nil {
			*out = nil
		} else {
			*out = new(SyncOperationResult)
			(*in).DeepCopyInto(*out)
		}
	}
	if in.RollbackResult != nil {
		// Same treatment for the optional rollback result.
		in, out := &in.RollbackResult, &out.RollbackResult
		if *in == nil {
			*out = nil
		} else {
			*out = new(SyncOperationResult)
			(*in).DeepCopyInto(*out)
		}
	}
	in.StartedAt.DeepCopyInto(&out.StartedAt)
	if in.FinishedAt != nil {
		// FinishedAt is a *metav1.Time; copy the pointed-to value.
		in, out := &in.FinishedAt, &out.FinishedAt
		if *in == nil {
			*out = nil
		} else {
			*out = new(v1.Time)
			(*in).DeepCopyInto(*out)
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationState.
func (in *OperationState) DeepCopy() *OperationState {
	if in == nil {
		return nil
	}
	out := new(OperationState)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Repository) DeepCopyInto(out *Repository) {
	*out = *in
	// ConnectionState contains a *metav1.Time and therefore needs a deep copy.
	in.ConnectionState.DeepCopyInto(&out.ConnectionState)
	return
}
@@ -327,7 +482,9 @@ func (in *RepositoryList) DeepCopyInto(out *RepositoryList) {
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Repository, len(*in))
copy(*out, *in)
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@@ -342,6 +499,22 @@ func (in *RepositoryList) DeepCopy() *RepositoryList {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceDetails) DeepCopyInto(out *ResourceDetails) {
	// Plain value copy suffices: ResourceDetails has only string-typed fields.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDetails.
func (in *ResourceDetails) DeepCopy() *ResourceDetails {
	if in == nil {
		return nil
	}
	out := new(ResourceDetails)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceNode) DeepCopyInto(out *ResourceNode) {
*out = *in
@@ -375,6 +548,7 @@ func (in *ResourceState) DeepCopyInto(out *ResourceState) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
out.Health = in.Health
return
}
@@ -388,6 +562,66 @@ func (in *ResourceState) DeepCopy() *ResourceState {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollbackOperation) DeepCopyInto(out *RollbackOperation) {
	// Plain value copy suffices: RollbackOperation has only scalar fields.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackOperation.
func (in *RollbackOperation) DeepCopy() *RollbackOperation {
	if in == nil {
		return nil
	}
	out := new(RollbackOperation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncOperation) DeepCopyInto(out *SyncOperation) {
	// Plain value copy suffices: SyncOperation has only scalar fields.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncOperation.
func (in *SyncOperation) DeepCopy() *SyncOperation {
	if in == nil {
		return nil
	}
	out := new(SyncOperation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncOperationResult) DeepCopyInto(out *SyncOperationResult) {
	*out = *in
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make([]*ResourceDetails, len(*in))
		for i := range *in {
			// Elements are pointers: nil entries stay nil; non-nil entries
			// get their own ResourceDetails allocation.
			if (*in)[i] == nil {
				(*out)[i] = nil
			} else {
				(*out)[i] = new(ResourceDetails)
				(*in)[i].DeepCopyInto((*out)[i])
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncOperationResult.
func (in *SyncOperationResult) DeepCopy() *SyncOperationResult {
	if in == nil {
		return nil
	}
	out := new(SyncOperationResult)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) {
*out = *in

View File

@@ -1,85 +1,146 @@
package repository
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/git"
ksutil "github.com/argoproj/argo-cd/util/ksonnet"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes"
"github.com/argoproj/argo-cd/util/kube"
)
const (
// DefaultRepoCacheExpiration is the duration for items to live in the repo cache
DefaultRepoCacheExpiration = 24 * time.Hour
)
// Service implements ManifestService interface
type Service struct {
ns string
kubeClient kubernetes.Interface
gitClient git.Client
repoLock *util.KeyLock
gitFactory git.ClientFactory
cache cache.Cache
}
// NewService returns a new instance of the Manifest service
func NewService(namespace string, kubeClient kubernetes.Interface, gitClient git.Client) *Service {
func NewService(gitFactory git.ClientFactory, cache cache.Cache) *Service {
return &Service{
ns: namespace,
kubeClient: kubeClient,
gitClient: gitClient,
repoLock: util.NewKeyLock(),
gitFactory: gitFactory,
cache: cache,
}
}
func (s *Service) GetKsonnetApp(ctx context.Context, in *KsonnetAppRequest) (*KsonnetAppResponse, error) {
appRepoPath := tempRepoPath(in.Repo.Repo)
// ListDir lists the contents of a GitHub repo
func (s *Service) ListDir(ctx context.Context, q *ListDirRequest) (*FileList, error) {
appRepoPath := tempRepoPath(q.Repo.Repo)
s.repoLock.Lock(appRepoPath)
defer s.unlockAndResetRepoPath(appRepoPath)
ksApp, err := s.getAppSpec(*in.Repo, appRepoPath, in.Revision, in.Path)
defer s.repoLock.Unlock(appRepoPath)
gitClient := s.gitFactory.NewClient(q.Repo.Repo, appRepoPath, q.Repo.Username, q.Repo.Password, q.Repo.SSHPrivateKey)
err := gitClient.Init()
if err != nil {
return nil, err
}
return ksAppToResponse(ksApp)
commitSHA, err := gitClient.LsRemote(q.Revision)
if err != nil {
return nil, err
}
cacheKey := listDirCacheKey(commitSHA, q)
var res FileList
err = s.cache.Get(cacheKey, &res)
if err == nil {
log.Infof("manifest cache hit: %s", cacheKey)
return &res, nil
}
err = checkoutRevision(gitClient, q.Revision)
if err != nil {
return nil, err
}
lsFiles, err := gitClient.LsFiles(q.Path)
if err != nil {
return nil, err
}
res = FileList{
Items: lsFiles,
}
err = s.cache.Set(&cache.Item{
Key: cacheKey,
Object: &res,
Expiration: DefaultRepoCacheExpiration,
})
if err != nil {
log.Warnf("manifest cache set error %s: %v", cacheKey, err)
}
return &res, nil
}
// ksAppToResponse converts a Ksonnet app instance to a API response object
func ksAppToResponse(ksApp ksutil.KsonnetApp) (*KsonnetAppResponse, error) {
var appRes KsonnetAppResponse
appRes.Environments = make(map[string]*KsonnetEnvironment)
for envName, env := range ksApp.Spec().Environments {
if env.Destination == nil {
return nil, fmt.Errorf("Environment '%s' has no destination defined", envName)
}
envRes := KsonnetEnvironment{
Name: envName,
K8SVersion: env.KubernetesVersion,
Path: env.Path,
Destination: &KsonnetEnvironmentDestination{
Server: env.Destination.Server,
Namespace: env.Destination.Namespace,
},
}
appRes.Environments[envName] = &envRes
func (s *Service) GetFile(ctx context.Context, q *GetFileRequest) (*GetFileResponse, error) {
appRepoPath := tempRepoPath(q.Repo.Repo)
s.repoLock.Lock(appRepoPath)
defer s.repoLock.Unlock(appRepoPath)
gitClient := s.gitFactory.NewClient(q.Repo.Repo, appRepoPath, q.Repo.Username, q.Repo.Password, q.Repo.SSHPrivateKey)
err := gitClient.Init()
if err != nil {
return nil, err
}
return &appRes, nil
err = checkoutRevision(gitClient, q.Revision)
if err != nil {
return nil, err
}
data, err := ioutil.ReadFile(path.Join(gitClient.Root(), q.Path))
if err != nil {
return nil, err
}
res := GetFileResponse{
Data: data,
}
return &res, nil
}
func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*ManifestResponse, error) {
appRepoPath := tempRepoPath(q.Repo.Repo)
s.repoLock.Lock(appRepoPath)
defer s.unlockAndResetRepoPath(appRepoPath)
defer s.repoLock.Unlock(appRepoPath)
err := s.gitClient.CloneOrFetch(q.Repo.Repo, q.Repo.Username, q.Repo.Password, q.Repo.SSHPrivateKey, appRepoPath)
gitClient := s.gitFactory.NewClient(q.Repo.Repo, appRepoPath, q.Repo.Username, q.Repo.Password, q.Repo.SSHPrivateKey)
err := gitClient.Init()
if err != nil {
return nil, err
}
commitSHA, err := gitClient.LsRemote(q.Revision)
if err != nil {
return nil, err
}
cacheKey := manifestCacheKey(commitSHA, q)
var res ManifestResponse
err = s.cache.Get(cacheKey, &res)
if err == nil {
log.Infof("manifest cache hit: %s", cacheKey)
return &res, nil
}
if err != cache.ErrCacheMiss {
log.Warnf("manifest cache error %s: %v", cacheKey, err)
} else {
log.Infof("manifest cache miss: %s", cacheKey)
}
revision, err := s.gitClient.Checkout(appRepoPath, q.Revision)
err = checkoutRevision(gitClient, q.Revision)
if err != nil {
return nil, err
}
@@ -89,6 +150,11 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
return nil, fmt.Errorf("unable to load application from %s: %v", appPath, err)
}
params, err := ksApp.ListEnvParams(q.Environment)
if err != nil {
return nil, err
}
if q.ComponentParameterOverrides != nil {
for _, override := range q.ComponentParameterOverrides {
err = ksApp.SetComponentParams(q.Environment, override.Component, override.Name, override.Value)
@@ -111,7 +177,7 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
manifests := make([]string, len(targetObjs))
for i, target := range targetObjs {
if q.AppLabel != "" {
err = s.setAppLabels(target, q.AppLabel)
err = kube.SetLabel(target, common.LabelApplicationName, q.AppLabel)
if err != nil {
return nil, err
}
@@ -122,82 +188,22 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
}
manifests[i] = string(manifestStr)
}
return &ManifestResponse{
Revision: revision,
res = ManifestResponse{
Revision: commitSHA,
Manifests: manifests,
Namespace: env.Destination.Namespace,
Server: env.Destination.Server,
}, nil
}
func (s *Service) getAppSpec(repo v1alpha1.Repository, appRepoPath, revision, subPath string) (ksutil.KsonnetApp, error) {
err := s.gitClient.CloneOrFetch(repo.Repo, repo.Username, repo.Password, repo.SSHPrivateKey, appRepoPath)
Params: params,
}
err = s.cache.Set(&cache.Item{
Key: cacheKey,
Object: &res,
Expiration: DefaultRepoCacheExpiration,
})
if err != nil {
return nil, err
log.Warnf("manifest cache set error %s: %v", cacheKey, err)
}
_, err = s.gitClient.Checkout(appRepoPath, revision)
if err != nil {
return nil, err
}
appPath := path.Join(appRepoPath, subPath)
ksApp, err := ksutil.NewKsonnetApp(appPath)
if err != nil {
return nil, err
}
return ksApp, nil
}
func (s *Service) setAppLabels(target *unstructured.Unstructured, appName string) error {
labels := target.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
labels[common.LabelApplicationName] = appName
target.SetLabels(labels)
// special case for deployment: make sure that derived replicaset and pod has application label
if target.GetKind() == "Deployment" {
labels, ok := unstructured.NestedMap(target.UnstructuredContent(), "spec", "template", "metadata", "labels")
if ok {
if labels == nil {
labels = make(map[string]interface{})
}
labels[common.LabelApplicationName] = appName
}
unstructured.SetNestedMap(target.UnstructuredContent(), labels, "spec", "template", "metadata", "labels")
}
return nil
}
// GetEnvParams retrieves Ksonnet environment params in specified repo name and revision
func (s *Service) GetEnvParams(c context.Context, q *EnvParamsRequest) (*EnvParamsResponse, error) {
appRepoPath := tempRepoPath(q.Repo.Repo)
s.repoLock.Lock(appRepoPath)
defer s.repoLock.Unlock(appRepoPath)
err := s.gitClient.CloneOrFetch(q.Repo.Repo, q.Repo.Username, q.Repo.Password, q.Repo.SSHPrivateKey, appRepoPath)
if err != nil {
return nil, err
}
_, err = s.gitClient.Checkout(appRepoPath, q.Revision)
if err != nil {
return nil, err
}
appPath := path.Join(appRepoPath, q.Path)
ksApp, err := ksutil.NewKsonnetApp(appPath)
if err != nil {
return nil, err
}
target, err := ksApp.ListEnvParams(q.Environment)
if err != nil {
return nil, err
}
return &EnvParamsResponse{
Params: target,
}, nil
return &res, nil
}
// tempRepoPath returns a formulated temporary directory location to clone a repository
@@ -205,12 +211,28 @@ func tempRepoPath(repo string) string {
return path.Join(os.TempDir(), strings.Replace(repo, "/", "_", -1))
}
// unlockAndResetRepoPath will reset any local changes in a local git repo and unlock the path
// so that other workers can use the local repo
func (s *Service) unlockAndResetRepoPath(appRepoPath string) {
err := s.gitClient.Reset(appRepoPath)
// checkoutRevision is a convenience function to initialize a repo, fetch, and checkout a revision
func checkoutRevision(gitClient git.Client, revision string) error {
err := gitClient.Fetch()
if err != nil {
return err
}
err = gitClient.Reset()
if err != nil {
log.Warn(err)
}
s.repoLock.Unlock(appRepoPath)
err = gitClient.Checkout(revision)
if err != nil {
return err
}
return nil
}
// manifestCacheKey builds the cache key for a manifest generation request from
// the request path, ksonnet environment, resolved commit SHA, and any
// component parameter overrides.
func manifestCacheKey(commitSHA string, q *ManifestRequest) string {
	// Marshal error is deliberately ignored: a best-effort key is acceptable
	// here, and a failed marshal only yields an empty overrides segment.
	pStr, _ := json.Marshal(q.ComponentParameterOverrides)
	return fmt.Sprintf("mfst|%s|%s|%s|%s", q.Path, q.Environment, commitSHA, string(pStr))
}
func listDirCacheKey(commitSHA string, q *ListDirRequest) string {
return fmt.Sprintf("ldir|%s|%s", q.Path, commitSHA)
}

File diff suppressed because it is too large Load Diff

View File

@@ -23,58 +23,34 @@ message ManifestResponse {
string namespace = 2;
string server = 3;
string revision = 4;
repeated github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.ComponentParameter params = 5;
}
message EnvParamsRequest {
// ListDirRequest requests a repository directory structure
message ListDirRequest {
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository repo = 1;
string revision = 2;
string path = 3;
string environment = 4;
}
message EnvParamsResponse {
repeated github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.ComponentParameter params = 1;
// FileList returns the contents of the repo of a ListDir request
message FileList {
repeated string items = 1;
}
// KsonnetAppRequest is a query for ksonnet app
message KsonnetAppRequest {
// GetFileRequest return
message GetFileRequest {
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository repo = 1;
string revision = 2;
string path = 3;
}
// KsonnetAppResponse contains Ksonnet app response
// This roughly reflects: ksonnet/ksonnet/metadata/app/schema.go
// NOTE: we may expose ksonnet apps from API server, in which case these definitions will move to
// a more public place. For now, these types are only used internally.
message KsonnetAppResponse {
string name = 1;
map<string, KsonnetEnvironment> environments = 2;
// GetFileResponse returns the contents of the file of a GetFile request
message GetFileResponse {
bytes data = 1;
}
message KsonnetEnvironment {
// Name is the user defined name of an environment
string name = 1;
// KubernetesVersion is the kubernetes version the targetted cluster is running on.
string k8sVersion = 2;
// Path is the relative project path containing metadata for this environment.
string path = 3;
// Destination stores the cluster address that this environment points to.
KsonnetEnvironmentDestination destination = 4;
// Targets contain the relative component paths that this environment
//repeated string targets = X;
}
message KsonnetEnvironmentDestination {
// Server is the Kubernetes server that the cluster is running on.
string server = 1;
// Namespace is the namespace of the Kubernetes server that targets should be deployed to
string namespace = 2;
}
// ManifestService
service RepositoryService {
@@ -82,12 +58,12 @@ service RepositoryService {
rpc GenerateManifest(ManifestRequest) returns (ManifestResponse) {
}
// Retrieve Ksonnet environment params in specified repo name and revision
rpc GetEnvParams(EnvParamsRequest) returns (EnvParamsResponse) {
// ListDir returns the file contents at the specified repo and path
rpc ListDir(ListDirRequest) returns (FileList) {
}
// Retrieve Ksonnet environment params in specified repo name and revision
rpc GetKsonnetApp(KsonnetAppRequest) returns (KsonnetAppResponse) {
// GetFile returns the file contents at the specified repo and path
rpc GetFile(GetFileRequest) returns (GetFileResponse) {
}
}

View File

@@ -3,6 +3,7 @@ package reposerver
import (
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/server/version"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/git"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
"github.com/grpc-ecosystem/go-grpc-middleware"
@@ -10,27 +11,26 @@ import (
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"k8s.io/client-go/kubernetes"
)
// ArgoCDRepoServer is the repo server implementation
type ArgoCDRepoServer struct {
ns string
kubeclientset kubernetes.Interface
log *log.Entry
log *log.Entry
gitFactory git.ClientFactory
cache cache.Cache
}
// NewServer returns a new instance of the ArgoCD Repo server
func NewServer(kubeclientset kubernetes.Interface, namespace string) *ArgoCDRepoServer {
func NewServer(gitFactory git.ClientFactory, cache cache.Cache) *ArgoCDRepoServer {
return &ArgoCDRepoServer{
ns: namespace,
kubeclientset: kubeclientset,
log: log.NewEntry(log.New()),
log: log.NewEntry(log.New()),
gitFactory: gitFactory,
cache: cache,
}
}
// CreateGRPC creates new configured grpc server
func (a *ArgoCDRepoServer) CreateGRPC(gitClient git.Client) *grpc.Server {
func (a *ArgoCDRepoServer) CreateGRPC() *grpc.Server {
server := grpc.NewServer(
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_logrus.StreamServerInterceptor(a.log),
@@ -42,7 +42,7 @@ func (a *ArgoCDRepoServer) CreateGRPC(gitClient git.Client) *grpc.Server {
)),
)
version.RegisterVersionServiceServer(server, &version.Server{})
manifestService := repository.NewService(a.ns, a.kubeclientset, gitClient)
manifestService := repository.NewService(a.gitFactory, a.cache)
repository.RegisterRepositoryServiceServer(server, manifestService)
// Register reflection service on gRPC server.

View File

@@ -4,21 +4,13 @@ import (
"bufio"
"encoding/json"
"fmt"
"path"
"reflect"
"strings"
"time"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/server/cluster"
apirepository "github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/util"
argoutil "github.com/argoproj/argo-cd/util/argo"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/kube"
"github.com/ghodss/yaml"
"github.com/ksonnet/ksonnet/pkg/app"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
@@ -26,14 +18,23 @@ import (
"k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
maxRecentDeploymentsCnt = 5
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
argoutil "github.com/argoproj/argo-cd/util/argo"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/rbac"
)
// Server provides a Application service
@@ -42,10 +43,9 @@ type Server struct {
kubeclientset kubernetes.Interface
appclientset appclientset.Interface
repoClientset reposerver.Clientset
// TODO(jessesuen): move common cluster code to shared libraries
clusterService cluster.ClusterServiceServer
repoService apirepository.RepositoryServiceServer
appComparator controller.AppComparator
db db.ArgoDB
appComparator controller.AppStateManager
enf *rbac.Enforcer
}
// NewServer returns a new instance of the Application service
@@ -54,75 +54,237 @@ func NewServer(
kubeclientset kubernetes.Interface,
appclientset appclientset.Interface,
repoClientset reposerver.Clientset,
repoService apirepository.RepositoryServiceServer,
clusterService cluster.ClusterServiceServer) ApplicationServiceServer {
db db.ArgoDB,
enf *rbac.Enforcer,
) ApplicationServiceServer {
return &Server{
ns: namespace,
appclientset: appclientset,
kubeclientset: kubeclientset,
clusterService: clusterService,
repoClientset: repoClientset,
repoService: repoService,
appComparator: controller.NewKsonnetAppComparator(clusterService),
ns: namespace,
appclientset: appclientset,
kubeclientset: kubeclientset,
db: db,
repoClientset: repoClientset,
appComparator: controller.NewAppStateManager(db, appclientset, repoClientset, namespace),
enf: enf,
}
}
// appRBACName formats fully qualified application name for RBAC check
func appRBACName(app appv1.Application) string {
return fmt.Sprintf("%s/%s", git.NormalizeGitURL(app.Spec.Source.RepoURL), app.Name)
}
// List returns list of applications
func (s *Server) List(ctx context.Context, q *ApplicationQuery) (*appv1.ApplicationList, error) {
return s.appclientset.ArgoprojV1alpha1().Applications(s.ns).List(metav1.ListOptions{})
}
// Create creates an application
func (s *Server) Create(ctx context.Context, a *appv1.Application) (*appv1.Application, error) {
err := s.validateApp(ctx, a)
appList, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).List(metav1.ListOptions{})
if err != nil {
return nil, err
}
return s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Create(a)
newItems := make([]appv1.Application, 0)
for _, a := range appList.Items {
if s.enf.EnforceClaims(ctx.Value("claims"), "applications", "get", appRBACName(a)) {
newItems = append(newItems, a)
}
}
appList.Items = newItems
return appList, nil
}
// Create creates an application
func (s *Server) Create(ctx context.Context, q *ApplicationCreateRequest) (*appv1.Application, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "create", appRBACName(q.Application)) {
return nil, grpc.ErrPermissionDenied
}
a := q.Application
err := s.validateApp(ctx, &a.Spec)
if err != nil {
return nil, err
}
a.SetCascadedDeletion(true)
out, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Create(&a)
if apierr.IsAlreadyExists(err) {
// act idempotent if existing spec matches new spec
existing, getErr := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(a.Name, metav1.GetOptions{})
if getErr != nil {
return nil, status.Errorf(codes.Internal, "unable to check existing application details: %v", err)
}
if q.Upsert != nil && *q.Upsert {
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "update", appRBACName(a)) {
return nil, grpc.ErrPermissionDenied
}
existing.Spec = a.Spec
out, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(existing)
} else {
if reflect.DeepEqual(existing.Spec, a.Spec) {
return existing, nil
} else {
return nil, status.Errorf(codes.InvalidArgument, "existing application spec is different, use upsert flag to force update")
}
}
}
return out, err
}
// GetManifests returns application manifests
func (s *Server) GetManifests(ctx context.Context, q *ApplicationManifestQuery) (*repository.ManifestResponse, error) {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*q.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications/manifests", "get", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
repo := s.getRepo(ctx, a.Spec.Source.RepoURL)
conn, repoClient, err := s.repoClientset.NewRepositoryClient()
if err != nil {
return nil, err
}
defer util.Close(conn)
overrides := make([]*appv1.ComponentParameter, len(a.Spec.Source.ComponentParameterOverrides))
if a.Spec.Source.ComponentParameterOverrides != nil {
for i := range a.Spec.Source.ComponentParameterOverrides {
item := a.Spec.Source.ComponentParameterOverrides[i]
overrides[i] = &item
}
}
revision := a.Spec.Source.TargetRevision
if q.Revision != "" {
revision = q.Revision
}
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &repository.ManifestRequest{
Repo: repo,
Environment: a.Spec.Source.Environment,
Path: a.Spec.Source.Path,
Revision: revision,
ComponentParameterOverrides: overrides,
AppLabel: a.Name,
})
if err != nil {
return nil, err
}
return manifestInfo, nil
}
// Get returns an application by name
func (s *Server) Get(ctx context.Context, q *ApplicationQuery) (*appv1.Application, error) {
return s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(q.Name, metav1.GetOptions{})
appIf := s.appclientset.ArgoprojV1alpha1().Applications(s.ns)
a, err := appIf.Get(*q.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "get", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
if q.Refresh {
_, err = argoutil.RefreshApp(appIf, *q.Name)
if err != nil {
return nil, err
}
a, err = argoutil.WaitForRefresh(appIf, *q.Name, nil)
if err != nil {
return nil, err
}
}
return a, nil
}
// ListResourceEvents returns a list of event resources
func (s *Server) ListResourceEvents(ctx context.Context, q *ApplicationResourceEventsQuery) (*v1.EventList, error) {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*q.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications/events", "get", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
config, namespace, err := s.getApplicationClusterConfig(*q.Name)
if err != nil {
return nil, err
}
kubeClientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
fieldSelector := fields.SelectorFromSet(map[string]string{
"involvedObject.name": q.ResourceName,
"involvedObject.uid": q.ResourceUID,
"involvedObject.namespace": namespace,
}).String()
log.Infof("Querying for resource events with field selector: %s", fieldSelector)
opts := metav1.ListOptions{FieldSelector: fieldSelector}
return kubeClientset.CoreV1().Events(namespace).List(opts)
}
// Update updates an application
func (s *Server) Update(ctx context.Context, a *appv1.Application) (*appv1.Application, error) {
err := s.validateApp(ctx, a)
func (s *Server) Update(ctx context.Context, q *ApplicationUpdateRequest) (*appv1.Application, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "update", appRBACName(*q.Application)) {
return nil, grpc.ErrPermissionDenied
}
a := q.Application
err := s.validateApp(ctx, &a.Spec)
if err != nil {
return nil, err
}
return s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(a)
}
// UpdateSpec updates an application spec
func (s *Server) UpdateSpec(ctx context.Context, q *ApplicationUpdateSpecRequest) (*appv1.ApplicationSpec, error) {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*q.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "update", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
err = s.validateApp(ctx, &q.Spec)
if err != nil {
return nil, err
}
patch, err := json.Marshal(map[string]appv1.ApplicationSpec{
"spec": q.Spec,
})
if err != nil {
return nil, err
}
_, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Patch(*q.Name, types.MergePatchType, patch)
return &q.Spec, err
}
// Delete removes an application and all associated resources
func (s *Server) Delete(ctx context.Context, q *DeleteApplicationRequest) (*ApplicationResponse, error) {
var err error
server := q.Server
namespace := q.Namespace
if server == "" || namespace == "" {
server, namespace, err = s.getApplicationDestination(ctx, q.Name)
if err != nil && !apierr.IsNotFound(err) && !q.Force {
func (s *Server) Delete(ctx context.Context, q *ApplicationDeleteRequest) (*ApplicationResponse, error) {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*q.Name, metav1.GetOptions{})
if err != nil && !apierr.IsNotFound(err) {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "delete", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
if q.Cascade != nil && *q.Cascade != a.CascadedDeletion() {
a.SetCascadedDeletion(*q.Cascade)
patch, err := json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": a.Finalizers,
},
})
if err != nil {
return nil, err
}
_, err = s.appclientset.ArgoprojV1alpha1().Applications(a.Namespace).Patch(a.Name, types.MergePatchType, patch)
if err != nil {
return nil, err
}
}
if server != "" && namespace != "" {
clst, err := s.clusterService.Get(ctx, &cluster.ClusterQuery{Server: server})
if err != nil && !q.Force {
return nil, err
}
if clst != nil {
config := clst.RESTConfig()
err = kube.DeleteResourceWithLabel(config, namespace, common.LabelApplicationName, q.Name)
if err != nil && !q.Force {
return nil, err
}
}
}
err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Delete(q.Name, &metav1.DeleteOptions{})
err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Delete(*q.Name, &metav1.DeleteOptions{})
if err != nil && !apierr.IsNotFound(err) {
return nil, err
}
@@ -135,14 +297,19 @@ func (s *Server) Watch(q *ApplicationQuery, ws ApplicationService_WatchServer) e
if err != nil {
return err
}
claims := ws.Context().Value("claims")
done := make(chan bool)
go func() {
for next := range w.ResultChan() {
app := *next.Object.(*appv1.Application)
if q.Name == "" || q.Name == app.Name {
a := *next.Object.(*appv1.Application)
if q.Name == nil || *q.Name == "" || *q.Name == a.Name {
if !s.enf.EnforceClaims(claims, "applications", "get", appRBACName(a)) {
// do not emit apps user does not have accessing
continue
}
err = ws.Send(&appv1.ApplicationWatchEvent{
Type: next.Type,
Application: app,
Application: a,
})
if err != nil {
log.Warnf("Unable to send stream message: %v", err)
@@ -164,20 +331,20 @@ func (s *Server) Watch(q *ApplicationQuery, ws ApplicationService_WatchServer) e
// * the git path contains a valid app.yaml
// * the specified environment exists
// * the referenced cluster has been added to ArgoCD
func (s *Server) validateApp(ctx context.Context, a *appv1.Application) error {
func (s *Server) validateApp(ctx context.Context, spec *appv1.ApplicationSpec) error {
// Test the repo
conn, repoClient, err := s.repoClientset.NewRepositoryClient()
if err != nil {
return err
}
defer util.Close(conn)
repoRes, err := s.repoService.Get(ctx, &apirepository.RepoQuery{Repo: a.Spec.Source.RepoURL})
repoRes, err := s.db.GetRepository(ctx, spec.Source.RepoURL)
if err != nil {
if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.NotFound {
// The repo has not been added to ArgoCD so we do not have credentials to access it.
// We support the mode where apps can be created from public repositories. Test the
// repo to make sure it is publically accessible
err = git.TestRepo(a.Spec.Source.RepoURL, "", "", "")
// repo to make sure it is publicly accessible
err = git.TestRepo(spec.Source.RepoURL, "", "", "")
if err != nil {
return err
}
@@ -187,36 +354,53 @@ func (s *Server) validateApp(ctx context.Context, a *appv1.Application) error {
}
// Verify app.yaml is functional
req := repository.KsonnetAppRequest{
req := repository.GetFileRequest{
Repo: &appv1.Repository{
Repo: a.Spec.Source.RepoURL,
Repo: spec.Source.RepoURL,
},
Revision: a.Spec.Source.TargetRevision,
Path: a.Spec.Source.Path,
Revision: spec.Source.TargetRevision,
Path: path.Join(spec.Source.Path, "app.yaml"),
}
if repoRes != nil {
req.Repo.Username = repoRes.Username
req.Repo.Password = repoRes.Password
req.Repo.SSHPrivateKey = repoRes.SSHPrivateKey
}
ksAppRes, err := repoClient.GetKsonnetApp(ctx, &req)
getRes, err := repoClient.GetFile(ctx, &req)
if err != nil {
return err
}
var appSpec app.Spec
err = yaml.Unmarshal(getRes.Data, &appSpec)
if err != nil {
return status.Errorf(codes.InvalidArgument, "app.yaml is not a valid ksonnet app spec")
}
// Default revision to HEAD if unspecified
if spec.Source.TargetRevision == "" {
spec.Source.TargetRevision = "HEAD"
}
// Verify the specified environment is defined in it
envSpec, ok := ksAppRes.Environments[a.Spec.Source.Environment]
if !ok {
return status.Errorf(codes.InvalidArgument, "environment '%s' does not exist in app", a.Spec.Source.Environment)
envSpec, ok := appSpec.Environments[spec.Source.Environment]
if !ok || envSpec == nil {
return status.Errorf(codes.InvalidArgument, "environment '%s' does not exist in ksonnet app", spec.Source.Environment)
}
// If server and namespace are not supplied, pull it from the app.yaml
if spec.Destination.Server == "" {
spec.Destination.Server = envSpec.Destination.Server
}
if spec.Destination.Namespace == "" {
spec.Destination.Namespace = envSpec.Destination.Namespace
}
// Ensure the k8s cluster the app is referencing, is configured in ArgoCD
// NOTE: need to check if it was overridden in the destination spec
clusterURL := envSpec.Destination.Server
if a.Spec.Destination != nil && a.Spec.Destination.Server != "" {
clusterURL = a.Spec.Destination.Server
}
_, err = s.clusterService.Get(ctx, &cluster.ClusterQuery{Server: clusterURL})
_, err = s.db.GetCluster(ctx, spec.Destination.Server)
if err != nil {
if apierr.IsNotFound(err) {
return status.Errorf(codes.InvalidArgument, "cluster '%s' has not been configured", spec.Destination.Server)
}
return err
}
return nil
@@ -227,7 +411,7 @@ func (s *Server) getApplicationClusterConfig(applicationName string) (*rest.Conf
if err != nil {
return nil, "", err
}
clst, err := s.clusterService.Get(context.Background(), &cluster.ClusterQuery{Server: server})
clst, err := s.db.GetCluster(context.Background(), server)
if err != nil {
return nil, "", err
}
@@ -240,7 +424,7 @@ func (s *Server) ensurePodBelongsToApp(applicationName string, podName, namespac
if err != nil {
return err
}
wrongPodError := fmt.Errorf("pod %s does not belong to application %s", podName, applicationName)
wrongPodError := status.Errorf(codes.InvalidArgument, "pod %s does not belong to application %s", podName, applicationName)
if pod.Labels == nil {
return wrongPodError
}
@@ -250,8 +434,17 @@ func (s *Server) ensurePodBelongsToApp(applicationName string, podName, namespac
return nil
}
func (s *Server) DeletePod(ctx context.Context, q *DeletePodQuery) (*ApplicationResponse, error) {
config, namespace, err := s.getApplicationClusterConfig(q.ApplicationName)
func (s *Server) DeletePod(ctx context.Context, q *ApplicationDeletePodRequest) (*ApplicationResponse, error) {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*q.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications/pods", "delete", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
config, namespace, err := s.getApplicationClusterConfig(*q.Name)
if err != nil {
return nil, err
}
@@ -259,19 +452,26 @@ func (s *Server) DeletePod(ctx context.Context, q *DeletePodQuery) (*Application
if err != nil {
return nil, err
}
err = s.ensurePodBelongsToApp(q.ApplicationName, q.PodName, namespace, kubeClientset)
err = s.ensurePodBelongsToApp(*q.Name, *q.PodName, namespace, kubeClientset)
if err != nil {
return nil, err
}
err = kubeClientset.CoreV1().Pods(namespace).Delete(q.PodName, &metav1.DeleteOptions{})
err = kubeClientset.CoreV1().Pods(namespace).Delete(*q.PodName, &metav1.DeleteOptions{})
if err != nil {
return nil, err
}
return &ApplicationResponse{}, nil
}
func (s *Server) PodLogs(q *PodLogsQuery, ws ApplicationService_PodLogsServer) error {
config, namespace, err := s.getApplicationClusterConfig(q.ApplicationName)
func (s *Server) PodLogs(q *ApplicationPodLogsQuery, ws ApplicationService_PodLogsServer) error {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*q.Name, metav1.GetOptions{})
if err != nil {
return err
}
if !s.enf.EnforceClaims(ws.Context().Value("claims"), "applications/logs", "get", appRBACName(*a)) {
return grpc.ErrPermissionDenied
}
config, namespace, err := s.getApplicationClusterConfig(*q.Name)
if err != nil {
return err
}
@@ -279,7 +479,7 @@ func (s *Server) PodLogs(q *PodLogsQuery, ws ApplicationService_PodLogsServer) e
if err != nil {
return err
}
err = s.ensurePodBelongsToApp(q.ApplicationName, q.PodName, namespace, kubeClientset)
err = s.ensurePodBelongsToApp(*q.Name, *q.PodName, namespace, kubeClientset)
if err != nil {
return err
}
@@ -291,7 +491,7 @@ func (s *Server) PodLogs(q *PodLogsQuery, ws ApplicationService_PodLogsServer) e
if q.TailLines > 0 {
tailLines = &q.TailLines
}
stream, err := kubeClientset.CoreV1().Pods(namespace).GetLogs(q.PodName, &v1.PodLogOptions{
stream, err := kubeClientset.CoreV1().Pods(namespace).GetLogs(*q.PodName, &v1.PodLogOptions{
Container: q.Container,
Follow: q.Follow,
Timestamps: true,
@@ -316,7 +516,7 @@ func (s *Server) PodLogs(q *PodLogsQuery, ws ApplicationService_PodLogsServer) e
if line != "" {
err = ws.Send(&LogEntry{
Content: line,
TimeStamp: &metaLogTime,
TimeStamp: metaLogTime,
})
if err != nil {
log.Warnf("Unable to send stream message: %v", err)
@@ -336,144 +536,17 @@ func (s *Server) PodLogs(q *PodLogsQuery, ws ApplicationService_PodLogsServer) e
return nil
}
// Sync syncs an application to its target state
func (s *Server) Sync(ctx context.Context, syncReq *ApplicationSyncRequest) (*ApplicationSyncResult, error) {
return s.deployAndPersistDeploymentInfo(ctx, syncReq.Name, syncReq.Revision, nil, syncReq.DryRun, syncReq.Prune)
}
func (s *Server) Rollback(ctx context.Context, rollbackReq *ApplicationRollbackRequest) (*ApplicationSyncResult, error) {
app, err := s.Get(ctx, &ApplicationQuery{Name: rollbackReq.Name})
if err != nil {
return nil, err
}
var deploymentInfo *appv1.DeploymentInfo
for _, info := range app.Status.RecentDeployments {
if info.ID == rollbackReq.ID {
deploymentInfo = &info
break
}
}
if deploymentInfo == nil {
return nil, status.Errorf(codes.InvalidArgument, "application %s does not have deployment with id %v", rollbackReq.Name, rollbackReq.ID)
}
return s.deployAndPersistDeploymentInfo(ctx, rollbackReq.Name, deploymentInfo.Revision, &deploymentInfo.ComponentParameterOverrides, rollbackReq.DryRun, rollbackReq.Prune)
}
func (s *Server) deployAndPersistDeploymentInfo(
ctx context.Context, appName string, revision string, overrides *[]appv1.ComponentParameter, dryRun bool, prune bool) (*ApplicationSyncResult, error) {
log.Infof("Syncing application %s", appName)
app, err := s.Get(ctx, &ApplicationQuery{Name: appName})
if err != nil {
return nil, err
}
if revision != "" {
app.Spec.Source.TargetRevision = revision
}
if overrides != nil {
app.Spec.Source.ComponentParameterOverrides = *overrides
}
res, manifest, err := s.deploy(ctx, app, dryRun, prune)
if err != nil {
return nil, err
}
if !dryRun {
err = s.persistDeploymentInfo(ctx, appName, manifest.Revision, nil)
if err != nil {
return nil, err
}
}
return res, err
}
func (s *Server) persistDeploymentInfo(ctx context.Context, appName string, revision string, overrides *[]appv1.ComponentParameter) error {
app, err := s.Get(ctx, &ApplicationQuery{Name: appName})
if err != nil {
return err
}
repo := s.getRepo(ctx, app.Spec.Source.RepoURL)
conn, repoClient, err := s.repoClientset.NewRepositoryClient()
if err != nil {
return err
}
defer util.Close(conn)
log.Infof("Retrieving deployment params for application %s", appName)
envParams, err := repoClient.GetEnvParams(ctx, &repository.EnvParamsRequest{
Repo: repo,
Environment: app.Spec.Source.Environment,
Path: app.Spec.Source.Path,
Revision: revision,
})
if err != nil {
return err
}
params := make([]appv1.ComponentParameter, len(envParams.Params))
for i := range envParams.Params {
param := *envParams.Params[i]
params[i] = param
}
var nextId int64 = 0
if len(app.Status.RecentDeployments) > 0 {
nextId = app.Status.RecentDeployments[len(app.Status.RecentDeployments)-1].ID + 1
}
recentDeployments := append(app.Status.RecentDeployments, appv1.DeploymentInfo{
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
Revision: revision,
Params: params,
DeployedAt: metav1.NewTime(time.Now()),
ID: nextId,
})
if len(recentDeployments) > maxRecentDeploymentsCnt {
recentDeployments = recentDeployments[1 : maxRecentDeploymentsCnt+1]
}
patch, err := json.Marshal(map[string]map[string][]appv1.DeploymentInfo{
"status": {
"recentDeployments": recentDeployments,
},
})
if err != nil {
return err
}
_, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Patch(app.Name, types.MergePatchType, patch)
return err
}
func (s *Server) getApplicationDestination(ctx context.Context, name string) (string, string, error) {
app, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(name, metav1.GetOptions{})
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(name, metav1.GetOptions{})
if err != nil {
return "", "", err
} else {
repo := s.getRepo(ctx, app.Spec.Source.RepoURL)
conn, repoClient, err := s.repoClientset.NewRepositoryClient()
if err != nil {
return "", "", err
}
defer util.Close(conn)
manifestInfo, err := repoClient.GenerateManifest(ctx, &repository.ManifestRequest{
Repo: repo,
Environment: app.Spec.Source.Environment,
Path: app.Spec.Source.Path,
Revision: app.Spec.Source.TargetRevision,
AppLabel: app.Name,
})
if err != nil {
return "", "", err
}
server, namespace := argoutil.ResolveServerNamespace(app.Spec.Destination, manifestInfo)
return server, namespace, nil
}
server, namespace := a.Spec.Destination.Server, a.Spec.Destination.Namespace
return server, namespace, nil
}
func (s *Server) getRepo(ctx context.Context, repoURL string) *appv1.Repository {
repo, err := s.repoService.Get(ctx, &apirepository.RepoQuery{Repo: repoURL})
repo, err := s.db.GetRepository(ctx, repoURL)
if err != nil {
// If we couldn't retrieve from the repo service, assume public repositories
repo = &appv1.Repository{Repo: repoURL}
@@ -481,134 +554,65 @@ func (s *Server) getRepo(ctx context.Context, repoURL string) *appv1.Repository
return repo
}
func (s *Server) deploy(
ctx context.Context,
app *appv1.Application,
dryRun bool,
prune bool) (*ApplicationSyncResult, *repository.ManifestResponse, error) {
repo := s.getRepo(ctx, app.Spec.Source.RepoURL)
conn, repoClient, err := s.repoClientset.NewRepositoryClient()
// Sync syncs an application to its target state
func (s *Server) Sync(ctx context.Context, syncReq *ApplicationSyncRequest) (*appv1.Application, error) {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*syncReq.Name, metav1.GetOptions{})
if err != nil {
return nil, nil, err
return nil, err
}
defer util.Close(conn)
overrides := make([]*appv1.ComponentParameter, len(app.Spec.Source.ComponentParameterOverrides))
if app.Spec.Source.ComponentParameterOverrides != nil {
for i := range app.Spec.Source.ComponentParameterOverrides {
item := app.Spec.Source.ComponentParameterOverrides[i]
overrides[i] = &item
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "sync", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
manifestInfo, err := repoClient.GenerateManifest(ctx, &repository.ManifestRequest{
Repo: repo,
Environment: app.Spec.Source.Environment,
Path: app.Spec.Source.Path,
Revision: app.Spec.Source.TargetRevision,
ComponentParameterOverrides: overrides,
AppLabel: app.Name,
return s.setAppOperation(ctx, *syncReq.Name, func(app *appv1.Application) (*appv1.Operation, error) {
return &appv1.Operation{
Sync: &appv1.SyncOperation{
Revision: syncReq.Revision,
Prune: syncReq.Prune,
DryRun: syncReq.DryRun,
},
}, nil
})
if err != nil {
return nil, nil, err
}
targetObjs := make([]*unstructured.Unstructured, len(manifestInfo.Manifests))
for i, manifest := range manifestInfo.Manifests {
obj, err := appv1.UnmarshalToUnstructured(manifest)
if err != nil {
return nil, nil, err
}
targetObjs[i] = obj
}
server, namespace := argoutil.ResolveServerNamespace(app.Spec.Destination, manifestInfo)
comparison, err := s.appComparator.CompareAppState(server, namespace, targetObjs, app)
if err != nil {
return nil, nil, err
}
clst, err := s.clusterService.Get(ctx, &cluster.ClusterQuery{Server: server})
if err != nil {
return nil, nil, err
}
config := clst.RESTConfig()
var syncRes ApplicationSyncResult
syncRes.Resources = make([]*ResourceDetails, 0)
for _, resourceState := range comparison.Resources {
var liveObj, targetObj *unstructured.Unstructured
if resourceState.LiveState != "null" {
liveObj = &unstructured.Unstructured{}
err = json.Unmarshal([]byte(resourceState.LiveState), liveObj)
if err != nil {
return nil, nil, err
}
}
if resourceState.TargetState != "null" {
targetObj = &unstructured.Unstructured{}
err = json.Unmarshal([]byte(resourceState.TargetState), targetObj)
if err != nil {
return nil, nil, err
}
}
needsCreate := liveObj == nil
needsDelete := targetObj == nil
obj := targetObj
if obj == nil {
obj = liveObj
}
resDetails := ResourceDetails{
Name: obj.GetName(),
Kind: obj.GetKind(),
Namespace: namespace,
}
if resourceState.Status == appv1.ComparisonStatusSynced {
resDetails.Message = fmt.Sprintf("already synced")
} else if dryRun {
if needsCreate {
resDetails.Message = fmt.Sprintf("will create")
} else if needsDelete {
if prune {
resDetails.Message = fmt.Sprintf("will delete")
} else {
resDetails.Message = fmt.Sprintf("will be ignored (should be deleted)")
}
} else {
resDetails.Message = fmt.Sprintf("will update")
}
} else {
if needsDelete {
if prune {
err = kube.DeleteResource(config, liveObj, namespace)
if err != nil {
return nil, nil, err
}
resDetails.Message = fmt.Sprintf("deleted")
} else {
resDetails.Message = fmt.Sprintf("ignored (should be deleted)")
}
} else {
_, err := kube.ApplyResource(config, targetObj, namespace)
if err != nil {
return nil, nil, err
}
if needsCreate {
resDetails.Message = fmt.Sprintf("created")
} else {
resDetails.Message = fmt.Sprintf("updated")
}
}
}
syncRes.Resources = append(syncRes.Resources, &resDetails)
}
syncRes.Message = "successfully synced"
return &syncRes, manifestInfo, nil
}
// Rollback records a rollback operation on the named application so the
// application controller can revert it to the state captured in the
// deployment history entry identified by rollbackReq.ID.
func (s *Server) Rollback(ctx context.Context, rollbackReq *ApplicationRollbackRequest) (*appv1.Application, error) {
	app, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*rollbackReq.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Reject callers without the "rollback" action on this application.
	if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "rollback", appRBACName(*app)) {
		return nil, grpc.ErrPermissionDenied
	}
	makeOp := func(_ *appv1.Application) (*appv1.Operation, error) {
		op := appv1.Operation{
			Rollback: &appv1.RollbackOperation{
				ID:     rollbackReq.ID,
				Prune:  rollbackReq.Prune,
				DryRun: rollbackReq.DryRun,
			},
		}
		return &op, nil
	}
	return s.setAppOperation(ctx, *rollbackReq.Name, makeOp)
}
// setAppOperation atomically sets a pending operation on the named
// application. It refuses to overwrite an in-progress operation, clears any
// previous operation state, and retries the update whenever the apiserver
// reports an optimistic-concurrency conflict.
func (s *Server) setAppOperation(ctx context.Context, appName string, operationCreator func(app *appv1.Application) (*appv1.Operation, error)) (*appv1.Application, error) {
	for {
		app, err := s.Get(ctx, &ApplicationQuery{Name: &appName})
		if err != nil {
			return nil, err
		}
		// Only one operation may be pending at a time.
		if app.Operation != nil {
			return nil, status.Errorf(codes.InvalidArgument, "another operation is already in progress")
		}
		newOp, err := operationCreator(app)
		if err != nil {
			return nil, err
		}
		app.Operation = newOp
		// Reset the previous operation's recorded state before starting anew.
		app.Status.OperationState = nil
		_, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(app)
		if err == nil || !apierr.IsConflict(err) {
			return app, err
		}
		// Someone else updated the app concurrently; fetch the latest version and retry.
		log.Warnf("Failed to set operation for app '%s' due to update conflict. Retrying again...", appName)
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,6 @@ import (
"io"
"net/http"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
@@ -46,6 +45,41 @@ func request_ApplicationService_List_0(ctx context.Context, marshaler runtime.Ma
}
var (
filter_ApplicationService_ListResourceEvents_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
)
func request_ApplicationService_ListResourceEvents_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ApplicationResourceEventsQuery
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ApplicationService_ListResourceEvents_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListResourceEvents(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_ApplicationService_Watch_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
@@ -71,11 +105,19 @@ func request_ApplicationService_Watch_0(ctx context.Context, marshaler runtime.M
}
var (
filter_ApplicationService_Create_0 = &utilities.DoubleArray{Encoding: map[string]int{"application": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
)
func request_ApplicationService_Create_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v1alpha1.Application
var protoReq ApplicationCreateRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.Application); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ApplicationService_Create_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
@@ -84,6 +126,10 @@ func request_ApplicationService_Create_0(ctx context.Context, marshaler runtime.
}
var (
filter_ApplicationService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
)
func request_ApplicationService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ApplicationQuery
var metadata runtime.ServerMetadata
@@ -100,54 +146,27 @@ func request_ApplicationService_Get_0(ctx context.Context, marshaler runtime.Mar
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.Name, err = runtime.String(val)
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ApplicationService_Get_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ApplicationService_Update_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v1alpha1.Application
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["metadata.name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "metadata.name")
}
protoReq.GetMetadata().Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "metadata.name", err)
}
msg, err := client.Update(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_ApplicationService_Delete_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
filter_ApplicationService_GetManifests_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
)
func request_ApplicationService_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteApplicationRequest
func request_ApplicationService_GetManifests_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ApplicationManifestQuery
var metadata runtime.ServerMetadata
var (
@@ -162,7 +181,104 @@ func request_ApplicationService_Delete_0(ctx context.Context, marshaler runtime.
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.Name, err = runtime.String(val)
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ApplicationService_GetManifests_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.GetManifests(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ApplicationService_Update_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ApplicationUpdateRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.Application); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["application.metadata.name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "application.metadata.name")
}
protoReq.GetApplication().GetMetadata().Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "application.metadata.name", err)
}
msg, err := client.Update(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ApplicationService_UpdateSpec_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ApplicationUpdateSpecRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.Spec); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
msg, err := client.UpdateSpec(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_ApplicationService_Delete_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
)
func request_ApplicationService_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ApplicationDeleteRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
@@ -197,7 +313,7 @@ func request_ApplicationService_Sync_0(ctx context.Context, marshaler runtime.Ma
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.Name, err = runtime.String(val)
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
@@ -228,7 +344,7 @@ func request_ApplicationService_Rollback_0(ctx context.Context, marshaler runtim
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.Name, err = runtime.String(val)
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
@@ -240,7 +356,7 @@ func request_ApplicationService_Rollback_0(ctx context.Context, marshaler runtim
}
func request_ApplicationService_DeletePod_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeletePodQuery
var protoReq ApplicationDeletePodRequest
var metadata runtime.ServerMetadata
var (
@@ -250,15 +366,15 @@ func request_ApplicationService_DeletePod_0(ctx context.Context, marshaler runti
_ = err
)
val, ok = pathParams["applicationName"]
val, ok = pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "applicationName")
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.ApplicationName, err = runtime.String(val)
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "applicationName", err)
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
val, ok = pathParams["podName"]
@@ -266,7 +382,7 @@ func request_ApplicationService_DeletePod_0(ctx context.Context, marshaler runti
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "podName")
}
protoReq.PodName, err = runtime.String(val)
protoReq.PodName, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "podName", err)
@@ -278,11 +394,11 @@ func request_ApplicationService_DeletePod_0(ctx context.Context, marshaler runti
}
var (
filter_ApplicationService_PodLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"applicationName": 0, "podName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
filter_ApplicationService_PodLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0, "podName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
)
func request_ApplicationService_PodLogs_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (ApplicationService_PodLogsClient, runtime.ServerMetadata, error) {
var protoReq PodLogsQuery
var protoReq ApplicationPodLogsQuery
var metadata runtime.ServerMetadata
var (
@@ -292,15 +408,15 @@ func request_ApplicationService_PodLogs_0(ctx context.Context, marshaler runtime
_ = err
)
val, ok = pathParams["applicationName"]
val, ok = pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "applicationName")
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
protoReq.ApplicationName, err = runtime.String(val)
protoReq.Name, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "applicationName", err)
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
val, ok = pathParams["podName"]
@@ -308,7 +424,7 @@ func request_ApplicationService_PodLogs_0(ctx context.Context, marshaler runtime
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "podName")
}
protoReq.PodName, err = runtime.String(val)
protoReq.PodName, err = runtime.StringP(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "podName", err)
@@ -398,6 +514,35 @@ func RegisterApplicationServiceHandlerClient(ctx context.Context, mux *runtime.S
})
mux.Handle("GET", pattern_ApplicationService_ListResourceEvents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ApplicationService_ListResourceEvents_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ApplicationService_ListResourceEvents_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ApplicationService_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -485,6 +630,35 @@ func RegisterApplicationServiceHandlerClient(ctx context.Context, mux *runtime.S
})
mux.Handle("GET", pattern_ApplicationService_GetManifests_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ApplicationService_GetManifests_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ApplicationService_GetManifests_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("PUT", pattern_ApplicationService_Update_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -514,6 +688,35 @@ func RegisterApplicationServiceHandlerClient(ctx context.Context, mux *runtime.S
})
mux.Handle("PUT", pattern_ApplicationService_UpdateSpec_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ApplicationService_UpdateSpec_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ApplicationService_UpdateSpec_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_ApplicationService_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -665,13 +868,19 @@ func RegisterApplicationServiceHandlerClient(ctx context.Context, mux *runtime.S
var (
pattern_ApplicationService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "applications"}, ""))
pattern_ApplicationService_ListResourceEvents_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "applications", "name", "events"}, ""))
pattern_ApplicationService_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "stream", "applications"}, ""))
pattern_ApplicationService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "applications"}, ""))
pattern_ApplicationService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "applications", "name"}, ""))
pattern_ApplicationService_Update_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "applications", "metadata.name"}, ""))
pattern_ApplicationService_GetManifests_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "applications", "name", "manifests"}, ""))
pattern_ApplicationService_Update_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "applications", "application.metadata.name"}, ""))
pattern_ApplicationService_UpdateSpec_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "applications", "name", "spec"}, ""))
pattern_ApplicationService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "applications", "name"}, ""))
@@ -679,22 +888,28 @@ var (
pattern_ApplicationService_Rollback_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "applications", "name", "rollback"}, ""))
pattern_ApplicationService_DeletePod_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "applications", "applicationName", "pods", "podName"}, ""))
pattern_ApplicationService_DeletePod_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "applications", "name", "pods", "podName"}, ""))
pattern_ApplicationService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "applications", "applicationName", "pods", "podName", "logs"}, ""))
pattern_ApplicationService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "applications", "name", "pods", "podName", "logs"}, ""))
)
var (
forward_ApplicationService_List_0 = runtime.ForwardResponseMessage
forward_ApplicationService_ListResourceEvents_0 = runtime.ForwardResponseMessage
forward_ApplicationService_Watch_0 = runtime.ForwardResponseStream
forward_ApplicationService_Create_0 = runtime.ForwardResponseMessage
forward_ApplicationService_Get_0 = runtime.ForwardResponseMessage
forward_ApplicationService_GetManifests_0 = runtime.ForwardResponseMessage
forward_ApplicationService_Update_0 = runtime.ForwardResponseMessage
forward_ApplicationService_UpdateSpec_0 = runtime.ForwardResponseMessage
forward_ApplicationService_Delete_0 = runtime.ForwardResponseMessage
forward_ApplicationService_Sync_0 = runtime.ForwardResponseMessage

View File

@@ -1,9 +1,9 @@
syntax = "proto3";
syntax = "proto2";
option go_package = "github.com/argoproj/argo-cd/server/application";
// Application Service
//
// Application Service API performs CRUD actions against application resources
// Application Service API performs CRUD actions against application resources
package application;
import "gogoproto/gogo.proto";
@@ -11,68 +11,83 @@ import "google/api/annotations.proto";
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1/generated.proto";
import "github.com/argoproj/argo-cd/reposerver/repository/repository.proto";
// ApplicationQuery is a query for application resources
message ApplicationQuery {
string name = 1;
optional string name = 1;
optional bool refresh = 2 [(gogoproto.nullable) = false];
}
// ApplicationEventsQuery is a query for application resource events
message ApplicationResourceEventsQuery {
required string name = 1;
required string resourceName = 2 [(gogoproto.nullable) = false];
required string resourceUID = 3 [(gogoproto.nullable) = false];
}
// ManifestQuery is a query for manifest resources
message ApplicationManifestQuery {
required string name = 1;
optional string revision = 2 [(gogoproto.nullable) = false];
}
message ApplicationResponse {}
message DeleteApplicationRequest {
string name = 1;
string namespace = 2;
string server = 3;
bool force = 4;
message ApplicationCreateRequest {
required github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application application = 1 [(gogoproto.nullable) = false];
optional bool upsert = 2;
}
message ApplicationUpdateRequest {
required github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application application = 1;
}
message ApplicationDeleteRequest {
required string name = 1;
optional bool cascade = 2;
}
// ApplicationSyncRequest is a request to apply the config state to live state
message ApplicationSyncRequest {
string name = 1;
string revision = 2;
bool dryRun = 3;
bool prune = 4;
required string name = 1;
required string revision = 2 [(gogoproto.nullable) = false];
required bool dryRun = 3 [(gogoproto.nullable) = false];
required bool prune = 4 [(gogoproto.nullable) = false];
}
// ApplicationSyncResult is a result of a sync request
message ApplicationSyncResult {
string message = 1;
repeated ResourceDetails resources = 2;
// ApplicationUpdateSpecRequest is a request to update application spec
message ApplicationUpdateSpecRequest {
required string name = 1;
required github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.ApplicationSpec spec = 2 [(gogoproto.nullable) = false];
}
message ApplicationRollbackRequest {
string name = 1;
int64 id = 2 [(gogoproto.customname) = "ID"];
bool dryRun = 3;
bool prune = 4;
required string name = 1;
required int64 id = 2 [(gogoproto.customname) = "ID", (gogoproto.nullable) = false];
required bool dryRun = 3 [(gogoproto.nullable) = false];
required bool prune = 4 [(gogoproto.nullable) = false];
}
message ResourceDetails {
string name = 1;
string kind = 2;
string namespace = 3;
string message = 4;
message ApplicationDeletePodRequest {
required string name = 1;
required string podName = 2;
}
message DeletePodQuery {
string applicationName = 1;
string podName = 2;
}
message PodLogsQuery {
string applicationName = 1;
string podName = 2;
string container = 3;
int64 sinceSeconds = 4;
k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5;
int64 tailLines = 6;
bool follow = 7;
message ApplicationPodLogsQuery {
required string name = 1;
required string podName = 2;
required string container = 3 [(gogoproto.nullable) = false];
required int64 sinceSeconds = 4 [(gogoproto.nullable) = false];
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5;
required int64 tailLines = 6 [(gogoproto.nullable) = false];
required bool follow = 7 [(gogoproto.nullable) = false];
}
message LogEntry {
string content = 1;
k8s.io.apimachinery.pkg.apis.meta.v1.Time timeStamp = 2;
required string content = 1 [(gogoproto.nullable) = false];
required k8s.io.apimachinery.pkg.apis.meta.v1.Time timeStamp = 2 [(gogoproto.nullable) = false];
}
// ApplicationService
@@ -83,16 +98,21 @@ service ApplicationService {
option (google.api.http).get = "/api/v1/applications";
}
// ListResourceEvents returns a list of event resources
rpc ListResourceEvents(ApplicationResourceEventsQuery) returns (k8s.io.api.core.v1.EventList) {
option (google.api.http).get = "/api/v1/applications/{name}/events";
}
// Watch returns stream of application change events.
rpc Watch(ApplicationQuery) returns (stream github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.ApplicationWatchEvent) {
option (google.api.http).get = "/api/v1/stream/applications";
}
// Create creates an application
rpc Create(github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application) {
rpc Create(ApplicationCreateRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application) {
option (google.api.http) = {
post: "/api/v1/applications"
body: "*"
body: "application"
};
}
@@ -101,21 +121,34 @@ service ApplicationService {
option (google.api.http).get = "/api/v1/applications/{name}";
}
// GetManifests returns application manifests
rpc GetManifests(ApplicationManifestQuery) returns (repository.ManifestResponse) {
option (google.api.http).get = "/api/v1/applications/{name}/manifests";
}
// Update updates an application
rpc Update(github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application) {
rpc Update(ApplicationUpdateRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application) {
option (google.api.http) = {
put: "/api/v1/applications/{metadata.name}"
body: "*"
put: "/api/v1/applications/{application.metadata.name}"
body: "application"
};
}
// Update updates an application spec
rpc UpdateSpec(ApplicationUpdateSpecRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.ApplicationSpec) {
option (google.api.http) = {
put: "/api/v1/applications/{name}/spec"
body: "spec"
};
}
// Delete deletes an application
rpc Delete(DeleteApplicationRequest) returns (ApplicationResponse) {
rpc Delete(ApplicationDeleteRequest) returns (ApplicationResponse) {
option (google.api.http).delete = "/api/v1/applications/{name}";
}
// Sync syncs an application to its target state
rpc Sync(ApplicationSyncRequest) returns (ApplicationSyncResult) {
rpc Sync(ApplicationSyncRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application) {
option (google.api.http) = {
post: "/api/v1/applications/{name}/sync"
body: "*"
@@ -123,7 +156,7 @@ service ApplicationService {
}
// Sync syncs an application to its target state
rpc Rollback(ApplicationRollbackRequest) returns (ApplicationSyncResult) {
rpc Rollback(ApplicationRollbackRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Application) {
option (google.api.http) = {
post: "/api/v1/applications/{name}/rollback"
body: "*"
@@ -131,12 +164,12 @@ service ApplicationService {
}
// PodLogs returns stream of log entries for the specified pod. Pod
rpc DeletePod(DeletePodQuery) returns (ApplicationResponse) {
option (google.api.http).delete = "/api/v1/applications/{applicationName}/pods/{podName}";
rpc DeletePod(ApplicationDeletePodRequest) returns (ApplicationResponse) {
option (google.api.http).delete = "/api/v1/applications/{name}/pods/{podName}";
}
// PodLogs returns stream of log entries for the specified pod. Pod
rpc PodLogs(PodLogsQuery) returns (stream LogEntry) {
option (google.api.http).get = "/api/v1/applications/{applicationName}/pods/{podName}/logs";
rpc PodLogs(ApplicationPodLogsQuery) returns (stream LogEntry) {
option (google.api.http).get = "/api/v1/applications/{name}/pods/{podName}/logs";
}
}

View File

@@ -1,226 +1,87 @@
package cluster
import (
"encoding/json"
"fmt"
"hash/fnv"
"net/url"
"strings"
"github.com/argoproj/argo-cd/common"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/util/kube"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
apiv1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/rbac"
)
// Server provides a Cluster service
type Server struct {
ns string
kubeclientset kubernetes.Interface
appclientset appclientset.Interface
db db.ArgoDB
enf *rbac.Enforcer
}
// NewServer returns a new instance of the Cluster service
func NewServer(namespace string, kubeclientset kubernetes.Interface, appclientset appclientset.Interface) *Server {
func NewServer(db db.ArgoDB, enf *rbac.Enforcer) *Server {
return &Server{
ns: namespace,
appclientset: appclientset,
kubeclientset: kubeclientset,
db: db,
enf: enf,
}
}
// ListPods returns application related pods in a cluster
func (s *Server) ListPods(ctx context.Context, q *ClusterQuery) (*apiv1.PodList, error) {
// TODO: filter by the app label
return s.kubeclientset.CoreV1().Pods(s.ns).List(metav1.ListOptions{})
}
// List returns list of clusters
func (s *Server) List(ctx context.Context, q *ClusterQuery) (*appv1.ClusterList, error) {
listOpts := metav1.ListOptions{}
labelSelector := labels.NewSelector()
req, err := labels.NewRequirement(common.LabelKeySecretType, selection.Equals, []string{common.SecretTypeCluster})
if err != nil {
return nil, err
clusterList, err := s.db.ListClusters(ctx)
if clusterList != nil {
newItems := make([]appv1.Cluster, 0)
for _, clust := range clusterList.Items {
if s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "get", fmt.Sprintf("*/%s", clust.Server)) {
newItems = append(newItems, *redact(&clust))
}
}
clusterList.Items = newItems
}
labelSelector = labelSelector.Add(*req)
listOpts.LabelSelector = labelSelector.String()
clusterSecrets, err := s.kubeclientset.CoreV1().Secrets(s.ns).List(listOpts)
if err != nil {
return nil, err
}
clusterList := appv1.ClusterList{
Items: make([]appv1.Cluster, len(clusterSecrets.Items)),
}
for i, clusterSecret := range clusterSecrets.Items {
clusterList.Items[i] = *secretToCluster(&clusterSecret)
}
return &clusterList, nil
return clusterList, err
}
// Create creates a cluster
func (s *Server) Create(ctx context.Context, c *appv1.Cluster) (*appv1.Cluster, error) {
err := kube.TestConfig(c.RESTConfig())
if err != nil {
return nil, err
func (s *Server) Create(ctx context.Context, q *ClusterCreateRequest) (*appv1.Cluster, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "create", fmt.Sprintf("*/%s", q.Cluster.Server)) {
return nil, grpc.ErrPermissionDenied
}
secName := serverToSecretName(c.Server)
clusterSecret := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secName,
Labels: map[string]string{
common.LabelKeySecretType: common.SecretTypeCluster,
},
},
}
clusterSecret.StringData = clusterToStringData(c)
clusterSecret, err = s.kubeclientset.CoreV1().Secrets(s.ns).Create(clusterSecret)
if err != nil {
if apierr.IsAlreadyExists(err) {
return nil, grpc.Errorf(codes.AlreadyExists, "cluster '%s' already exists", c.Server)
}
return nil, err
}
return secretToCluster(clusterSecret), nil
}
// ClusterEvent contains information about cluster event
type ClusterEvent struct {
Type watch.EventType
Cluster *appv1.Cluster
}
// WatchClusters allow watching for cluster events
func (s *Server) WatchClusters(ctx context.Context, callback func(*ClusterEvent)) error {
listOpts := metav1.ListOptions{}
labelSelector := labels.NewSelector()
req, err := labels.NewRequirement(common.LabelKeySecretType, selection.Equals, []string{common.SecretTypeCluster})
if err != nil {
return err
}
labelSelector = labelSelector.Add(*req)
listOpts.LabelSelector = labelSelector.String()
w, err := s.kubeclientset.CoreV1().Secrets(s.ns).Watch(listOpts)
if err != nil {
return err
}
go func() {
<-ctx.Done()
w.Stop()
}()
for next := range w.ResultChan() {
secret := next.Object.(*apiv1.Secret)
cluster := secretToCluster(secret)
callback(&ClusterEvent{
Type: next.Type,
Cluster: cluster,
})
}
return nil
}
func (s *Server) getClusterSecret(server string) (*apiv1.Secret, error) {
secName := serverToSecretName(server)
clusterSecret, err := s.kubeclientset.CoreV1().Secrets(s.ns).Get(secName, metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
return nil, grpc.Errorf(codes.NotFound, "cluster '%s' not found", server)
}
return nil, err
}
return clusterSecret, nil
clust, err := s.db.CreateCluster(ctx, q.Cluster)
return redact(clust), err
}
// Get returns a cluster from a query
func (s *Server) Get(ctx context.Context, q *ClusterQuery) (*appv1.Cluster, error) {
clusterSecret, err := s.getClusterSecret(q.Server)
if err != nil {
return nil, err
if !s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "get", fmt.Sprintf("*/%s", q.Server)) {
return nil, grpc.ErrPermissionDenied
}
return secretToCluster(clusterSecret), nil
clust, err := s.db.GetCluster(ctx, q.Server)
return redact(clust), err
}
// Update updates a cluster
func (s *Server) Update(ctx context.Context, c *appv1.Cluster) (*appv1.Cluster, error) {
err := kube.TestConfig(c.RESTConfig())
if err != nil {
return nil, err
func (s *Server) Update(ctx context.Context, q *ClusterUpdateRequest) (*appv1.Cluster, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "update", fmt.Sprintf("*/%s", q.Cluster.Server)) {
return nil, grpc.ErrPermissionDenied
}
clusterSecret, err := s.getClusterSecret(c.Server)
if err != nil {
return nil, err
}
clusterSecret.StringData = clusterToStringData(c)
clusterSecret, err = s.kubeclientset.CoreV1().Secrets(s.ns).Update(clusterSecret)
if err != nil {
return nil, err
}
return secretToCluster(clusterSecret), nil
}
// UpdateREST updates a cluster (special handler intended to be used only by the gRPC gateway)
func (s *Server) UpdateREST(ctx context.Context, r *ClusterUpdateRequest) (*appv1.Cluster, error) {
return s.Update(ctx, r.Cluster)
clust, err := s.db.UpdateCluster(ctx, q.Cluster)
return redact(clust), err
}
// Delete deletes a cluster by name
func (s *Server) Delete(ctx context.Context, q *ClusterQuery) (*ClusterResponse, error) {
secName := serverToSecretName(q.Server)
err := s.kubeclientset.CoreV1().Secrets(s.ns).Delete(secName, &metav1.DeleteOptions{})
if !s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "delete", fmt.Sprintf("*/%s", q.Server)) {
return nil, grpc.ErrPermissionDenied
}
err := s.db.DeleteCluster(ctx, q.Server)
return &ClusterResponse{}, err
}
// serverToSecretName hashes server address to the secret name using a formula.
// Part of the server address is incorporated for debugging purposes
func serverToSecretName(server string) string {
serverURL, err := url.ParseRequestURI(server)
if err != nil {
panic(err)
func redact(clust *appv1.Cluster) *appv1.Cluster {
if clust == nil {
return nil
}
h := fnv.New32a()
_, _ = h.Write([]byte(server))
host := strings.ToLower(strings.Split(serverURL.Host, ":")[0])
return fmt.Sprintf("cluster-%s-%v", host, h.Sum32())
}
// clusterToStringData converts a cluster object to string data for serialization to a secret
func clusterToStringData(c *appv1.Cluster) map[string]string {
stringData := make(map[string]string)
stringData["server"] = c.Server
if c.Name == "" {
stringData["name"] = c.Server
} else {
stringData["name"] = c.Name
}
configBytes, err := json.Marshal(c.Config)
if err != nil {
panic(err)
}
stringData["config"] = string(configBytes)
return stringData
}
// secretToRepo converts a secret into a repository object
func secretToCluster(s *apiv1.Secret) *appv1.Cluster {
var config appv1.ClusterConfig
err := json.Unmarshal(s.Data["config"], &config)
if err != nil {
panic(err)
}
cluster := appv1.Cluster{
Server: string(s.Data["server"]),
Name: string(s.Data["name"]),
Config: config,
}
return &cluster
clust.Config.Password = ""
clust.Config.BearerToken = ""
clust.Config.TLSClientConfig.KeyData = nil
return clust
}

View File

@@ -14,6 +14,7 @@
It has these top-level messages:
ClusterQuery
ClusterResponse
ClusterCreateRequest
ClusterUpdateRequest
*/
package cluster
@@ -23,7 +24,7 @@ import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import k8s_io_api_core_v1 "k8s.io/api/core/v1"
import _ "k8s.io/api/core/v1"
import github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import context "golang.org/x/net/context"
@@ -67,22 +68,30 @@ func (m *ClusterResponse) String() string { return proto.CompactTextS
func (*ClusterResponse) ProtoMessage() {}
func (*ClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorCluster, []int{1} }
type ClusterCreateRequest struct {
Cluster *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
}
func (m *ClusterCreateRequest) Reset() { *m = ClusterCreateRequest{} }
func (m *ClusterCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterCreateRequest) ProtoMessage() {}
func (*ClusterCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCluster, []int{2} }
func (m *ClusterCreateRequest) GetCluster() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster {
if m != nil {
return m.Cluster
}
return nil
}
type ClusterUpdateRequest struct {
Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"`
Cluster *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"`
Cluster *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
}
func (m *ClusterUpdateRequest) Reset() { *m = ClusterUpdateRequest{} }
func (m *ClusterUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterUpdateRequest) ProtoMessage() {}
func (*ClusterUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCluster, []int{2} }
func (m *ClusterUpdateRequest) GetServer() string {
if m != nil {
return m.Server
}
return ""
}
func (*ClusterUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCluster, []int{3} }
func (m *ClusterUpdateRequest) GetCluster() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster {
if m != nil {
@@ -94,6 +103,7 @@ func (m *ClusterUpdateRequest) GetCluster() *github_com_argoproj_argo_cd_pkg_api
func init() {
proto.RegisterType((*ClusterQuery)(nil), "cluster.ClusterQuery")
proto.RegisterType((*ClusterResponse)(nil), "cluster.ClusterResponse")
proto.RegisterType((*ClusterCreateRequest)(nil), "cluster.ClusterCreateRequest")
proto.RegisterType((*ClusterUpdateRequest)(nil), "cluster.ClusterUpdateRequest")
}
@@ -111,17 +121,13 @@ type ClusterServiceClient interface {
// List returns list of clusters
List(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ClusterList, error)
// Create creates a cluster
Create(ctx context.Context, in *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Create(ctx context.Context, in *ClusterCreateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
// Get returns a cluster by server address
Get(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
// Update updates a cluster
Update(ctx context.Context, in *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
// Update updates a cluster (special handler intended to be used only by the gRPC gateway)
UpdateREST(ctx context.Context, in *ClusterUpdateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Update(ctx context.Context, in *ClusterUpdateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
// Delete updates a cluster
Delete(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*ClusterResponse, error)
// ListPods returns pods in a cluster
ListPods(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*k8s_io_api_core_v1.PodList, error)
}
type clusterServiceClient struct {
@@ -141,7 +147,7 @@ func (c *clusterServiceClient) List(ctx context.Context, in *ClusterQuery, opts
return out, nil
}
func (c *clusterServiceClient) Create(ctx context.Context, in *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
func (c *clusterServiceClient) Create(ctx context.Context, in *ClusterCreateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
err := grpc.Invoke(ctx, "/cluster.ClusterService/Create", in, out, c.cc, opts...)
if err != nil {
@@ -159,7 +165,7 @@ func (c *clusterServiceClient) Get(ctx context.Context, in *ClusterQuery, opts .
return out, nil
}
func (c *clusterServiceClient) Update(ctx context.Context, in *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
func (c *clusterServiceClient) Update(ctx context.Context, in *ClusterUpdateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
err := grpc.Invoke(ctx, "/cluster.ClusterService/Update", in, out, c.cc, opts...)
if err != nil {
@@ -168,15 +174,6 @@ func (c *clusterServiceClient) Update(ctx context.Context, in *github_com_argopr
return out, nil
}
func (c *clusterServiceClient) UpdateREST(ctx context.Context, in *ClusterUpdateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
err := grpc.Invoke(ctx, "/cluster.ClusterService/UpdateREST", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterServiceClient) Delete(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*ClusterResponse, error) {
out := new(ClusterResponse)
err := grpc.Invoke(ctx, "/cluster.ClusterService/Delete", in, out, c.cc, opts...)
@@ -186,32 +183,19 @@ func (c *clusterServiceClient) Delete(ctx context.Context, in *ClusterQuery, opt
return out, nil
}
func (c *clusterServiceClient) ListPods(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*k8s_io_api_core_v1.PodList, error) {
out := new(k8s_io_api_core_v1.PodList)
err := grpc.Invoke(ctx, "/cluster.ClusterService/ListPods", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ClusterService service
type ClusterServiceServer interface {
// List returns list of clusters
List(context.Context, *ClusterQuery) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ClusterList, error)
// Create creates a cluster
Create(context.Context, *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Create(context.Context, *ClusterCreateRequest) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
// Get returns a cluster by server address
Get(context.Context, *ClusterQuery) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
// Update updates a cluster
Update(context.Context, *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
// Update updates a cluster (special handler intended to be used only by the gRPC gateway)
UpdateREST(context.Context, *ClusterUpdateRequest) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Update(context.Context, *ClusterUpdateRequest) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
// Delete updates a cluster
Delete(context.Context, *ClusterQuery) (*ClusterResponse, error)
// ListPods returns pods in a cluster
ListPods(context.Context, *ClusterQuery) (*k8s_io_api_core_v1.PodList, error)
}
func RegisterClusterServiceServer(s *grpc.Server, srv ClusterServiceServer) {
@@ -237,7 +221,7 @@ func _ClusterService_List_Handler(srv interface{}, ctx context.Context, dec func
}
func _ClusterService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
in := new(ClusterCreateRequest)
if err := dec(in); err != nil {
return nil, err
}
@@ -249,7 +233,7 @@ func _ClusterService_Create_Handler(srv interface{}, ctx context.Context, dec fu
FullMethod: "/cluster.ClusterService/Create",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterServiceServer).Create(ctx, req.(*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster))
return srv.(ClusterServiceServer).Create(ctx, req.(*ClusterCreateRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -273,7 +257,7 @@ func _ClusterService_Get_Handler(srv interface{}, ctx context.Context, dec func(
}
func _ClusterService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
in := new(ClusterUpdateRequest)
if err := dec(in); err != nil {
return nil, err
}
@@ -285,25 +269,7 @@ func _ClusterService_Update_Handler(srv interface{}, ctx context.Context, dec fu
FullMethod: "/cluster.ClusterService/Update",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterServiceServer).Update(ctx, req.(*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster))
}
return interceptor(ctx, in, info, handler)
}
func _ClusterService_UpdateREST_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ClusterUpdateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClusterServiceServer).UpdateREST(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/cluster.ClusterService/UpdateREST",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterServiceServer).UpdateREST(ctx, req.(*ClusterUpdateRequest))
return srv.(ClusterServiceServer).Update(ctx, req.(*ClusterUpdateRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -326,24 +292,6 @@ func _ClusterService_Delete_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
func _ClusterService_ListPods_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ClusterQuery)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClusterServiceServer).ListPods(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/cluster.ClusterService/ListPods",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterServiceServer).ListPods(ctx, req.(*ClusterQuery))
}
return interceptor(ctx, in, info, handler)
}
var _ClusterService_serviceDesc = grpc.ServiceDesc{
ServiceName: "cluster.ClusterService",
HandlerType: (*ClusterServiceServer)(nil),
@@ -364,18 +312,10 @@ var _ClusterService_serviceDesc = grpc.ServiceDesc{
MethodName: "Update",
Handler: _ClusterService_Update_Handler,
},
{
MethodName: "UpdateREST",
Handler: _ClusterService_UpdateREST_Handler,
},
{
MethodName: "Delete",
Handler: _ClusterService_Delete_Handler,
},
{
MethodName: "ListPods",
Handler: _ClusterService_ListPods_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "server/cluster/cluster.proto",
@@ -423,6 +363,34 @@ func (m *ClusterResponse) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func (m *ClusterCreateRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ClusterCreateRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Cluster != nil {
dAtA[i] = 0xa
i++
i = encodeVarintCluster(dAtA, i, uint64(m.Cluster.Size()))
n1, err := m.Cluster.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
return i, nil
}
func (m *ClusterUpdateRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -438,21 +406,15 @@ func (m *ClusterUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.Server) > 0 {
if m.Cluster != nil {
dAtA[i] = 0xa
i++
i = encodeVarintCluster(dAtA, i, uint64(len(m.Server)))
i += copy(dAtA[i:], m.Server)
}
if m.Cluster != nil {
dAtA[i] = 0x12
i++
i = encodeVarintCluster(dAtA, i, uint64(m.Cluster.Size()))
n1, err := m.Cluster.MarshalTo(dAtA[i:])
n2, err := m.Cluster.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
i += n2
}
return i, nil
}
@@ -482,13 +444,19 @@ func (m *ClusterResponse) Size() (n int) {
return n
}
func (m *ClusterCreateRequest) Size() (n int) {
var l int
_ = l
if m.Cluster != nil {
l = m.Cluster.Size()
n += 1 + l + sovCluster(uint64(l))
}
return n
}
func (m *ClusterUpdateRequest) Size() (n int) {
var l int
_ = l
l = len(m.Server)
if l > 0 {
n += 1 + l + sovCluster(uint64(l))
}
if m.Cluster != nil {
l = m.Cluster.Size()
n += 1 + l + sovCluster(uint64(l))
@@ -638,6 +606,89 @@ func (m *ClusterResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *ClusterCreateRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCluster
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ClusterCreateRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ClusterCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCluster
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCluster
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Cluster == nil {
m.Cluster = &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster{}
}
if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCluster(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthCluster
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ClusterUpdateRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -668,35 +719,6 @@ func (m *ClusterUpdateRequest) Unmarshal(dAtA []byte) error {
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCluster
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCluster
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Server = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType)
}
@@ -858,37 +880,34 @@ var (
func init() { proto.RegisterFile("server/cluster/cluster.proto", fileDescriptorCluster) }
var fileDescriptorCluster = []byte{
// 510 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0x4f, 0x6b, 0x14, 0x31,
0x14, 0x37, 0x55, 0xa6, 0x1a, 0xc5, 0x3f, 0xa1, 0x95, 0x75, 0xb6, 0x2e, 0xeb, 0x20, 0xa5, 0x2c,
0x98, 0xb0, 0xf5, 0x52, 0x7a, 0x6c, 0xfd, 0x83, 0xe0, 0xa1, 0x6e, 0xf5, 0x22, 0x05, 0x49, 0x67,
0x9e, 0xd3, 0x71, 0xc7, 0x49, 0x4c, 0xb2, 0x03, 0x22, 0x22, 0xe8, 0xd5, 0x93, 0x1e, 0xfd, 0x0c,
0x7e, 0x07, 0x8f, 0x1e, 0x05, 0xbf, 0x80, 0x2c, 0x7e, 0x10, 0x99, 0x4c, 0x62, 0xdb, 0x2d, 0xb3,
0x17, 0xe7, 0xe0, 0x69, 0x92, 0x97, 0xcc, 0xef, 0xf7, 0x7e, 0xef, 0xfd, 0xf2, 0xf0, 0x8a, 0x06,
0x55, 0x82, 0x62, 0x71, 0x3e, 0xd1, 0xe6, 0xf0, 0x4b, 0xa5, 0x12, 0x46, 0x90, 0x45, 0xb7, 0x0d,
0x97, 0x52, 0x91, 0x0a, 0x1b, 0x63, 0xd5, 0xaa, 0x3e, 0x0e, 0x57, 0x52, 0x21, 0xd2, 0x1c, 0x18,
0x97, 0x19, 0xe3, 0x45, 0x21, 0x0c, 0x37, 0x99, 0x28, 0xb4, 0x3b, 0x8d, 0xc6, 0x1b, 0x9a, 0x66,
0xc2, 0x9e, 0xc6, 0x42, 0x01, 0x2b, 0x87, 0x2c, 0x85, 0x02, 0x14, 0x37, 0x90, 0xb8, 0x3b, 0x0f,
0xd2, 0xcc, 0x1c, 0x4c, 0xf6, 0x69, 0x2c, 0x5e, 0x32, 0xae, 0x2c, 0xc5, 0x0b, 0xbb, 0xb8, 0x15,
0x27, 0x4c, 0x8e, 0xd3, 0xea, 0x67, 0xcd, 0xb8, 0x94, 0x79, 0x16, 0x5b, 0x70, 0x56, 0x0e, 0x79,
0x2e, 0x0f, 0xf8, 0x09, 0xa8, 0x68, 0x15, 0x5f, 0xd8, 0xae, 0xb3, 0x7d, 0x34, 0x01, 0xf5, 0x9a,
0x5c, 0xc5, 0x41, 0xad, 0xad, 0x83, 0xfa, 0x68, 0xed, 0xdc, 0xc8, 0xed, 0xa2, 0x2b, 0xf8, 0x92,
0xbb, 0x37, 0x02, 0x2d, 0x45, 0xa1, 0x21, 0xfa, 0x88, 0xf0, 0x92, 0x8b, 0x3d, 0x91, 0x09, 0x37,
0x30, 0x82, 0x57, 0x13, 0xd0, 0xa6, 0x09, 0x83, 0xec, 0x61, 0x5f, 0x99, 0xce, 0x42, 0x1f, 0xad,
0x9d, 0x5f, 0xdf, 0xa2, 0x87, 0x42, 0xa8, 0x17, 0x62, 0x17, 0xcf, 0xe2, 0x84, 0xca, 0x71, 0x4a,
0x2b, 0x21, 0xf4, 0x88, 0x10, 0xea, 0x85, 0x50, 0x9f, 0x8d, 0x87, 0x5c, 0xff, 0xb6, 0x88, 0x2f,
0xba, 0xe0, 0x2e, 0xa8, 0x32, 0x8b, 0x81, 0xbc, 0xc3, 0x67, 0x1e, 0x66, 0xda, 0x90, 0x65, 0xea,
0x1b, 0x74, 0x54, 0x6b, 0x78, 0xef, 0xdf, 0xe9, 0x2b, 0xf8, 0xa8, 0xf3, 0xfe, 0xe7, 0xef, 0xcf,
0x0b, 0x84, 0x5c, 0xb6, 0x4d, 0x2b, 0x87, 0xde, 0x0e, 0x9a, 0x7c, 0x45, 0x38, 0xd8, 0x56, 0xc0,
0x0d, 0x90, 0x16, 0xb4, 0x86, 0x2d, 0x60, 0x44, 0x5d, 0x9b, 0xec, 0x72, 0x74, 0x22, 0xd9, 0x4d,
0x34, 0x20, 0x1f, 0x10, 0x3e, 0x7d, 0x1f, 0x1a, 0x0b, 0xd6, 0x06, 0xff, 0x0d, 0xcb, 0xdf, 0x25,
0xd7, 0x66, 0xf9, 0xd9, 0x9b, 0xda, 0x26, 0x6f, 0xc9, 0x27, 0x84, 0x83, 0xda, 0x51, 0xff, 0x4d,
0xd5, 0x4e, 0x91, 0x2f, 0x08, 0x63, 0x67, 0xf3, 0xbb, 0xbb, 0x8f, 0xc9, 0xf5, 0xd9, 0x0a, 0x1d,
0x7b, 0x02, 0xad, 0x70, 0x0e, 0x6c, 0xa5, 0x6e, 0x86, 0xcd, 0x95, 0xda, 0xf4, 0xe6, 0x27, 0x7b,
0x38, 0xb8, 0x03, 0x39, 0x18, 0x68, 0x6a, 0x5d, 0x67, 0x36, 0xfc, 0xf7, 0x19, 0xbb, 0x86, 0x0c,
0xe6, 0x34, 0xe4, 0x39, 0x3e, 0x5b, 0x19, 0x7d, 0x47, 0x24, 0xba, 0x09, 0xbf, 0x4b, 0xeb, 0xb9,
0x55, 0xe9, 0xa2, 0xd5, 0xdc, 0xa2, 0xe5, 0x90, 0xee, 0x88, 0xc4, 0x3e, 0x90, 0x55, 0x4b, 0xd1,
0x27, 0xbd, 0x46, 0x0a, 0x26, 0x45, 0xa2, 0xb7, 0x36, 0xbe, 0x4f, 0x7b, 0xe8, 0xc7, 0xb4, 0x87,
0x7e, 0x4d, 0x7b, 0xe8, 0xe9, 0x60, 0xde, 0x94, 0x3b, 0x3e, 0x80, 0xf7, 0x03, 0x3b, 0xcd, 0x6e,
0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x25, 0x28, 0x85, 0xfb, 0x99, 0x05, 0x00, 0x00,
// 454 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcd, 0xaa, 0x13, 0x31,
0x14, 0xc7, 0x89, 0x1f, 0x23, 0x46, 0xf1, 0x23, 0x5c, 0xa5, 0x8e, 0xd7, 0xe2, 0x9d, 0x85, 0xc8,
0x45, 0x13, 0x5a, 0x37, 0x17, 0x97, 0xf7, 0x8a, 0x22, 0xb8, 0xb1, 0xe2, 0x46, 0x0a, 0x92, 0xce,
0x1c, 0xd2, 0xb1, 0xe3, 0x24, 0x26, 0x99, 0x01, 0x11, 0x11, 0x74, 0xef, 0xc6, 0x07, 0x70, 0xeb,
0xa3, 0xb8, 0x14, 0x7c, 0x01, 0x29, 0x3e, 0x88, 0x4c, 0x26, 0xb1, 0x1f, 0x43, 0xdd, 0x58, 0xee,
0xaa, 0xc9, 0x49, 0x7a, 0xfe, 0xbf, 0xf3, 0xcf, 0x99, 0x83, 0x77, 0x0d, 0xe8, 0x1a, 0x34, 0x4b,
0x8b, 0xca, 0xd8, 0xc5, 0x2f, 0x55, 0x5a, 0x5a, 0x49, 0xce, 0xf8, 0x6d, 0xbc, 0x23, 0xa4, 0x90,
0x2e, 0xc6, 0x9a, 0x55, 0x7b, 0x1c, 0xef, 0x0a, 0x29, 0x45, 0x01, 0x8c, 0xab, 0x9c, 0xf1, 0xb2,
0x94, 0x96, 0xdb, 0x5c, 0x96, 0xc6, 0x9f, 0x26, 0xb3, 0x03, 0x43, 0x73, 0xe9, 0x4e, 0x53, 0xa9,
0x81, 0xd5, 0x03, 0x26, 0xa0, 0x04, 0xcd, 0x2d, 0x64, 0xfe, 0xce, 0x63, 0x91, 0xdb, 0x69, 0x35,
0xa1, 0xa9, 0x7c, 0xcd, 0xb8, 0x76, 0x12, 0xaf, 0xdc, 0xe2, 0x6e, 0x9a, 0x31, 0x35, 0x13, 0xcd,
0x9f, 0x0d, 0xe3, 0x4a, 0x15, 0x79, 0xea, 0x92, 0xb3, 0x7a, 0xc0, 0x0b, 0x35, 0xe5, 0x9d, 0x54,
0xc9, 0x2d, 0x7c, 0xfe, 0xa8, 0xa5, 0x7d, 0x5a, 0x81, 0x7e, 0x4b, 0xae, 0xe2, 0xa8, 0xad, 0xad,
0x87, 0x6e, 0xa2, 0xdb, 0x67, 0x47, 0x7e, 0x97, 0x5c, 0xc6, 0x17, 0xfd, 0xbd, 0x11, 0x18, 0x25,
0x4b, 0x03, 0x89, 0xc5, 0x3b, 0x3e, 0x74, 0xa4, 0x81, 0x5b, 0x18, 0xc1, 0x9b, 0x0a, 0x8c, 0x25,
0x63, 0x1c, 0x0c, 0x70, 0x39, 0xce, 0x0d, 0x0f, 0xe9, 0x82, 0x97, 0x06, 0x5e, 0xb7, 0x78, 0x99,
0x66, 0x54, 0xcd, 0x04, 0x6d, 0x78, 0xe9, 0x12, 0x2f, 0x0d, 0xbc, 0x34, 0x88, 0x86, 0x94, 0x4b,
0xaa, 0xcf, 0x55, 0x76, 0x5c, 0xaa, 0xc3, 0x6f, 0xa7, 0xf1, 0x05, 0x1f, 0x7c, 0x06, 0xba, 0xce,
0x53, 0x20, 0x1f, 0xf0, 0xa9, 0x27, 0xb9, 0xb1, 0xe4, 0x0a, 0x0d, 0xaf, 0xbf, 0x6c, 0x64, 0xfc,
0xf0, 0xff, 0xe5, 0x9b, 0xf4, 0x49, 0xef, 0xe3, 0xcf, 0xdf, 0x5f, 0x4e, 0x10, 0x72, 0xc9, 0x75,
0x44, 0x3d, 0x08, 0xbd, 0x66, 0xc8, 0x67, 0x84, 0xa3, 0xd6, 0x79, 0x72, 0x63, 0x9d, 0x61, 0xe5,
0x45, 0xe2, 0x2d, 0x58, 0x91, 0xec, 0x39, 0x8e, 0xeb, 0x49, 0x87, 0xe3, 0x7e, 0x30, 0x89, 0x7c,
0x42, 0xf8, 0xe4, 0x23, 0xd8, 0xe8, 0xc8, 0x16, 0x29, 0xc8, 0xb5, 0x75, 0x0a, 0xf6, 0xae, 0x6d,
0xd4, 0xf7, 0xe4, 0x2b, 0xc2, 0x51, 0xdb, 0x1a, 0x5d, 0x5b, 0x56, 0x5a, 0x66, 0x2b, 0x40, 0x43,
0x07, 0x74, 0x27, 0xde, 0xeb, 0x02, 0x05, 0x6d, 0x0f, 0xb6, 0xf0, 0x69, 0x8c, 0xa3, 0x07, 0x50,
0x80, 0x85, 0x4d, 0x4e, 0xf5, 0xd6, 0xc3, 0x7f, 0xbf, 0x39, 0x5f, 0xff, 0xfe, 0xe6, 0xfa, 0x0f,
0x0f, 0xbe, 0xcf, 0xfb, 0xe8, 0xc7, 0xbc, 0x8f, 0x7e, 0xcd, 0xfb, 0xe8, 0xc5, 0xfe, 0xbf, 0x46,
0xc5, 0xea, 0x14, 0x9b, 0x44, 0x6e, 0x24, 0xdc, 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x32,
0x83, 0x14, 0xde, 0x04, 0x00, 0x00,
}

View File

@@ -12,7 +12,6 @@ import (
"io"
"net/http"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
@@ -47,10 +46,10 @@ func request_ClusterService_List_0(ctx context.Context, marshaler runtime.Marsha
}
func request_ClusterService_Create_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v1alpha1.Cluster
var protoReq ClusterCreateRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.Cluster); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
@@ -86,7 +85,7 @@ func request_ClusterService_Get_0(ctx context.Context, marshaler runtime.Marshal
}
func request_ClusterService_UpdateREST_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
func request_ClusterService_Update_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ClusterUpdateRequest
var metadata runtime.ServerMetadata
@@ -101,18 +100,18 @@ func request_ClusterService_UpdateREST_0(ctx context.Context, marshaler runtime.
_ = err
)
val, ok = pathParams["server"]
val, ok = pathParams["cluster.server"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "server")
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "cluster.server")
}
protoReq.Server, err = runtime.String(val)
err = runtime.PopulateFieldFromPath(&protoReq, "cluster.server", val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "server", err)
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "cluster.server", err)
}
msg, err := client.UpdateREST(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
msg, err := client.Update(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
@@ -144,33 +143,6 @@ func request_ClusterService_Delete_0(ctx context.Context, marshaler runtime.Mars
}
func request_ClusterService_ListPods_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ClusterQuery
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["server"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "server")
}
protoReq.Server, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "server", err)
}
msg, err := client.ListPods(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterClusterServiceHandlerFromEndpoint is same as RegisterClusterServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterClusterServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
@@ -296,7 +268,7 @@ func RegisterClusterServiceHandlerClient(ctx context.Context, mux *runtime.Serve
})
mux.Handle("PUT", pattern_ClusterService_UpdateREST_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("PUT", pattern_ClusterService_Update_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
@@ -314,14 +286,14 @@ func RegisterClusterServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ClusterService_UpdateREST_0(rctx, inboundMarshaler, client, req, pathParams)
resp, md, err := request_ClusterService_Update_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ClusterService_UpdateREST_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_ClusterService_Update_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -354,35 +326,6 @@ func RegisterClusterServiceHandlerClient(ctx context.Context, mux *runtime.Serve
})
mux.Handle("GET", pattern_ClusterService_ListPods_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ClusterService_ListPods_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ClusterService_ListPods_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -393,11 +336,9 @@ var (
pattern_ClusterService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "clusters", "server"}, ""))
pattern_ClusterService_UpdateREST_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "clusters", "server"}, ""))
pattern_ClusterService_Update_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "clusters", "cluster.server"}, ""))
pattern_ClusterService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "clusters", "server"}, ""))
pattern_ClusterService_ListPods_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "clusters", "server", "pods"}, ""))
)
var (
@@ -407,9 +348,7 @@ var (
forward_ClusterService_Get_0 = runtime.ForwardResponseMessage
forward_ClusterService_UpdateREST_0 = runtime.ForwardResponseMessage
forward_ClusterService_Update_0 = runtime.ForwardResponseMessage
forward_ClusterService_Delete_0 = runtime.ForwardResponseMessage
forward_ClusterService_ListPods_0 = runtime.ForwardResponseMessage
)

View File

@@ -19,9 +19,12 @@ message ClusterQuery {
message ClusterResponse {}
message ClusterCreateRequest {
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster cluster = 1;
}
message ClusterUpdateRequest {
string server = 1;
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster cluster = 2;
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster cluster = 1;
}
// ClusterService
@@ -33,26 +36,22 @@ service ClusterService {
}
// Create creates a cluster
rpc Create(github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) {
rpc Create(ClusterCreateRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) {
option (google.api.http) = {
post: "/api/v1/clusters"
body: "*"
body: "cluster"
};
}
// Get returns a cluster by server address
rpc Get(ClusterQuery) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) {
// Get returns a cluster by server address
rpc Get(ClusterQuery) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) {
option (google.api.http).get = "/api/v1/clusters/{server}";
}
// Update updates a cluster
rpc Update(github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) {
}
// Update updates a cluster (special handler intended to be used only by the gRPC gateway)
rpc UpdateREST(ClusterUpdateRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) {
rpc Update(ClusterUpdateRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Cluster) {
option (google.api.http) = {
put: "/api/v1/clusters/{server}"
put: "/api/v1/clusters/{cluster.server}"
body: "cluster"
};
}
@@ -62,9 +61,4 @@ service ClusterService {
option (google.api.http).delete = "/api/v1/clusters/{server}";
}
// ListPods returns pods in a cluster
rpc ListPods(ClusterQuery) returns (k8s.io.api.core.v1.PodList) {
option (google.api.http).get = "/api/v1/clusters/{server}/pods";
}
}

View File

@@ -4,7 +4,6 @@ package mocks
import cluster "github.com/argoproj/argo-cd/server/cluster"
import context "context"
import mock "github.com/stretchr/testify/mock"
import v1 "k8s.io/api/core/v1"
import v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
// ClusterServiceServer is an autogenerated mock type for the ClusterServiceServer type
@@ -13,11 +12,11 @@ type ClusterServiceServer struct {
}
// Create provides a mock function with given fields: _a0, _a1
func (_m *ClusterServiceServer) Create(_a0 context.Context, _a1 *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
func (_m *ClusterServiceServer) Create(_a0 context.Context, _a1 *cluster.ClusterCreateRequest) (*v1alpha1.Cluster, error) {
ret := _m.Called(_a0, _a1)
var r0 *v1alpha1.Cluster
if rf, ok := ret.Get(0).(func(context.Context, *v1alpha1.Cluster) *v1alpha1.Cluster); ok {
if rf, ok := ret.Get(0).(func(context.Context, *cluster.ClusterCreateRequest) *v1alpha1.Cluster); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -26,7 +25,7 @@ func (_m *ClusterServiceServer) Create(_a0 context.Context, _a1 *v1alpha1.Cluste
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v1alpha1.Cluster) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, *cluster.ClusterCreateRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -104,54 +103,8 @@ func (_m *ClusterServiceServer) List(_a0 context.Context, _a1 *cluster.ClusterQu
return r0, r1
}
// ListPods provides a mock function with given fields: _a0, _a1
func (_m *ClusterServiceServer) ListPods(_a0 context.Context, _a1 *cluster.ClusterQuery) (*v1.PodList, error) {
ret := _m.Called(_a0, _a1)
var r0 *v1.PodList
if rf, ok := ret.Get(0).(func(context.Context, *cluster.ClusterQuery) *v1.PodList); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.PodList)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *cluster.ClusterQuery) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Update provides a mock function with given fields: _a0, _a1
func (_m *ClusterServiceServer) Update(_a0 context.Context, _a1 *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
ret := _m.Called(_a0, _a1)
var r0 *v1alpha1.Cluster
if rf, ok := ret.Get(0).(func(context.Context, *v1alpha1.Cluster) *v1alpha1.Cluster); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.Cluster)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v1alpha1.Cluster) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UpdateREST provides a mock function with given fields: _a0, _a1
func (_m *ClusterServiceServer) UpdateREST(_a0 context.Context, _a1 *cluster.ClusterUpdateRequest) (*v1alpha1.Cluster, error) {
func (_m *ClusterServiceServer) Update(_a0 context.Context, _a1 *cluster.ClusterUpdateRequest) (*v1alpha1.Cluster, error) {
ret := _m.Called(_a0, _a1)
var r0 *v1alpha1.Cluster

View File

@@ -12,11 +12,11 @@ type RepositoryServiceServer struct {
}
// Create provides a mock function with given fields: _a0, _a1
func (_m *RepositoryServiceServer) Create(_a0 context.Context, _a1 *v1alpha1.Repository) (*v1alpha1.Repository, error) {
func (_m *RepositoryServiceServer) Create(_a0 context.Context, _a1 *repository.RepoCreateRequest) (*v1alpha1.Repository, error) {
ret := _m.Called(_a0, _a1)
var r0 *v1alpha1.Repository
if rf, ok := ret.Get(0).(func(context.Context, *v1alpha1.Repository) *v1alpha1.Repository); ok {
if rf, ok := ret.Get(0).(func(context.Context, *repository.RepoCreateRequest) *v1alpha1.Repository); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -25,7 +25,7 @@ func (_m *RepositoryServiceServer) Create(_a0 context.Context, _a1 *v1alpha1.Rep
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v1alpha1.Repository) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, *repository.RepoCreateRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -103,21 +103,21 @@ func (_m *RepositoryServiceServer) List(_a0 context.Context, _a1 *repository.Rep
return r0, r1
}
// Update provides a mock function with given fields: _a0, _a1
func (_m *RepositoryServiceServer) Update(_a0 context.Context, _a1 *v1alpha1.Repository) (*v1alpha1.Repository, error) {
// ListKsonnetApps provides a mock function with given fields: _a0, _a1
func (_m *RepositoryServiceServer) ListKsonnetApps(_a0 context.Context, _a1 *repository.RepoKsonnetQuery) (*repository.RepoKsonnetResponse, error) {
ret := _m.Called(_a0, _a1)
var r0 *v1alpha1.Repository
if rf, ok := ret.Get(0).(func(context.Context, *v1alpha1.Repository) *v1alpha1.Repository); ok {
var r0 *repository.RepoKsonnetResponse
if rf, ok := ret.Get(0).(func(context.Context, *repository.RepoKsonnetQuery) *repository.RepoKsonnetResponse); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.Repository)
r0 = ret.Get(0).(*repository.RepoKsonnetResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v1alpha1.Repository) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, *repository.RepoKsonnetQuery) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -126,8 +126,8 @@ func (_m *RepositoryServiceServer) Update(_a0 context.Context, _a1 *v1alpha1.Rep
return r0, r1
}
// UpdateREST provides a mock function with given fields: _a0, _a1
func (_m *RepositoryServiceServer) UpdateREST(_a0 context.Context, _a1 *repository.RepoUpdateRequest) (*v1alpha1.Repository, error) {
// Update provides a mock function with given fields: _a0, _a1
func (_m *RepositoryServiceServer) Update(_a0 context.Context, _a1 *repository.RepoUpdateRequest) (*v1alpha1.Repository, error) {
ret := _m.Called(_a0, _a1)
var r0 *v1alpha1.Repository

View File

@@ -2,169 +2,152 @@ package repository
import (
"fmt"
"hash/fnv"
"strings"
"github.com/argoproj/argo-cd/common"
appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/util/git"
"github.com/ghodss/yaml"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
apiv1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/kubernetes"
appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/rbac"
)
// Server provides a Repository service
type Server struct {
ns string
kubeclientset kubernetes.Interface
appclientset appclientset.Interface
db db.ArgoDB
repoClientset reposerver.Clientset
enf *rbac.Enforcer
}
// NewServer returns a new instance of the Repository service
func NewServer(namespace string, kubeclientset kubernetes.Interface, appclientset appclientset.Interface) *Server {
func NewServer(
repoClientset reposerver.Clientset,
db db.ArgoDB,
enf *rbac.Enforcer,
) *Server {
return &Server{
ns: namespace,
appclientset: appclientset,
kubeclientset: kubeclientset,
db: db,
repoClientset: repoClientset,
enf: enf,
}
}
// List returns list of repositories
func (s *Server) List(ctx context.Context, q *RepoQuery) (*appsv1.RepositoryList, error) {
listOpts := metav1.ListOptions{}
labelSelector := labels.NewSelector()
req, err := labels.NewRequirement(common.LabelKeySecretType, selection.Equals, []string{common.SecretTypeRepository})
repoList, err := s.db.ListRepositories(ctx)
if repoList != nil {
newItems := make([]appsv1.Repository, 0)
for _, repo := range repoList.Items {
if s.enf.EnforceClaims(ctx.Value("claims"), "repositories", "get", fmt.Sprintf("*/%s", repo.Repo)) {
newItems = append(newItems, *redact(&repo))
}
}
repoList.Items = newItems
}
return repoList, err
}
// ListKsonnetApps returns list of Ksonnet apps in the repo
func (s *Server) ListKsonnetApps(ctx context.Context, q *RepoKsonnetQuery) (*RepoKsonnetResponse, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories/apps", "get", fmt.Sprintf("*/%s", q.Repo)) {
return nil, grpc.ErrPermissionDenied
}
repo, err := s.db.GetRepository(ctx, q.Repo)
if err != nil {
return nil, err
}
labelSelector = labelSelector.Add(*req)
listOpts.LabelSelector = labelSelector.String()
repoSecrets, err := s.kubeclientset.CoreV1().Secrets(s.ns).List(listOpts)
// Test the repo
conn, repoClient, err := s.repoClientset.NewRepositoryClient()
if err != nil {
return nil, err
}
repoList := appsv1.RepositoryList{
Items: make([]appsv1.Repository, len(repoSecrets.Items)),
defer util.Close(conn)
revision := q.Revision
if revision == "" {
revision = "HEAD"
}
for i, repoSec := range repoSecrets.Items {
repoList.Items[i] = *secretToRepo(&repoSec)
// Verify app.yaml is functional
req := repository.ListDirRequest{
Repo: repo,
Revision: revision,
Path: "*app.yaml",
}
return &repoList, nil
getRes, err := repoClient.ListDir(ctx, &req)
if err != nil {
return nil, err
}
out := make([]*KsonnetAppSpec, 0)
for _, path := range getRes.Items {
getFileRes, err := repoClient.GetFile(ctx, &repository.GetFileRequest{
Repo: repo,
Revision: revision,
Path: path,
})
if err != nil {
return nil, err
}
var appSpec KsonnetAppSpec
appSpec.Path = path
err = yaml.Unmarshal(getFileRes.Data, &appSpec)
if err == nil && appSpec.Name != "" && len(appSpec.Environments) > 0 {
out = append(out, &appSpec)
}
}
return &RepoKsonnetResponse{
Items: out,
}, nil
}
// Create creates a repository
func (s *Server) Create(ctx context.Context, r *appsv1.Repository) (*appsv1.Repository, error) {
shallowCopy := *r
r = &shallowCopy
r.Repo = git.NormalizeGitURL(r.Repo)
err := git.TestRepo(r.Repo, r.Username, r.Password, r.SSHPrivateKey)
if err != nil {
return nil, err
func (s *Server) Create(ctx context.Context, q *RepoCreateRequest) (*appsv1.Repository, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories", "create", fmt.Sprintf("*/%s", q.Repo.Repo)) {
return nil, grpc.ErrPermissionDenied
}
secName := repoURLToSecretName(r.Repo)
repoSecret := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secName,
Labels: map[string]string{
common.LabelKeySecretType: common.SecretTypeRepository,
},
},
}
repoSecret.StringData = repoToStringData(r)
repoSecret, err = s.kubeclientset.CoreV1().Secrets(s.ns).Create(repoSecret)
if err != nil {
if apierr.IsAlreadyExists(err) {
return nil, grpc.Errorf(codes.AlreadyExists, "repository '%s' already exists", r.Repo)
}
return nil, err
}
return secretToRepo(repoSecret), nil
}
func (s *Server) getRepoSecret(repo string) (*apiv1.Secret, error) {
secName := repoURLToSecretName(repo)
repoSecret, err := s.kubeclientset.CoreV1().Secrets(s.ns).Get(secName, metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
return nil, grpc.Errorf(codes.NotFound, "repo '%s' not found", repo)
}
return nil, err
}
return repoSecret, nil
repo, err := s.db.CreateRepository(ctx, q.Repo)
return redact(repo), err
}
// Get returns a repository by URL
func (s *Server) Get(ctx context.Context, q *RepoQuery) (*appsv1.Repository, error) {
repoSecret, err := s.getRepoSecret(q.Repo)
if err != nil {
return nil, err
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories", "get", fmt.Sprintf("*/%s", q.Repo)) {
return nil, grpc.ErrPermissionDenied
}
return secretToRepo(repoSecret), nil
repo, err := s.db.GetRepository(ctx, q.Repo)
return redact(repo), err
}
// Update updates a repository
func (s *Server) Update(ctx context.Context, r *appsv1.Repository) (*appsv1.Repository, error) {
err := git.TestRepo(r.Repo, r.Username, r.Password, r.SSHPrivateKey)
if err != nil {
return nil, err
func (s *Server) Update(ctx context.Context, q *RepoUpdateRequest) (*appsv1.Repository, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories", "update", fmt.Sprintf("*/%s", q.Repo.Repo)) {
return nil, grpc.ErrPermissionDenied
}
repoSecret, err := s.getRepoSecret(r.Repo)
if err != nil {
return nil, err
}
repoSecret.StringData = repoToStringData(r)
repoSecret, err = s.kubeclientset.CoreV1().Secrets(s.ns).Update(repoSecret)
if err != nil {
return nil, err
}
return secretToRepo(repoSecret), nil
}
// UpdateREST updates a repository (from a REST request)
func (s *Server) UpdateREST(ctx context.Context, r *RepoUpdateRequest) (*appsv1.Repository, error) {
return s.Update(ctx, r.Repo)
repo, err := s.db.UpdateRepository(ctx, q.Repo)
return redact(repo), err
}
// Delete updates a repository
func (s *Server) Delete(ctx context.Context, q *RepoQuery) (*RepoResponse, error) {
secName := repoURLToSecretName(q.Repo)
err := s.kubeclientset.CoreV1().Secrets(s.ns).Delete(secName, &metav1.DeleteOptions{})
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories", "delete", fmt.Sprintf("*/%s", q.Repo)) {
return nil, grpc.ErrPermissionDenied
}
err := s.db.DeleteRepository(ctx, q.Repo)
return &RepoResponse{}, err
}
// repoURLToSecretName hashes repo URL to the secret name using a formula.
// Part of the original repo name is incorporated for debugging purposes
func repoURLToSecretName(repo string) string {
repo = git.NormalizeGitURL(repo)
h := fnv.New32a()
_, _ = h.Write([]byte(repo))
parts := strings.Split(strings.TrimSuffix(repo, ".git"), "/")
return fmt.Sprintf("repo-%s-%v", strings.ToLower(parts[len(parts)-1]), h.Sum32())
}
// repoToStringData converts a repository object to string data for serialization to a secret
func repoToStringData(r *appsv1.Repository) map[string]string {
return map[string]string{
"repository": r.Repo,
"username": r.Username,
"password": r.Password,
"sshPrivateKey": r.SSHPrivateKey,
}
}
// secretToRepo converts a secret into a repository object
func secretToRepo(s *apiv1.Secret) *appsv1.Repository {
return &appsv1.Repository{
Repo: string(s.Data["repository"]),
Username: string(s.Data["username"]),
Password: string(s.Data["password"]),
SSHPrivateKey: string(s.Data["sshPrivateKey"]),
func redact(repo *appsv1.Repository) *appsv1.Repository {
if repo == nil {
return nil
}
repo.Password = ""
repo.SSHPrivateKey = ""
return repo
}

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,6 @@ import (
"io"
"net/http"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
@@ -46,11 +45,46 @@ func request_RepositoryService_List_0(ctx context.Context, marshaler runtime.Mar
}
func request_RepositoryService_Create_0(ctx context.Context, marshaler runtime.Marshaler, client RepositoryServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v1alpha1.Repository
var (
filter_RepositoryService_ListKsonnetApps_0 = &utilities.DoubleArray{Encoding: map[string]int{"repo": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
)
func request_RepositoryService_ListKsonnetApps_0(ctx context.Context, marshaler runtime.Marshaler, client RepositoryServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq RepoKsonnetQuery
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["repo"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "repo")
}
protoReq.Repo, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "repo", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_RepositoryService_ListKsonnetApps_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListKsonnetApps(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RepositoryService_Create_0(ctx context.Context, marshaler runtime.Marshaler, client RepositoryServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq RepoCreateRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.Repo); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
@@ -86,7 +120,7 @@ func request_RepositoryService_Get_0(ctx context.Context, marshaler runtime.Mars
}
func request_RepositoryService_UpdateREST_0(ctx context.Context, marshaler runtime.Marshaler, client RepositoryServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
func request_RepositoryService_Update_0(ctx context.Context, marshaler runtime.Marshaler, client RepositoryServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq RepoUpdateRequest
var metadata runtime.ServerMetadata
@@ -101,18 +135,18 @@ func request_RepositoryService_UpdateREST_0(ctx context.Context, marshaler runti
_ = err
)
val, ok = pathParams["url"]
val, ok = pathParams["repo.repo"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "url")
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "repo.repo")
}
protoReq.Url, err = runtime.String(val)
err = runtime.PopulateFieldFromPath(&protoReq, "repo.repo", val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "url", err)
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "repo.repo", err)
}
msg, err := client.UpdateREST(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
msg, err := client.Update(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
@@ -211,6 +245,35 @@ func RegisterRepositoryServiceHandlerClient(ctx context.Context, mux *runtime.Se
})
mux.Handle("GET", pattern_RepositoryService_ListKsonnetApps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RepositoryService_ListKsonnetApps_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RepositoryService_ListKsonnetApps_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RepositoryService_Create_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -269,7 +332,7 @@ func RegisterRepositoryServiceHandlerClient(ctx context.Context, mux *runtime.Se
})
mux.Handle("PUT", pattern_RepositoryService_UpdateREST_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("PUT", pattern_RepositoryService_Update_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
@@ -287,14 +350,14 @@ func RegisterRepositoryServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RepositoryService_UpdateREST_0(rctx, inboundMarshaler, client, req, pathParams)
resp, md, err := request_RepositoryService_Update_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RepositoryService_UpdateREST_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_RepositoryService_Update_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -333,11 +396,13 @@ func RegisterRepositoryServiceHandlerClient(ctx context.Context, mux *runtime.Se
var (
pattern_RepositoryService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "repositories"}, ""))
pattern_RepositoryService_ListKsonnetApps_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "repositories", "repo", "ksonnet"}, ""))
pattern_RepositoryService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "repositories"}, ""))
pattern_RepositoryService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "repositories", "repo"}, ""))
pattern_RepositoryService_UpdateREST_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "repositories", "url"}, ""))
pattern_RepositoryService_Update_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "repositories", "repo.repo"}, ""))
pattern_RepositoryService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "repositories", "repo"}, ""))
)
@@ -345,11 +410,13 @@ var (
var (
forward_RepositoryService_List_0 = runtime.ForwardResponseMessage
forward_RepositoryService_ListKsonnetApps_0 = runtime.ForwardResponseMessage
forward_RepositoryService_Create_0 = runtime.ForwardResponseMessage
forward_RepositoryService_Get_0 = runtime.ForwardResponseMessage
forward_RepositoryService_UpdateREST_0 = runtime.ForwardResponseMessage
forward_RepositoryService_Update_0 = runtime.ForwardResponseMessage
forward_RepositoryService_Delete_0 = runtime.ForwardResponseMessage
)

View File

@@ -11,6 +11,42 @@ import "google/api/annotations.proto";
import "k8s.io/api/core/v1/generated.proto";
import "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1/generated.proto";
// RepoKsonnetQuery is a query for Repository contents matching a particular path
message RepoKsonnetQuery {
string repo = 1;
string revision = 2;
}
// RepoKsonnetResponse is a response for Repository contents matching a particular path
message RepoKsonnetResponse {
repeated KsonnetAppSpec items = 1;
}
// KsonnetAppSpec contains Ksonnet app response
// This roughly reflects: ksonnet/ksonnet/metadata/app/schema.go
message KsonnetAppSpec {
string name = 1;
string path = 2;
map<string, KsonnetEnvironment> environments = 3;
}
message KsonnetEnvironment {
// Name is the user defined name of an environment
string name = 1;
// KubernetesVersion is the kubernetes version the targetted cluster is running on.
string k8sVersion = 2;
// Path is the relative project path containing metadata for this environment.
string path = 3;
// Destination stores the cluster address that this environment points to.
KsonnetEnvironmentDestination destination = 4;
}
message KsonnetEnvironmentDestination {
// Server is the Kubernetes server that the cluster is running on.
string server = 1;
// Namespace is the namespace of the Kubernetes server that targets should be deployed to
string namespace = 2;
}
// RepoQuery is a query for Repository resources
message RepoQuery {
@@ -19,9 +55,12 @@ message RepoQuery {
message RepoResponse {}
message RepoCreateRequest {
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository repo = 1;
}
message RepoUpdateRequest {
string url = 1;
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository repo = 2;
github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository repo = 1;
}
// RepositoryService
@@ -32,27 +71,28 @@ service RepositoryService {
option (google.api.http).get = "/api/v1/repositories";
}
// ListKsonnetApps returns list of Ksonnet apps in the repo
rpc ListKsonnetApps(RepoKsonnetQuery) returns (RepoKsonnetResponse) {
option (google.api.http).get = "/api/v1/repositories/{repo}/ksonnet";
}
// Create creates a repo
rpc Create(github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) {
rpc Create(RepoCreateRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) {
option (google.api.http) = {
post: "/api/v1/repositories"
body: "*"
body: "repo"
};
}
// Get returns a repo by name
rpc Get(RepoQuery) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) {
// Get returns a repo by name
rpc Get(RepoQuery) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) {
option (google.api.http).get = "/api/v1/repositories/{repo}";
}
// Update updates a repo
rpc Update(github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) {
}
// Update updates a repo (special handler intended to be used only by the gRPC gateway)
rpc UpdateREST(RepoUpdateRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) {
rpc Update(RepoUpdateRequest) returns (github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.Repository) {
option (google.api.http) = {
put: "/api/v1/repositories/{url}"
put: "/api/v1/repositories/{repo.repo}"
body: "repo"
};
}

View File

@@ -1,18 +0,0 @@
package repository
import "testing"
func TestRepoURLToSecretName(t *testing.T) {
tables := map[string]string{
"git://git@github.com:argoproj/ARGO-cd.git": "repo-argo-cd-83273445",
"https://github.com/argoproj/ARGO-cd": "repo-argo-cd-1890113693",
"https://github.com/argoproj/argo-cd": "repo-argo-cd-42374749",
"ssh://git@github.com/argoproj/argo-cd.git": "repo-argo-cd-3569564120",
}
for k, v := range tables {
if sn := repoURLToSecretName(k); sn != v {
t.Errorf("Expected secret name %q for repo %q; instead, got %q", v, k, sn)
}
}
}

View File

@@ -3,26 +3,15 @@ package server
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"os"
"strings"
"time"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/errors"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/server/application"
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/server/session"
"github.com/argoproj/argo-cd/server/version"
"github.com/argoproj/argo-cd/util/config"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
jsonutil "github.com/argoproj/argo-cd/util/json"
util_session "github.com/argoproj/argo-cd/util/session"
tlsutil "github.com/argoproj/argo-cd/util/tls"
"github.com/gobuffalo/packr"
golang_proto "github.com/golang/protobuf/proto"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
@@ -36,24 +25,69 @@ import (
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
)
const (
port = 8080
authCookieName = "argocd.argoproj.io/auth-token"
argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apiclient"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/server/application"
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/server/session"
"github.com/argoproj/argo-cd/server/settings"
"github.com/argoproj/argo-cd/server/version"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/dex"
dexutil "github.com/argoproj/argo-cd/util/dex"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
jsonutil "github.com/argoproj/argo-cd/util/json"
"github.com/argoproj/argo-cd/util/rbac"
util_session "github.com/argoproj/argo-cd/util/session"
settings_util "github.com/argoproj/argo-cd/util/settings"
tlsutil "github.com/argoproj/argo-cd/util/tls"
"github.com/argoproj/argo-cd/util/webhook"
)
var (
endpoint = fmt.Sprintf("localhost:%d", port)
// ErrNoSession indicates no auth token was supplied as part of a request
ErrNoSession = status.Errorf(codes.Unauthenticated, "no session information")
)
var backoff = wait.Backoff{
Steps: 5,
Duration: 500 * time.Millisecond,
Factor: 1.0,
Jitter: 0.1,
}
var (
box = packr.NewBox("../util/rbac")
builtinPolicy string
)
func init() {
var err error
builtinPolicy, err = box.MustString("builtin-policy.csv")
errors.CheckError(err)
}
// ArgoCDServer is the API server for ArgoCD
type ArgoCDServer struct {
ArgoCDServerOpts
settings config.ArgoCDSettings
log *log.Entry
ssoClientApp *dexutil.ClientApp
settings *settings_util.ArgoCDSettings
log *log.Entry
sessionMgr *util_session.SessionManager
settingsMgr *settings_util.SettingsManager
enf *rbac.Enforcer
// stopCh is the channel which when closed, will shutdown the ArgoCD server
stopCh chan struct{}
}
type ArgoCDServerOpts struct {
@@ -68,15 +102,23 @@ type ArgoCDServerOpts struct {
// NewServer returns a new instance of the ArgoCD API server
func NewServer(opts ArgoCDServerOpts) *ArgoCDServer {
configManager := config.NewConfigManager(opts.KubeClientset, opts.Namespace)
settings, err := configManager.GetSettings()
if err != nil {
log.Fatal(err)
}
settingsMgr := settings_util.NewSettingsManager(opts.KubeClientset, opts.Namespace)
settings, err := settingsMgr.GetSettings()
errors.CheckError(err)
sessionMgr := util_session.NewSessionManager(settings)
enf := rbac.NewEnforcer(opts.KubeClientset, opts.Namespace, common.ArgoCDRBACConfigMapName, nil)
enf.EnableEnforce(!opts.DisableAuth)
err = enf.SetBuiltinPolicy(builtinPolicy)
errors.CheckError(err)
enf.EnableLog(os.Getenv(common.EnvVarRBACDebug) == "1")
return &ArgoCDServer{
ArgoCDServerOpts: opts,
log: log.NewEntry(log.New()),
settings: *settings,
settings: settings,
sessionMgr: sessionMgr,
settingsMgr: settingsMgr,
enf: enf,
}
}
@@ -84,25 +126,31 @@ func NewServer(opts ArgoCDServerOpts) *ArgoCDServer {
// We use k8s.io/code-generator/cmd/go-to-protobuf to generate the .proto files from the API types.
// k8s.io/ go-to-protobuf uses protoc-gen-gogo, which comes from gogo/protobuf (a fork of
// golang/protobuf).
func (a *ArgoCDServer) Run() {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
func (a *ArgoCDServer) Run(ctx context.Context, port int) {
grpcS := a.newGRPCServer()
var httpS *http.Server
var httpsS *http.Server
if a.useTLS() {
httpS = newRedirectServer()
httpsS = a.newHTTPServer(ctx)
httpS = newRedirectServer(port)
httpsS = a.newHTTPServer(ctx, port)
} else {
httpS = a.newHTTPServer(ctx)
httpS = a.newHTTPServer(ctx, port)
}
// Cmux is used to support servicing gRPC and HTTP1.1+JSON on the same port
conn, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
errors.CheckError(err)
// Start listener
var conn net.Listener
var realErr error
_ = wait.ExponentialBackoff(backoff, func() (bool, error) {
conn, realErr = net.Listen("tcp", fmt.Sprintf(":%d", port))
if realErr != nil {
a.log.Warnf("failed listen: %v", realErr)
return false, nil
}
return true, nil
})
errors.CheckError(realErr)
// Cmux is used to support servicing gRPC and HTTP1.1+JSON on the same port
tcpm := cmux.New(conn)
var tlsm cmux.CMux
var grpcL net.Listener
@@ -122,21 +170,107 @@ func (a *ArgoCDServer) Run() {
}
tlsl = tls.NewListener(tlsl, &tlsConfig)
// Now, we build another mux recursively to match HTTPS and GoRPC.
// Now, we build another mux recursively to match HTTPS and gRPC.
tlsm = cmux.New(tlsl)
httpsL = tlsm.Match(cmux.HTTP1Fast())
grpcL = tlsm.Match(cmux.Any())
}
// Start the muxed listeners for our servers
log.Infof("argocd %s serving on port %d (tls: %v, namespace: %s)", argocd.GetVersion(), port, a.useTLS(), a.Namespace)
go func() { errors.CheckError(grpcS.Serve(grpcL)) }()
go func() { errors.CheckError(httpS.Serve(httpL)) }()
log.Infof("argocd %s serving on port %d (url: %s, tls: %v, namespace: %s, sso: %v)",
argocd.GetVersion(), port, a.settings.URL, a.useTLS(), a.Namespace, a.settings.IsSSOConfigured())
go func() { a.checkServeErr("grpcS", grpcS.Serve(grpcL)) }()
go func() { a.checkServeErr("httpS", httpS.Serve(httpL)) }()
if a.useTLS() {
go func() { errors.CheckError(httpsS.Serve(httpsL)) }()
go func() { errors.CheckError(tlsm.Serve()) }()
go func() { a.checkServeErr("httpsS", httpsS.Serve(httpsL)) }()
go func() { a.checkServeErr("tlsm", tlsm.Serve()) }()
}
err = tcpm.Serve()
go a.watchSettings(ctx)
go a.rbacPolicyLoader(ctx)
go func() { a.checkServeErr("tcpm", tcpm.Serve()) }()
a.stopCh = make(chan struct{})
<-a.stopCh
errors.CheckError(conn.Close())
}
// checkServeErr checks the error from a .Serve() call to decide if it was a graceful shutdown
func (a *ArgoCDServer) checkServeErr(name string, err error) {
if err != nil {
if a.stopCh == nil {
// a nil stopCh indicates a graceful shutdown
log.Infof("graceful shutdown %s: %v", name, err)
} else {
log.Fatalf("%s: %v", name, err)
}
} else {
log.Infof("graceful shutdown %s", name)
}
}
func (a *ArgoCDServer) Shutdown() {
log.Info("Shut down requested")
stopCh := a.stopCh
a.stopCh = nil
if stopCh != nil {
close(stopCh)
}
}
// watchSettings watches the configmap and secret for any setting updates that would warrant a
// restart of the API server.
func (a *ArgoCDServer) watchSettings(ctx context.Context) {
a.settingsMgr.StartNotifier(ctx, a.settings)
updateCh := make(chan struct{}, 1)
a.settingsMgr.Subscribe(updateCh)
prevDexCfgBytes, err := dex.GenerateDexConfigYAML(a.settings)
errors.CheckError(err)
prevGitHubSecret := a.settings.WebhookGitHubSecret
prevGitLabSecret := a.settings.WebhookGitLabSecret
prevBitBucketUUID := a.settings.WebhookBitbucketUUID
var prevCert, prevCertKey string
if a.settings.Certificate != nil {
prevCert, prevCertKey = tlsutil.EncodeX509KeyPairString(*a.settings.Certificate)
}
for {
<-updateCh
newDexCfgBytes, err := dex.GenerateDexConfigYAML(a.settings)
errors.CheckError(err)
if string(newDexCfgBytes) != string(prevDexCfgBytes) {
log.Infof("dex config modified. restarting")
break
}
if prevGitHubSecret != a.settings.WebhookGitHubSecret {
log.Infof("github secret modified. restarting")
break
}
if prevGitLabSecret != a.settings.WebhookGitLabSecret {
log.Infof("gitlab secret modified. restarting")
break
}
if prevBitBucketUUID != a.settings.WebhookBitbucketUUID {
log.Infof("bitbucket uuid modified. restarting")
break
}
var newCert, newCertKey string
if a.settings.Certificate != nil {
newCert, newCertKey = tlsutil.EncodeX509KeyPairString(*a.settings.Certificate)
}
if newCert != prevCert || newCertKey != prevCertKey {
log.Infof("tls certificate modified. restarting")
break
}
}
log.Info("shutting down settings watch")
a.Shutdown()
a.settingsMgr.Unsubscribe(updateCh)
close(updateCh)
}
func (a *ArgoCDServer) rbacPolicyLoader(ctx context.Context) {
err := a.enf.RunPolicyLoader(ctx)
errors.CheckError(err)
}
@@ -158,6 +292,7 @@ func (a *ArgoCDServer) newGRPCServer() *grpc.Server {
grpc_util.PanicLoggerStreamServerInterceptor(a.log),
)))
sOpts = append(sOpts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
bug21955WorkaroundInterceptor,
grpc_logrus.UnaryServerInterceptor(a.log),
grpc_auth.UnaryServerInterceptor(a.authenticate),
grpc_util.ErrorCodeUnaryServerInterceptor(),
@@ -165,37 +300,32 @@ func (a *ArgoCDServer) newGRPCServer() *grpc.Server {
)))
grpcS := grpc.NewServer(sOpts...)
clusterService := cluster.NewServer(a.Namespace, a.KubeClientset, a.AppClientset)
repoService := repository.NewServer(a.Namespace, a.KubeClientset, a.AppClientset)
sessionService := session.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.settings)
applicationService := application.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.RepoClientset, repoService, clusterService)
db := db.NewDB(a.Namespace, a.KubeClientset)
clusterService := cluster.NewServer(db, a.enf)
repoService := repository.NewServer(a.RepoClientset, db, a.enf)
sessionService := session.NewServer(a.sessionMgr)
applicationService := application.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.RepoClientset, db, a.enf)
settingsService := settings.NewServer(a.settingsMgr)
version.RegisterVersionServiceServer(grpcS, &version.Server{})
cluster.RegisterClusterServiceServer(grpcS, clusterService)
application.RegisterApplicationServiceServer(grpcS, applicationService)
repository.RegisterRepositoryServiceServer(grpcS, repoService)
session.RegisterSessionServiceServer(grpcS, sessionService)
settings.RegisterSettingsServiceServer(grpcS, settingsService)
// Register reflection service on gRPC server.
reflection.Register(grpcS)
return grpcS
}
// MakeCookieMetadata generates a string representing a Web cookie. Yum!
func (a *ArgoCDServer) makeCookieMetadata(key, value string, flags ...string) string {
components := []string{
fmt.Sprintf("%s=%s", key, value),
}
if a.ArgoCDServerOpts.Insecure == false {
components = append(components, "Secure")
}
components = append(components, flags...)
return strings.Join(components, "; ")
}
// TranslateGrpcCookieHeader conditionally sets a cookie on the response.
func (a *ArgoCDServer) translateGrpcCookieHeader(ctx context.Context, w http.ResponseWriter, resp golang_proto.Message) error {
if sessionResp, ok := resp.(*session.SessionResponse); ok {
cookie := a.makeCookieMetadata(authCookieName, sessionResp.Token, "path=/")
flags := []string{"path=/"}
if !a.Insecure {
flags = append(flags, "Secure")
}
cookie := util_session.MakeCookieMetadata(common.AuthCookieName, sessionResp.Token, flags...)
w.Header().Set("Set-Cookie", cookie)
}
return nil
@@ -203,11 +333,12 @@ func (a *ArgoCDServer) translateGrpcCookieHeader(ctx context.Context, w http.Res
// newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented
// using grpc-gateway as a proxy to the gRPC server.
func (a *ArgoCDServer) newHTTPServer(ctx context.Context) *http.Server {
func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int) *http.Server {
endpoint := fmt.Sprintf("localhost:%d", port)
mux := http.NewServeMux()
httpS := http.Server{
Addr: endpoint,
Handler: mux,
Handler: &bug21955Workaround{handler: mux},
}
var dOpts []grpc.DialOption
if a.useTLS() {
@@ -215,16 +346,9 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context) *http.Server {
// grpc-gateway is just translating HTTP/HTTPS requests as gRPC requests over localhost,
// so we need to supply the same certificates to establish the connections that a normal,
// external gRPC client would need.
certPool := x509.NewCertPool()
pemCertBytes, _ := tlsutil.EncodeX509KeyPair(*a.settings.Certificate)
ok := certPool.AppendCertsFromPEM(pemCertBytes)
if !ok {
panic("bad certs")
}
dCreds := credentials.NewTLS(&tls.Config{
RootCAs: certPool,
InsecureSkipVerify: true,
})
tlsConfig := a.settings.TLSConfig()
tlsConfig.InsecureSkipVerify = true
dCreds := credentials.NewTLS(tlsConfig)
dOpts = append(dOpts, grpc.WithTransportCredentials(dCreds))
} else {
dOpts = append(dOpts, grpc.WithInsecure())
@@ -245,6 +369,14 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context) *http.Server {
mustRegisterGWHandler(application.RegisterApplicationServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dOpts)
mustRegisterGWHandler(repository.RegisterRepositoryServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dOpts)
mustRegisterGWHandler(session.RegisterSessionServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dOpts)
mustRegisterGWHandler(settings.RegisterSettingsServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dOpts)
// Dex reverse proxy and client app and OAuth2 login/callback
a.registerDexHandlers(mux)
// Webhook handler for git events
acdWebhookHandler := webhook.NewHandler(a.Namespace, a.AppClientset, a.settings)
mux.HandleFunc("/api/webhook", acdWebhookHandler.Handler)
if a.StaticAssetsDir != "" {
mux.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
@@ -268,10 +400,26 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context) *http.Server {
return &httpS
}
// registerDexHandlers will register dex HTTP handlers, creating the the OAuth client app
func (a *ArgoCDServer) registerDexHandlers(mux *http.ServeMux) {
if !a.settings.IsSSOConfigured() {
return
}
// Run dex OpenID Connect Identity Provider behind a reverse proxy (served at /api/dex)
var err error
mux.HandleFunc(common.DexAPIEndpoint+"/", dexutil.NewDexHTTPReverseProxy())
tlsConfig := a.settings.TLSConfig()
tlsConfig.InsecureSkipVerify = true
a.ssoClientApp, err = dexutil.NewClientApp(a.settings, a.sessionMgr)
errors.CheckError(err)
mux.HandleFunc(common.LoginEndpoint, a.ssoClientApp.HandleLogin)
mux.HandleFunc(common.CallbackEndpoint, a.ssoClientApp.HandleCallback)
}
// newRedirectServer returns an HTTP server which does a 307 redirect to the HTTPS server
func newRedirectServer() *http.Server {
func newRedirectServer(port int) *http.Server {
return &http.Server{
Addr: endpoint,
Addr: fmt.Sprintf("localhost:%d", port),
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
target := "https://" + req.Host + req.URL.Path
if len(req.URL.RawQuery) > 0 {
@@ -292,41 +440,111 @@ func mustRegisterGWHandler(register registerFunc, ctx context.Context, mux *runt
}
}
// parseTokens tests a slice of strings and returns `true` only if any of them are valid.
func (a *ArgoCDServer) parseTokens(tokens []string) bool {
mgr := util_session.MakeSessionManager(a.settings.ServerSignature)
for _, token := range tokens {
_, err := mgr.Parse(token)
if err == nil {
return true
}
}
return false
}
// Authenticate checks for the presence of a token when accessing server-side resources.
// Authenticate checks for the presence of a valid token when accessing server-side resources.
func (a *ArgoCDServer) authenticate(ctx context.Context) (context.Context, error) {
if a.DisableAuth {
return ctx, nil
}
if md, ok := metadata.FromIncomingContext(ctx); ok {
tokens := md["tokens"]
// Extract only the value portion of cookie-stored tokens
for _, cookieToken := range md["grpcgateway-cookie"] {
tokenPair := strings.SplitN(cookieToken, "=", 2)
if len(tokenPair) == 2 {
tokens = append(tokens, tokenPair[1])
}
}
// Check both gRPC-provided tokens and Web-provided (cookie-based) ones
if a.parseTokens(tokens) {
return ctx, nil
}
return ctx, status.Errorf(codes.Unauthenticated, "user is not allowed access")
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return ctx, ErrNoSession
}
return ctx, status.Errorf(codes.Unauthenticated, "empty metadata")
tokenString := getToken(md)
if tokenString == "" {
return ctx, ErrNoSession
}
claims, err := a.sessionMgr.VerifyToken(tokenString)
if err != nil {
return ctx, status.Errorf(codes.Unauthenticated, "invalid session: %v", err)
}
// Add claims to the context to inspect for RBAC
ctx = context.WithValue(ctx, "claims", claims)
return ctx, nil
}
// getToken extracts the token from gRPC metadata or cookie headers
func getToken(md metadata.MD) string {
// check the "token" metadata
tokens, ok := md[apiclient.MetaDataTokenKey]
if ok && len(tokens) > 0 {
return tokens[0]
}
// check the legacy key (v0.3.2 and below). 'tokens' was renamed to 'token'
tokens, ok = md["tokens"]
if ok && len(tokens) > 0 {
return tokens[0]
}
// check the HTTP cookie
for _, cookieToken := range md["grpcgateway-cookie"] {
header := http.Header{}
header.Add("Cookie", cookieToken)
request := http.Request{Header: header}
token, err := request.Cookie(common.AuthCookieName)
if err == nil {
return token.Value
}
}
return ""
}
// Workaround for https://github.com/golang/go/issues/21955 to support escaped URLs in URL path.
type bug21955Workaround struct {
handler http.Handler
}
func (bf *bug21955Workaround) ServeHTTP(w http.ResponseWriter, r *http.Request) {
paths := map[string][]string{
"/api/v1/repositories/": {"ksonnet"},
"/api/v1/clusters/": {},
}
for path, subPaths := range paths {
if strings.Index(r.URL.Path, path) > -1 {
postfix := ""
for _, subPath := range subPaths {
if strings.LastIndex(r.URL.Path, subPath) == len(r.URL.Path)-len(subPath) {
postfix = "/" + subPath
r.URL.Path = r.URL.Path[0 : len(r.URL.Path)-len(subPath)-1]
break
}
}
r.URL.Path = path + url.QueryEscape(r.URL.Path[len(path):]) + postfix
break
}
}
bf.handler.ServeHTTP(w, r)
}
func bug21955WorkaroundInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
if rq, ok := req.(*repository.RepoQuery); ok {
repo, err := url.QueryUnescape(rq.Repo)
if err != nil {
return nil, err
}
rq.Repo = repo
} else if rk, ok := req.(*repository.RepoKsonnetQuery); ok {
repo, err := url.QueryUnescape(rk.Repo)
if err != nil {
return nil, err
}
rk.Repo = repo
} else if ru, ok := req.(*repository.RepoUpdateRequest); ok {
repo, err := url.QueryUnescape(ru.Repo.Repo)
if err != nil {
return nil, err
}
ru.Repo.Repo = repo
} else if cq, ok := req.(*cluster.ClusterQuery); ok {
server, err := url.QueryUnescape(cq.Server)
if err != nil {
return nil, err
}
cq.Server = server
} else if cu, ok := req.(*cluster.ClusterUpdateRequest); ok {
server, err := url.QueryUnescape(cu.Cluster.Server)
if err != nil {
return nil, err
}
cu.Cluster.Server = server
}
return handler(ctx, req)
}

View File

@@ -2,69 +2,62 @@ package session
import (
"context"
"fmt"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/util/config"
"github.com/argoproj/argo-cd/util/password"
"github.com/argoproj/argo-cd/util/session"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/client-go/kubernetes"
"github.com/argoproj/argo-cd/util/jwt"
sessionmgr "github.com/argoproj/argo-cd/util/session"
)
// Server provides a Session service
type Server struct {
ns string
kubeclientset kubernetes.Interface
appclientset appclientset.Interface
serversettings config.ArgoCDSettings
mgr *sessionmgr.SessionManager
}
// NewServer returns a new instance of the Session service
func NewServer(namespace string, kubeclientset kubernetes.Interface, appclientset appclientset.Interface, serversettings config.ArgoCDSettings) *Server {
func NewServer(mgr *sessionmgr.SessionManager) *Server {
return &Server{
ns: namespace,
appclientset: appclientset,
kubeclientset: kubeclientset,
serversettings: serversettings,
mgr: mgr,
}
}
// invalidLoginMessage, for security purposes, doesn't say whether the username or password was invalid. This does not mitigate the potential for timing attacks to determine which is which.
const (
invalidLoginError = "Invalid username or password"
blankPasswordError = "Blank passwords are not allowed"
)
// Create an authentication cookie for the client.
// Create generates a non-expiring JWT token signed by ArgoCD. This endpoint is used in two circumstances:
// 1. Web/CLI logins for local users (i.e. admin), for when SSO is not configured. In this case,
// username/password.
// 2. CLI login which completed an OAuth2 login flow but wish to store a permanent token in their config
func (s *Server) Create(ctx context.Context, q *SessionCreateRequest) (*SessionResponse, error) {
if q.Password == "" {
err := status.Errorf(codes.Unauthenticated, blankPasswordError)
return nil, err
var tokenString string
var err error
if q.Password != "" {
// first case
err = s.mgr.VerifyUsernamePassword(q.Username, q.Password)
if err != nil {
return nil, err
}
tokenString, err = s.mgr.Create(q.Username)
if err != nil {
return nil, err
}
} else if q.Token != "" {
// second case
claimsIf, err := s.mgr.VerifyToken(q.Token)
if err != nil {
return nil, err
}
claims, err := jwt.MapClaims(claimsIf)
if err != nil {
return nil, err
}
tokenString, err = s.mgr.ReissueClaims(claims)
if err != nil {
return nil, fmt.Errorf("Failed to resign claims: %v", err)
}
} else {
return nil, status.Errorf(codes.Unauthenticated, "no credentials supplied")
}
passwordHash, ok := s.serversettings.LocalUsers[q.Username]
if !ok {
// Username was not found in local user store.
// Ensure we still send password to hashing algorithm for comparison.
// This mitigates potential for timing attacks that benefit from short-circuiting,
// provided the hashing library/algorithm in use doesn't itself short-circuit.
passwordHash = ""
}
valid, _ := password.VerifyPassword(q.Password, passwordHash)
if !valid {
err := status.Errorf(codes.Unauthenticated, invalidLoginError)
return nil, err
}
sessionManager := session.MakeSessionManager(s.serversettings.ServerSignature)
token, err := sessionManager.Create(q.Username)
if err != nil {
token = ""
}
return &SessionResponse{token}, err
return &SessionResponse{Token: tokenString}, nil
}
// Delete an authentication cookie from the client. This makes sense only for the Web client.

View File

@@ -46,6 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type SessionCreateRequest struct {
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
}
func (m *SessionCreateRequest) Reset() { *m = SessionCreateRequest{} }
@@ -67,6 +68,13 @@ func (m *SessionCreateRequest) GetPassword() string {
return ""
}
func (m *SessionCreateRequest) GetToken() string {
if m != nil {
return m.Token
}
return ""
}
// SessionDeleteRequest is for logging out.
type SessionDeleteRequest struct {
}
@@ -235,6 +243,12 @@ func (m *SessionCreateRequest) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintSession(dAtA, i, uint64(len(m.Password)))
i += copy(dAtA[i:], m.Password)
}
if len(m.Token) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintSession(dAtA, i, uint64(len(m.Token)))
i += copy(dAtA[i:], m.Token)
}
return i, nil
}
@@ -300,6 +314,10 @@ func (m *SessionCreateRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovSession(uint64(l))
}
l = len(m.Token)
if l > 0 {
n += 1 + l + sovSession(uint64(l))
}
return n
}
@@ -419,6 +437,35 @@ func (m *SessionCreateRequest) Unmarshal(dAtA []byte) error {
}
m.Password = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSession
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSession
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Token = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSession(dAtA[iNdEx:])
@@ -677,27 +724,28 @@ var (
func init() { proto.RegisterFile("server/session/session.proto", fileDescriptorSession) }
var fileDescriptorSession = []byte{
// 349 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4a, 0xfb, 0x40,
0x10, 0xc6, 0xd9, 0xc2, 0xbf, 0x7f, 0xdd, 0x83, 0xc5, 0x10, 0xb4, 0x84, 0x5a, 0x24, 0x17, 0xa5,
0x60, 0x96, 0xea, 0xa5, 0x78, 0x54, 0x2f, 0x5e, 0x3c, 0xb4, 0xb7, 0x82, 0x87, 0x6d, 0x3a, 0x6c,
0xd7, 0xa6, 0x3b, 0xeb, 0xee, 0x36, 0xde, 0x7d, 0x05, 0x5f, 0x4a, 0xf0, 0x22, 0xf8, 0x02, 0x52,
0x7c, 0x10, 0xe9, 0x26, 0x29, 0xb4, 0x95, 0x9e, 0xb2, 0xdf, 0x7e, 0x93, 0xdf, 0x7c, 0x99, 0x09,
0x6d, 0x59, 0x30, 0x39, 0x18, 0x66, 0xc1, 0x5a, 0x89, 0xaa, 0x7a, 0x26, 0xda, 0xa0, 0xc3, 0xe0,
0x7f, 0x29, 0xa3, 0x50, 0xa0, 0x40, 0x7f, 0xc7, 0x96, 0xa7, 0xc2, 0x8e, 0x5a, 0x02, 0x51, 0x64,
0xc0, 0xb8, 0x96, 0x8c, 0x2b, 0x85, 0x8e, 0x3b, 0x89, 0xca, 0x96, 0x6e, 0x3c, 0xed, 0xd9, 0x44,
0xa2, 0x77, 0x53, 0x34, 0xc0, 0xf2, 0x2e, 0x13, 0xa0, 0xc0, 0x70, 0x07, 0xe3, 0xb2, 0xe6, 0x5e,
0x48, 0x37, 0x99, 0x8f, 0x92, 0x14, 0x67, 0x8c, 0x1b, 0xdf, 0xe2, 0xc9, 0x1f, 0x2e, 0xd2, 0x31,
0xd3, 0x53, 0xb1, 0x7c, 0xd9, 0x32, 0xae, 0x75, 0x26, 0x53, 0x0f, 0x67, 0x79, 0x97, 0x67, 0x7a,
0xc2, 0xb7, 0x50, 0xf1, 0x03, 0x0d, 0x07, 0x45, 0xda, 0x5b, 0x03, 0xdc, 0x41, 0x1f, 0x9e, 0xe7,
0x60, 0x5d, 0x10, 0xd1, 0xbd, 0xb9, 0x05, 0xa3, 0xf8, 0x0c, 0x9a, 0xe4, 0x94, 0x9c, 0xef, 0xf7,
0x57, 0x7a, 0xe9, 0x69, 0x6e, 0xed, 0x0b, 0x9a, 0x71, 0xb3, 0x56, 0x78, 0x95, 0x8e, 0x8f, 0x56,
0xbc, 0x3b, 0xc8, 0x60, 0xc5, 0x8b, 0xcf, 0x68, 0xa3, 0xbc, 0xef, 0x83, 0xd5, 0xa8, 0x2c, 0x04,
0x21, 0xfd, 0xe7, 0x70, 0x0a, 0xaa, 0xe4, 0x17, 0xe2, 0xf2, 0x83, 0xd0, 0x83, 0xb2, 0x72, 0x00,
0x26, 0x97, 0x29, 0x04, 0x8f, 0xb4, 0x5e, 0x84, 0x0b, 0x4e, 0x92, 0x6a, 0xd2, 0x7f, 0x85, 0x8e,
0x9a, 0x9b, 0x76, 0xd5, 0x2b, 0x8e, 0x5e, 0xbf, 0x7e, 0xde, 0x6a, 0x61, 0xdc, 0xf0, 0x73, 0xcd,
0xbb, 0xd5, 0xc6, 0xae, 0x49, 0x27, 0x18, 0xd2, 0x7a, 0x91, 0x75, 0x1b, 0xbf, 0xf6, 0x0d, 0x3b,
0xf0, 0xc7, 0x1e, 0x7f, 0xd8, 0xd9, 0xc4, 0xdf, 0xf4, 0xde, 0x17, 0x6d, 0xf2, 0xb9, 0x68, 0x93,
0xef, 0x45, 0x9b, 0x0c, 0x3b, 0xbb, 0xf6, 0xb6, 0xfe, 0x4b, 0x8d, 0xea, 0x7e, 0x3f, 0x57, 0xbf,
0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0xad, 0xb8, 0xb8, 0x6b, 0x02, 0x00, 0x00,
// 356 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xb1, 0x4e, 0xeb, 0x30,
0x14, 0x86, 0xe5, 0x5e, 0xdd, 0xde, 0x7b, 0x3d, 0xdc, 0x8a, 0x28, 0x82, 0x28, 0x2a, 0x15, 0xca,
0x02, 0xaa, 0x44, 0xac, 0xc2, 0x52, 0x31, 0x02, 0x0b, 0x6b, 0xbb, 0x55, 0x62, 0x70, 0x93, 0xa3,
0xd4, 0x34, 0xf5, 0x31, 0xb6, 0x1b, 0x76, 0x5e, 0x81, 0x97, 0x42, 0x62, 0x41, 0xe2, 0x05, 0x50,
0xc5, 0x83, 0xa0, 0x3a, 0x49, 0xa1, 0x2d, 0xea, 0x14, 0xff, 0xfe, 0x9d, 0xef, 0x3f, 0x3e, 0xc7,
0xb4, 0x6d, 0x40, 0x17, 0xa0, 0x99, 0x01, 0x63, 0x04, 0xca, 0xfa, 0x1b, 0x2b, 0x8d, 0x16, 0xbd,
0x3f, 0x95, 0x0c, 0xfd, 0x0c, 0x33, 0x74, 0x7b, 0x6c, 0xb9, 0x2a, 0xed, 0xb0, 0x9d, 0x21, 0x66,
0x39, 0x30, 0xae, 0x04, 0xe3, 0x52, 0xa2, 0xe5, 0x56, 0xa0, 0x34, 0x95, 0x1b, 0x4d, 0xfb, 0x26,
0x16, 0xe8, 0xdc, 0x04, 0x35, 0xb0, 0xa2, 0xc7, 0x32, 0x90, 0xa0, 0xb9, 0x85, 0xb4, 0x3a, 0x73,
0x93, 0x09, 0x3b, 0x99, 0x8f, 0xe3, 0x04, 0x67, 0x8c, 0x6b, 0x17, 0x71, 0xe7, 0x16, 0xa7, 0x49,
0xca, 0xd4, 0x34, 0x5b, 0xfe, 0x6c, 0x18, 0x57, 0x2a, 0x17, 0x89, 0x83, 0xb3, 0xa2, 0xc7, 0x73,
0x35, 0xe1, 0x5b, 0xa8, 0x28, 0xa5, 0xfe, 0xb0, 0xac, 0xf6, 0x4a, 0x03, 0xb7, 0x30, 0x80, 0xfb,
0x39, 0x18, 0xeb, 0x85, 0xf4, 0xef, 0xdc, 0x80, 0x96, 0x7c, 0x06, 0x01, 0x39, 0x22, 0x27, 0xff,
0x06, 0x2b, 0xbd, 0xf4, 0x14, 0x37, 0xe6, 0x01, 0x75, 0x1a, 0x34, 0x4a, 0xaf, 0xd6, 0x9e, 0x4f,
0x7f, 0x5b, 0x9c, 0x82, 0x0c, 0x7e, 0x39, 0xa3, 0x14, 0xd1, 0xfe, 0x2a, 0xe5, 0x1a, 0x72, 0x58,
0xa5, 0x44, 0xc7, 0xb4, 0x55, 0xed, 0x0f, 0xc0, 0x28, 0x94, 0x06, 0xbe, 0x00, 0xe4, 0x1b, 0xe0,
0xec, 0x85, 0xd0, 0xff, 0xd5, 0xc9, 0x21, 0xe8, 0x42, 0x24, 0xe0, 0xdd, 0xd2, 0x66, 0x59, 0xb2,
0x77, 0x18, 0xd7, 0xfd, 0xff, 0xe9, 0x2a, 0x61, 0xb0, 0x69, 0xd7, 0x59, 0x51, 0xf8, 0xf8, 0xf6,
0xf1, 0xd4, 0xf0, 0xa3, 0x96, 0xeb, 0x76, 0xd1, 0xab, 0xe7, 0x78, 0x41, 0xba, 0xde, 0x88, 0x36,
0xcb, 0x5a, 0xb7, 0xf1, 0x6b, 0x77, 0xd8, 0x81, 0x3f, 0x70, 0xf8, 0xbd, 0xee, 0x26, 0xfe, 0xb2,
0xff, 0xbc, 0xe8, 0x90, 0xd7, 0x45, 0x87, 0xbc, 0x2f, 0x3a, 0x64, 0xd4, 0xdd, 0x35, 0xcd, 0xf5,
0x87, 0x36, 0x6e, 0xba, 0xa9, 0x9d, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x28, 0x53, 0xc6,
0x81, 0x02, 0x00, 0x00,
}

View File

@@ -16,6 +16,7 @@ import "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1/generated.prot
message SessionCreateRequest {
string username = 1;
string password = 2;
string token = 3;
}
// SessionDeleteRequest is for logging out.

View File

@@ -0,0 +1,41 @@
package settings
import (
"github.com/argoproj/argo-cd/util/settings"
"github.com/ghodss/yaml"
"golang.org/x/net/context"
)
// Server provides a Settings service
type Server struct {
	// mgr provides read access to the persisted ArgoCD settings
	mgr *settings.SettingsManager
}
// NewServer returns a new instance of the Settings service
func NewServer(mgr *settings.SettingsManager) *Server {
	return &Server{
		mgr: mgr,
	}
}
// Get returns ArgoCD settings
func (s *Server) Get(ctx context.Context, q *SettingsQuery) (*Settings, error) {
	stored, err := s.mgr.GetSettings()
	if err != nil {
		return nil, err
	}
	resp := &Settings{
		URL: stored.URL,
	}
	// Dex config parsing is best-effort: on YAML parse failure the
	// DexConfig field is simply left nil rather than failing the request.
	var dexCfg DexConfig
	if yaml.Unmarshal([]byte(stored.DexConfig), &dexCfg) == nil {
		resp.DexConfig = &dexCfg
	}
	return resp, nil
}
// AuthFuncOverride disables authentication for settings service.
// The gRPC auth middleware invokes this in place of the default
// authenticator, so /api/v1/settings is reachable without a session —
// presumably so the UI can fetch SSO configuration before login; confirm
// with the auth interceptor's callers.
func (s *Server) AuthFuncOverride(ctx context.Context, fullMethodName string) (context.Context, error) {
	return ctx, nil
}

View File

@@ -0,0 +1,859 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: server/settings/settings.proto
/*
Package settings is a generated protocol buffer package.
Settings Service
Settings Service API retrieves ArgoCD settings
It is generated from these files:
server/settings/settings.proto
It has these top-level messages:
SettingsQuery
Settings
DexConfig
Connector
*/
package settings
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// SettingsQuery is a query for ArgoCD settings
type SettingsQuery struct {
}
func (m *SettingsQuery) Reset() { *m = SettingsQuery{} }
func (m *SettingsQuery) String() string { return proto.CompactTextString(m) }
func (*SettingsQuery) ProtoMessage() {}
func (*SettingsQuery) Descriptor() ([]byte, []int) { return fileDescriptorSettings, []int{0} }
type Settings struct {
URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
DexConfig *DexConfig `protobuf:"bytes,2,opt,name=dexConfig" json:"dexConfig,omitempty"`
}
func (m *Settings) Reset() { *m = Settings{} }
func (m *Settings) String() string { return proto.CompactTextString(m) }
func (*Settings) ProtoMessage() {}
func (*Settings) Descriptor() ([]byte, []int) { return fileDescriptorSettings, []int{1} }
func (m *Settings) GetURL() string {
if m != nil {
return m.URL
}
return ""
}
func (m *Settings) GetDexConfig() *DexConfig {
if m != nil {
return m.DexConfig
}
return nil
}
type DexConfig struct {
Connectors []*Connector `protobuf:"bytes,1,rep,name=connectors" json:"connectors,omitempty"`
}
func (m *DexConfig) Reset() { *m = DexConfig{} }
func (m *DexConfig) String() string { return proto.CompactTextString(m) }
func (*DexConfig) ProtoMessage() {}
func (*DexConfig) Descriptor() ([]byte, []int) { return fileDescriptorSettings, []int{2} }
func (m *DexConfig) GetConnectors() []*Connector {
if m != nil {
return m.Connectors
}
return nil
}
type Connector struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
}
func (m *Connector) Reset() { *m = Connector{} }
func (m *Connector) String() string { return proto.CompactTextString(m) }
func (*Connector) ProtoMessage() {}
func (*Connector) Descriptor() ([]byte, []int) { return fileDescriptorSettings, []int{3} }
func (m *Connector) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Connector) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func init() {
proto.RegisterType((*SettingsQuery)(nil), "cluster.SettingsQuery")
proto.RegisterType((*Settings)(nil), "cluster.Settings")
proto.RegisterType((*DexConfig)(nil), "cluster.DexConfig")
proto.RegisterType((*Connector)(nil), "cluster.Connector")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for SettingsService service
type SettingsServiceClient interface {
// Get returns ArgoCD settings
Get(ctx context.Context, in *SettingsQuery, opts ...grpc.CallOption) (*Settings, error)
}
type settingsServiceClient struct {
cc *grpc.ClientConn
}
func NewSettingsServiceClient(cc *grpc.ClientConn) SettingsServiceClient {
return &settingsServiceClient{cc}
}
func (c *settingsServiceClient) Get(ctx context.Context, in *SettingsQuery, opts ...grpc.CallOption) (*Settings, error) {
out := new(Settings)
err := grpc.Invoke(ctx, "/cluster.SettingsService/Get", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for SettingsService service
type SettingsServiceServer interface {
// Get returns ArgoCD settings
Get(context.Context, *SettingsQuery) (*Settings, error)
}
func RegisterSettingsServiceServer(s *grpc.Server, srv SettingsServiceServer) {
s.RegisterService(&_SettingsService_serviceDesc, srv)
}
func _SettingsService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SettingsQuery)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SettingsServiceServer).Get(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/cluster.SettingsService/Get",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SettingsServiceServer).Get(ctx, req.(*SettingsQuery))
}
return interceptor(ctx, in, info, handler)
}
var _SettingsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "cluster.SettingsService",
HandlerType: (*SettingsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Get",
Handler: _SettingsService_Get_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "server/settings/settings.proto",
}
func (m *SettingsQuery) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SettingsQuery) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
return i, nil
}
func (m *Settings) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Settings) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.URL) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSettings(dAtA, i, uint64(len(m.URL)))
i += copy(dAtA[i:], m.URL)
}
if m.DexConfig != nil {
dAtA[i] = 0x12
i++
i = encodeVarintSettings(dAtA, i, uint64(m.DexConfig.Size()))
n1, err := m.DexConfig.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
return i, nil
}
func (m *DexConfig) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *DexConfig) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Connectors) > 0 {
for _, msg := range m.Connectors {
dAtA[i] = 0xa
i++
i = encodeVarintSettings(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *Connector) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Connector) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSettings(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if len(m.Type) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintSettings(dAtA, i, uint64(len(m.Type)))
i += copy(dAtA[i:], m.Type)
}
return i, nil
}
// encodeVarintSettings writes v at dAtA[offset:] in protobuf base-128
// varint form (7 data bits per byte, high bit set on all but the last)
// and returns the index just past the last byte written.
func encodeVarintSettings(dAtA []byte, offset int, v uint64) int {
	i := offset
	for ; v >= 0x80; v >>= 7 {
		dAtA[i] = byte(v) | 0x80
		i++
	}
	dAtA[i] = byte(v)
	return i + 1
}
func (m *SettingsQuery) Size() (n int) {
var l int
_ = l
return n
}
func (m *Settings) Size() (n int) {
var l int
_ = l
l = len(m.URL)
if l > 0 {
n += 1 + l + sovSettings(uint64(l))
}
if m.DexConfig != nil {
l = m.DexConfig.Size()
n += 1 + l + sovSettings(uint64(l))
}
return n
}
func (m *DexConfig) Size() (n int) {
var l int
_ = l
if len(m.Connectors) > 0 {
for _, e := range m.Connectors {
l = e.Size()
n += 1 + l + sovSettings(uint64(l))
}
}
return n
}
func (m *Connector) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovSettings(uint64(l))
}
l = len(m.Type)
if l > 0 {
n += 1 + l + sovSettings(uint64(l))
}
return n
}
// sovSettings returns the number of bytes needed to encode x as a
// protobuf base-128 varint (one byte per 7 bits, minimum one byte).
func sovSettings(x uint64) (n int) {
	n = 1
	for x >>= 7; x != 0; x >>= 7 {
		n++
	}
	return n
}
// sozSettings returns the varint-encoded size of x after zig-zag
// encoding (sint64 wire format), which maps signed values of small
// magnitude to small unsigned values.
func sozSettings(x uint64) (n int) {
	z := (x << 1) ^ uint64(int64(x)>>63)
	n = 1
	for z >>= 7; z != 0; z >>= 7 {
		n++
	}
	return n
}
func (m *SettingsQuery) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SettingsQuery: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SettingsQuery: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipSettings(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSettings
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Settings) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Settings: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Settings: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSettings
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.URL = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DexConfig", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthSettings
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.DexConfig == nil {
m.DexConfig = &DexConfig{}
}
if err := m.DexConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSettings(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSettings
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *DexConfig) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: DexConfig: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: DexConfig: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Connectors", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthSettings
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Connectors = append(m.Connectors, &Connector{})
if err := m.Connectors[len(m.Connectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSettings(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSettings
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Connector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Connector: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Connector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSettings
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSettings
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSettings
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSettings(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSettings
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipSettings(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSettings
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSettings
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSettings
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthSettings
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSettings
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipSettings(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthSettings = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowSettings = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("server/settings/settings.proto", fileDescriptorSettings) }
var fileDescriptorSettings = []byte{
// 322 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x41, 0x4b, 0xc3, 0x40,
0x10, 0x85, 0xd9, 0x46, 0xac, 0x19, 0x91, 0xea, 0x22, 0x12, 0x8b, 0xc4, 0x92, 0x53, 0x41, 0x4c,
0xb4, 0x3d, 0x79, 0x12, 0x5a, 0x41, 0x10, 0x2f, 0xa6, 0x88, 0x20, 0x78, 0x48, 0xd3, 0x71, 0x8d,
0xb4, 0x3b, 0x65, 0xb3, 0x29, 0xf6, 0xea, 0x5f, 0xf0, 0x4f, 0x79, 0x14, 0xbc, 0x8b, 0x04, 0x7f,
0x88, 0x74, 0xdb, 0x44, 0xab, 0xb7, 0xc7, 0xf7, 0x66, 0x92, 0xb7, 0xf3, 0xc0, 0x4d, 0x51, 0x4d,
0x50, 0x05, 0x29, 0x6a, 0x9d, 0x48, 0x91, 0x96, 0xc2, 0x1f, 0x2b, 0xd2, 0xc4, 0xab, 0xf1, 0x30,
0x4b, 0x35, 0xaa, 0xfa, 0xb6, 0x20, 0x41, 0x86, 0x05, 0x33, 0x35, 0xb7, 0xeb, 0x7b, 0x82, 0x48,
0x0c, 0x31, 0x88, 0xc6, 0x49, 0x10, 0x49, 0x49, 0x3a, 0xd2, 0x09, 0xc9, 0xc5, 0xb2, 0x57, 0x83,
0x8d, 0xde, 0xe2, 0x73, 0x57, 0x19, 0xaa, 0xa9, 0x77, 0x03, 0x6b, 0x05, 0xe0, 0xbb, 0x60, 0x65,
0x6a, 0xe8, 0xb0, 0x06, 0x6b, 0xda, 0x9d, 0x6a, 0xfe, 0xb1, 0x6f, 0x5d, 0x87, 0x97, 0xe1, 0x8c,
0xf1, 0x23, 0xb0, 0x07, 0xf8, 0xd4, 0x25, 0x79, 0x9f, 0x08, 0xa7, 0xd2, 0x60, 0xcd, 0xf5, 0x16,
0xf7, 0x17, 0x41, 0xfc, 0xb3, 0xc2, 0x09, 0x7f, 0x86, 0xbc, 0x53, 0xb0, 0x4b, 0xce, 0x5b, 0x00,
0x31, 0x49, 0x89, 0xb1, 0x26, 0x95, 0x3a, 0xac, 0x61, 0x2d, 0xed, 0x77, 0x0b, 0x2b, 0xfc, 0x35,
0xe5, 0xb5, 0xc1, 0x2e, 0x0d, 0xce, 0x61, 0x45, 0x46, 0x23, 0x9c, 0x67, 0x0b, 0x8d, 0x9e, 0x31,
0x3d, 0x1d, 0xa3, 0x89, 0x63, 0x87, 0x46, 0xb7, 0xee, 0xa0, 0x56, 0x3c, 0xa7, 0x87, 0x6a, 0x92,
0xc4, 0xc8, 0x2f, 0xc0, 0x3a, 0x47, 0xcd, 0x77, 0xca, 0xdf, 0x2d, 0x1d, 0xa0, 0xbe, 0xf5, 0x8f,
0x7b, 0xce, 0xf3, 0xfb, 0xd7, 0x4b, 0x85, 0xf3, 0x4d, 0x73, 0xc4, 0xc9, 0x71, 0xd9, 0x40, 0xe7,
0xe4, 0x35, 0x77, 0xd9, 0x5b, 0xee, 0xb2, 0xcf, 0xdc, 0x65, 0xb7, 0x07, 0x22, 0xd1, 0x0f, 0x59,
0xdf, 0x8f, 0x69, 0x14, 0x44, 0xca, 0x74, 0xf1, 0x68, 0xc4, 0x61, 0x3c, 0x08, 0xfe, 0xb4, 0xd8,
0x5f, 0x35, 0x05, 0xb4, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xef, 0x0e, 0xd5, 0xb9, 0xdf, 0x01,
0x00, 0x00,
}

View File

@@ -0,0 +1,116 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: server/settings/settings.proto
/*
Package settings is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package settings
import (
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_SettingsService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client SettingsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq SettingsQuery
var metadata runtime.ServerMetadata
msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterSettingsServiceHandlerFromEndpoint is same as RegisterSettingsServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterSettingsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterSettingsServiceHandler(ctx, mux, conn)
}
// RegisterSettingsServiceHandler registers the http handlers for service SettingsService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterSettingsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterSettingsServiceHandlerClient(ctx, mux, NewSettingsServiceClient(conn))
}
// RegisterSettingsServiceHandlerClient registers the http handlers for service SettingsService to "mux".
// The handlers forward requests to the grpc endpoint over the given implementation of "SettingsServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SettingsServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "SettingsServiceClient" to call the correct interceptors.
func RegisterSettingsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SettingsServiceClient) error {
mux.Handle("GET", pattern_SettingsService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_SettingsService_Get_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_SettingsService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_SettingsService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "settings"}, ""))
)
var (
forward_SettingsService_Get_0 = runtime.ForwardResponseMessage
)

View File

@@ -0,0 +1,38 @@
syntax = "proto3";

option go_package = "github.com/argoproj/argo-cd/server/settings";

// Settings Service
//
// Settings Service API retrieves ArgoCD settings

// NOTE(review): package is "cluster" although the service is settings —
// presumably historical; renaming would change generated symbol names,
// so confirm before touching.
package cluster;

import "gogoproto/gogo.proto";
import "google/api/annotations.proto";

// SettingsQuery is a query for ArgoCD settings
message SettingsQuery {
}

// Settings is the subset of ArgoCD server settings exposed over the API
message Settings {
	string url = 1 [(gogoproto.customname) = "URL"];
	DexConfig dexConfig = 2;
}

// DexConfig describes the Dex SSO configuration
message DexConfig {
	repeated Connector connectors = 1;
}

// Connector identifies a single Dex authentication connector by name and type
message Connector {
	string name = 1;
	string type = 2;
}

// SettingsService
service SettingsService {
	// Get returns ArgoCD settings
	rpc Get(SettingsQuery) returns (Settings) {
		option (google.api.http).get = "/api/v1/settings";
	}
}

View File

@@ -0,0 +1,187 @@
package e2e
import (
"strconv"
"testing"
"time"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
"k8s.io/apimachinery/pkg/api/errors"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
func TestAppManagement(t *testing.T) {
testApp := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Source: v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argo-cd.git", Path: ".", Environment: "minikube",
},
Destination: v1alpha1.ApplicationDestination{
Server: fixture.Config.Host,
Namespace: fixture.Namespace,
},
},
}
t.Run("TestAppCreation", func(t *testing.T) {
appName := "app-" + strconv.FormatInt(time.Now().Unix(), 10)
_, err := fixture.RunCli("app", "create",
"--name", appName,
"--repo", "https://github.com/argoproj/argo-cd.git",
"--env", "minikube",
"--path", ".",
"--dest-server", fixture.Config.Host,
"--dest-namespace", fixture.Namespace)
if err != nil {
t.Fatalf("Unable to create app %v", err)
}
app, err := fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(appName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unable to get app %v", err)
}
assert.Equal(t, appName, app.Name)
assert.Equal(t, "https://github.com/argoproj/argo-cd.git", app.Spec.Source.RepoURL)
assert.Equal(t, "minikube", app.Spec.Source.Environment)
assert.Equal(t, ".", app.Spec.Source.Path)
assert.Equal(t, fixture.Namespace, app.Spec.Destination.Namespace)
assert.Equal(t, fixture.Config.Host, app.Spec.Destination.Server)
})
t.Run("TestAppDeletion", func(t *testing.T) {
app := fixture.CreateApp(t, testApp)
_, err := fixture.RunCli("app", "delete", app.Name)
if err != nil {
t.Fatalf("Unable to delete app %v", err)
}
_, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.Name, metav1.GetOptions{})
assert.NotNil(t, err)
assert.True(t, errors.IsNotFound(err))
})
t.Run("TestTrackAppStateAndSyncApp", func(t *testing.T) {
app := fixture.CreateApp(t, testApp)
WaitUntil(t, func() (done bool, err error) {
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status != v1alpha1.ComparisonStatusUnknown, err
})
// sync app and make sure it reaches InSync state
_, err := fixture.RunCli("app", "sync", app.Name)
if err != nil {
t.Fatalf("Unable to sync app %v", err)
}
WaitUntil(t, func() (done bool, err error) {
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status == v1alpha1.ComparisonStatusSynced, err
})
assert.Equal(t, v1alpha1.ComparisonStatusSynced, app.Status.ComparisonResult.Status)
assert.True(t, app.Status.OperationState.SyncResult != nil)
assert.True(t, app.Status.OperationState.Phase == v1alpha1.OperationSucceeded)
})
t.Run("TestAppRollbackSuccessful", func(t *testing.T) {
appWithHistory := testApp.DeepCopy()
// create app and ensure its comparison status is not ComparisonStatusUnknown
app := fixture.CreateApp(t, appWithHistory)
app.Status.History = []v1alpha1.DeploymentInfo{{
ID: 1,
Revision: "abc",
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
}, {
ID: 2,
Revision: "cdb",
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
}}
app, err := fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Update(app)
if err != nil {
t.Fatalf("Unable to update app %v", err)
}
WaitUntil(t, func() (done bool, err error) {
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status != v1alpha1.ComparisonStatusUnknown, err
})
// sync app and make sure it reaches InSync state
_, err = fixture.RunCli("app", "rollback", app.Name, "1")
if err != nil {
t.Fatalf("Unable to sync app %v", err)
}
WaitUntil(t, func() (done bool, err error) {
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status == v1alpha1.ComparisonStatusSynced, err
})
assert.Equal(t, v1alpha1.ComparisonStatusSynced, app.Status.ComparisonResult.Status)
assert.True(t, app.Status.OperationState.RollbackResult != nil)
assert.Equal(t, 2, len(app.Status.OperationState.RollbackResult.Resources))
assert.True(t, app.Status.OperationState.Phase == v1alpha1.OperationSucceeded)
assert.Equal(t, 3, len(app.Status.History))
})
t.Run("TestComparisonFailsIfClusterNotAdded", func(t *testing.T) {
invalidApp := testApp.DeepCopy()
invalidApp.Spec.Destination.Server = "https://not-registered-cluster/api"
app := fixture.CreateApp(t, invalidApp)
WaitUntil(t, func() (done bool, err error) {
app, err := fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status != v1alpha1.ComparisonStatusUnknown, err
})
app, err := fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unable to get app %v", err)
}
assert.Equal(t, v1alpha1.ComparisonStatusError, app.Status.ComparisonResult.Status)
})
t.Run("TestArgoCDWaitEnsureAppIsNotCrashing", func(t *testing.T) {
updatedApp := testApp.DeepCopy()
// deploy app and make sure it is healthy
app := fixture.CreateApp(t, updatedApp)
_, err := fixture.RunCli("app", "sync", app.Name)
if err != nil {
t.Fatalf("Unable to sync app %v", err)
}
WaitUntil(t, func() (done bool, err error) {
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status == v1alpha1.ComparisonStatusSynced && app.Status.Health.Status == v1alpha1.HealthStatusHealthy, err
})
// deploy app which fails and make sure it became unhealthy
app.Spec.Source.ComponentParameterOverrides = append(
app.Spec.Source.ComponentParameterOverrides,
v1alpha1.ComponentParameter{Name: "command", Value: "wrong-command", Component: "guestbook-ui"})
_, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Update(app)
if err != nil {
t.Fatalf("Unable to set app parameter %v", err)
}
_, err = fixture.RunCli("app", "sync", app.Name)
if err != nil {
t.Fatalf("Unable to sync app %v", err)
}
WaitUntil(t, func() (done bool, err error) {
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status == v1alpha1.ComparisonStatusSynced && app.Status.Health.Status == v1alpha1.HealthStatusDegraded, err
})
})
}

View File

@@ -1,70 +0,0 @@
package e2e
import (
"context"
"fmt"
"testing"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
func TestController(t *testing.T) {
testApp := &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{GenerateName: "e2e-test"},
Spec: v1alpha1.ApplicationSpec{Source: v1alpha1.ApplicationSource{
RepoURL: "https://github.com/ksonnet/ksonnet.git", Path: ".", Environment: "default",
}},
}
t.Run("TestComparisonErrorIfRepoDoesNotExist", func(t *testing.T) {
ctrl := fixture.CreateController()
ctx, cancel := context.WithCancel(context.Background())
go ctrl.Run(ctx, 1)
defer cancel()
app := fixture.CreateApp(t, testApp)
PollUntil(t, func() (done bool, err error) {
app, err := fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status != v1alpha1.ComparisonStatusUnknown, err
})
app, err := fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(fmt.Sprintf("Unable to get app %v", err))
}
assert.Equal(t, app.Status.ComparisonResult.Status, v1alpha1.ComparisonStatusError)
})
t.Run("TestComparisonFailsIfClusterNotAdded", func(t *testing.T) {
ctrl := fixture.CreateController()
ctx, cancel := context.WithCancel(context.Background())
go ctrl.Run(ctx, 1)
defer cancel()
_, err := fixture.ApiRepoService.Create(context.Background(), &v1alpha1.Repository{Repo: testApp.Spec.Source.RepoURL, Username: "", Password: ""})
if err != nil {
t.Fatal(fmt.Sprintf("Unable to create repo %v", err))
}
app := fixture.CreateApp(t, testApp)
PollUntil(t, func() (done bool, err error) {
app, err := fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
return err == nil && app.Status.ComparisonResult.Status != v1alpha1.ComparisonStatusUnknown, err
})
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(fmt.Sprintf("Unable to get app %v", err))
}
assert.Equal(t, app.Status.ComparisonResult.Status, v1alpha1.ComparisonStatusError)
})
}

View File

@@ -1,6 +1,8 @@
package e2e
import (
"context"
"encoding/json"
"fmt"
"log"
"net"
@@ -9,22 +11,35 @@ import (
"testing"
"time"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/install"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/server/cluster"
apirepository "github.com/argoproj/argo-cd/server/repository"
"google.golang.org/grpc"
"k8s.io/api/core/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"strings"
"github.com/argoproj/argo-cd/cmd/argocd/commands"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/install"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/server"
"github.com/argoproj/argo-cd/server/application"
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/rbac"
"github.com/argoproj/argo-cd/util/settings"
)
const (
@@ -33,17 +48,18 @@ const (
// Fixture represents e2e tests fixture.
type Fixture struct {
Config *rest.Config
KubeClient kubernetes.Interface
ExtensionsClient apiextensionsclient.Interface
AppClient appclientset.Interface
ApiRepoService apirepository.RepositoryServiceServer
RepoClientset reposerver.Clientset
AppComparator controller.AppComparator
Namespace string
InstanceID string
repoServerGRPC *grpc.Server
repoServerListener net.Listener
Config *rest.Config
KubeClient kubernetes.Interface
ExtensionsClient apiextensionsclient.Interface
AppClient appclientset.Interface
DB db.ArgoDB
Namespace string
InstanceID string
RepoServerAddress string
ApiServerAddress string
Enforcer *rbac.Enforcer
tearDownCallback func()
}
func createNamespace(kubeClient *kubernetes.Clientset) (string, error) {
@@ -59,28 +75,153 @@ func createNamespace(kubeClient *kubernetes.Clientset) (string, error) {
return cns.Name, nil
}
func getFreePort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer util.Close(l)
return l.Addr().(*net.TCPAddr).Port, nil
}
func (f *Fixture) setup() error {
installer, err := install.NewInstaller(f.Config, install.InstallOptions{})
if err != nil {
return err
}
listener, err := net.Listen("tcp", "127.0.0.1:0")
installer.InstallApplicationCRD()
cm := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDRBACConfigMapName,
},
Data: map[string]string{
rbac.ConfigMapPolicyDefaultKey: "role:admin",
},
}
_, err = f.KubeClient.CoreV1().ConfigMaps(f.Namespace).Create(&cm)
if err != nil {
return err
}
f.repoServerListener = listener
settingsMgr := settings.NewSettingsManager(f.KubeClient, f.Namespace)
err = settingsMgr.SaveSettings(&settings.ArgoCDSettings{})
if err != nil {
return err
}
err = f.ensureClusterRegistered()
if err != nil {
return err
}
apiServerPort, err := getFreePort()
if err != nil {
return err
}
memCache := cache.NewInMemoryCache(repository.DefaultRepoCacheExpiration)
repoServerGRPC := reposerver.NewServer(&FakeGitClientFactory{}, memCache).CreateGRPC()
repoServerListener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return err
}
f.RepoServerAddress = repoServerListener.Addr().String()
f.ApiServerAddress = fmt.Sprintf("127.0.0.1:%d", apiServerPort)
apiServer := server.NewServer(server.ArgoCDServerOpts{
Namespace: f.Namespace,
AppClientset: f.AppClient,
DisableAuth: true,
Insecure: true,
KubeClientset: f.KubeClient,
RepoClientset: reposerver.NewRepositoryServerClientset(f.RepoServerAddress),
})
ctx, cancel := context.WithCancel(context.Background())
go func() {
err = f.repoServerGRPC.Serve(listener)
apiServer.Run(ctx, apiServerPort)
}()
installer.InstallApplicationCRD()
err = waitUntilE(func() (done bool, err error) {
clientset, err := f.NewApiClientset()
if err != nil {
return false, nil
}
conn, appClient, err := clientset.NewApplicationClient()
if err != nil {
return false, nil
}
defer util.Close(conn)
_, err = appClient.List(context.Background(), &application.ApplicationQuery{})
return err == nil, nil
})
ctrl := f.createController()
ctrlCtx, cancelCtrl := context.WithCancel(context.Background())
go ctrl.Run(ctrlCtx, 1, 1)
go func() {
err = repoServerGRPC.Serve(repoServerListener)
}()
f.tearDownCallback = func() {
cancel()
cancelCtrl()
repoServerGRPC.Stop()
}
return err
}
func (f *Fixture) ensureClusterRegistered() error {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
overrides := clientcmd.ConfigOverrides{}
clientConfig := clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin)
conf, err := clientConfig.ClientConfig()
if err != nil {
return err
}
// Install RBAC resources for managing the cluster
managerBearerToken := common.InstallClusterManagerRBAC(conf)
clst := commands.NewCluster(f.Config.Host, conf, managerBearerToken)
clstCreateReq := cluster.ClusterCreateRequest{Cluster: clst}
_, err = cluster.NewServer(f.DB, f.Enforcer).Create(context.Background(), &clstCreateReq)
return err
}
// TearDown deletes fixture resources.
func (f *Fixture) TearDown() {
err := f.KubeClient.CoreV1().Namespaces().Delete(f.Namespace, &metav1.DeleteOptions{})
if err != nil {
f.repoServerGRPC.Stop()
if f.tearDownCallback != nil {
f.tearDownCallback()
}
apps, err := f.AppClient.ArgoprojV1alpha1().Applications(f.Namespace).List(metav1.ListOptions{})
if err == nil {
for _, app := range apps.Items {
if len(app.Finalizers) > 0 {
var patch []byte
patch, err = json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": make([]string, 0),
},
})
if err == nil {
_, err = f.AppClient.ArgoprojV1alpha1().Applications(app.Namespace).Patch(app.Name, types.MergePatchType, patch)
}
}
if err != nil {
break
}
}
}
if err == nil {
err = f.KubeClient.CoreV1().Namespaces().Delete(f.Namespace, &metav1.DeleteOptions{})
}
if err != nil {
println("Unable to tear down fixture")
@@ -101,29 +242,30 @@ func GetKubeConfig(configPath string, overrides clientcmd.ConfigOverrides) *rest
return restConfig
}
// NewFixture creates e2e tests fixture.
// NewFixture creates e2e tests fixture: ensures that Application CRD is installed, creates temporal namespace, starts repo and api server,
// configure currently available cluster.
func NewFixture() (*Fixture, error) {
config := GetKubeConfig("", clientcmd.ConfigOverrides{})
extensionsClient := apiextensionsclient.NewForConfigOrDie(config)
appClient := appclientset.NewForConfigOrDie(config)
kubeClient := kubernetes.NewForConfigOrDie(config)
namespace, err := createNamespace(kubeClient)
clusterService := cluster.NewServer(namespace, kubeClient, appClient)
repoServerGRPC := reposerver.NewServer(kubeClient, namespace).CreateGRPC(&FakeGitClient{})
if err != nil {
return nil, err
}
appComparator := controller.NewKsonnetAppComparator(clusterService)
db := db.NewDB(namespace, kubeClient)
enforcer := rbac.NewEnforcer(kubeClient, namespace, common.ArgoCDRBACConfigMapName, nil)
enforcer.SetDefaultRole("role:admin")
fixture := &Fixture{
Config: config,
ExtensionsClient: extensionsClient,
AppClient: appClient,
DB: db,
KubeClient: kubeClient,
Namespace: namespace,
InstanceID: namespace,
ApiRepoService: apirepository.NewServer(namespace, kubeClient, appClient),
AppComparator: appComparator,
repoServerGRPC: repoServerGRPC,
Enforcer: enforcer,
}
err = fixture.setup()
if err != nil {
@@ -134,6 +276,8 @@ func NewFixture() (*Fixture, error) {
// CreateApp creates application with appropriate controller instance id.
func (f *Fixture) CreateApp(t *testing.T, application *v1alpha1.Application) *v1alpha1.Application {
application = application.DeepCopy()
application.Name = fmt.Sprintf("e2e-test-%v", time.Now().Unix())
labels := application.ObjectMeta.Labels
if labels == nil {
labels = make(map[string]string)
@@ -141,6 +285,10 @@ func (f *Fixture) CreateApp(t *testing.T, application *v1alpha1.Application) *v1
}
labels[common.LabelKeyApplicationControllerInstanceID] = f.InstanceID
application.Spec.Source.ComponentParameterOverrides = append(
application.Spec.Source.ComponentParameterOverrides,
v1alpha1.ComponentParameter{Name: "name", Value: application.Name, Component: "guestbook-ui"})
app, err := f.AppClient.ArgoprojV1alpha1().Applications(f.Namespace).Create(application)
if err != nil {
t.Fatal(fmt.Sprintf("Unable to create app %v", err))
@@ -148,22 +296,51 @@ func (f *Fixture) CreateApp(t *testing.T, application *v1alpha1.Application) *v1
return app
}
// CreateController creates new controller instance
func (f *Fixture) CreateController() *controller.ApplicationController {
// createController creates new controller instance
func (f *Fixture) createController() *controller.ApplicationController {
appStateManager := controller.NewAppStateManager(
f.DB, f.AppClient, reposerver.NewRepositoryServerClientset(f.RepoServerAddress), f.Namespace)
appHealthManager := controller.NewAppHealthManager(f.DB, f.Namespace)
return controller.NewApplicationController(
f.Namespace,
f.KubeClient,
f.AppClient,
reposerver.NewRepositoryServerClientset(f.repoServerListener.Addr().String()),
f.ApiRepoService,
cluster.NewServer(f.Namespace, f.KubeClient, f.AppClient),
f.AppComparator,
time.Second,
f.DB,
appStateManager,
appHealthManager,
10*time.Second,
&controller.ApplicationControllerConfig{Namespace: f.Namespace, InstanceID: f.InstanceID})
}
// PollUntil periodically executes specified condition until it returns true.
func PollUntil(t *testing.T, condition wait.ConditionFunc) {
func (f *Fixture) NewApiClientset() (argocdclient.Client, error) {
return argocdclient.NewClient(&argocdclient.ClientOptions{
Insecure: true,
PlainText: true,
ServerAddr: f.ApiServerAddress,
})
}
func (f *Fixture) RunCli(args ...string) (string, error) {
args = append([]string{"run", "../../cmd/argocd/main.go"}, args...)
cmd := exec.Command("go", append(args, "--server", f.ApiServerAddress, "--plaintext")...)
outBytes, err := cmd.Output()
if err != nil {
exErr, ok := err.(*exec.ExitError)
if !ok {
return "", err
}
errOutput := string(exErr.Stderr)
if outBytes != nil {
errOutput = string(outBytes) + "\n" + errOutput
}
return "", fmt.Errorf(strings.TrimSpace(errOutput))
}
return string(outBytes), nil
}
func waitUntilE(condition wait.ConditionFunc) error {
stop := make(chan struct{})
isClosed := false
makeSureClosed := func() {
@@ -177,35 +354,66 @@ func PollUntil(t *testing.T, condition wait.ConditionFunc) {
time.Sleep(TestTimeout)
makeSureClosed()
}()
err := wait.PollUntil(time.Second, condition, stop)
return wait.PollUntil(time.Second, condition, stop)
}
// WaitUntil periodically executes specified condition until it returns true.
func WaitUntil(t *testing.T, condition wait.ConditionFunc) {
err := waitUntilE(condition)
if err != nil {
t.Fatal("Failed to wait for expected condition")
}
}
// FakeGitClient is a test git client implementation which always clone local test repo.
type FakeGitClient struct {
type FakeGitClientFactory struct{}
func (f *FakeGitClientFactory) NewClient(repoURL, path, username, password, sshPrivateKey string) git.Client {
return &FakeGitClient{
root: path,
}
}
func (c *FakeGitClient) CloneOrFetch(repo string, username string, password string, sshPrivateKey string, repoPath string) error {
_, err := exec.Command("rm", "-rf", repoPath).Output()
// FakeGitClient is a test git client implementation which always clone local test repo.
type FakeGitClient struct {
root string
}
func (c *FakeGitClient) Init() error {
_, err := exec.Command("rm", "-rf", c.root).Output()
if err != nil {
return err
}
_, err = exec.Command("cp", "-r", "../../examples/guestbook", repoPath).Output()
_, err = exec.Command("cp", "-r", "../../examples/guestbook", c.root).Output()
return err
}
func (c *FakeGitClient) Checkout(repoPath string, sha string) (string, error) {
// do nothing
return "latest", nil
func (c *FakeGitClient) Root() string {
return c.root
}
func (c *FakeGitClient) Reset(repoPath string) error {
func (c *FakeGitClient) Fetch() error {
// do nothing
return nil
}
func (c *FakeGitClient) CommitSHA(repoPath string) (string, error) {
func (c *FakeGitClient) Checkout(revision string) error {
// do nothing
return nil
}
func (c *FakeGitClient) Reset() error {
// do nothing
return nil
}
func (c *FakeGitClient) LsRemote(s string) (string, error) {
return "abcdef123456890", nil
}
func (c *FakeGitClient) LsFiles(s string) ([]string, error) {
return []string{"abcdef123456890"}, nil
}
func (c *FakeGitClient) CommitSHA() (string, error) {
return "abcdef123456890", nil
}

View File

@@ -0,0 +1,40 @@
package e2e
import (
"testing"
"context"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/util"
"github.com/stretchr/testify/assert"
)
func TestRepoManagement(t *testing.T) {
t.Run("TestAddRemovePublicRepo", func(t *testing.T) {
repoUrl := "https://github.com/argoproj/argo-cd.git"
_, err := fixture.RunCli("repo", "add", repoUrl)
assert.Nil(t, err)
clientSet, err := fixture.NewApiClientset()
assert.Nil(t, err)
conn, repoClient, err := clientSet.NewRepoClient()
assert.Nil(t, err)
defer util.Close(conn)
repo, err := repoClient.Get(context.Background(), &repository.RepoQuery{
Repo: repoUrl,
})
assert.Nil(t, err)
assert.Equal(t, repoUrl, repo.Repo)
_, err = fixture.RunCli("repo", "rm", repoUrl)
assert.Nil(t, err)
_, err = repoClient.Get(context.Background(), &repository.RepoQuery{
Repo: repoUrl,
})
assert.NotNil(t, err)
})
}

View File

@@ -1,23 +1,106 @@
package argo
import (
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/repository"
"context"
"encoding/json"
"errors"
"fmt"
"time"
log "github.com/sirupsen/logrus"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"github.com/argoproj/argo-cd/common"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/pkg/client/clientset/versioned/typed/application/v1alpha1"
)
// ResolveServerNamespace resolves server and namespace to use given an application spec,
// and a manifest response. It looks to explicit server/namespace overridden in the app CRD spec
// and falls back to the server/namespace defined in the ksonnet environment
func ResolveServerNamespace(destination *appv1.ApplicationDestination, manifestInfo *repository.ManifestResponse) (string, string) {
server := manifestInfo.Server
namespace := manifestInfo.Namespace
if destination != nil {
if destination.Server != "" {
server = destination.Server
// RefreshApp updates the refresh annotation of an application to coerce the controller to process it
func RefreshApp(appIf v1alpha1.ApplicationInterface, name string) (*argoappv1.Application, error) {
refreshString := time.Now().UTC().Format(time.RFC3339)
metadata := map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]string{
common.AnnotationKeyRefresh: refreshString,
},
},
"status": map[string]interface{}{
"comparisonResult": map[string]interface{}{
"comparedAt": nil,
},
},
}
var err error
patch, err := json.Marshal(metadata)
if err != nil {
return nil, err
}
for attempt := 0; attempt < 5; attempt++ {
app, err := appIf.Patch(name, types.MergePatchType, patch)
if err != nil {
if !apierr.IsConflict(err) {
return nil, err
}
} else {
log.Infof("Refreshed app '%s' for controller reprocessing (%s)", name, refreshString)
return app, nil
}
if destination.Namespace != "" {
namespace = destination.Namespace
time.Sleep(100 * time.Millisecond)
}
return nil, err
}
// WaitForRefresh watches a workflow until its comparison timestamp is after the refresh timestamp
func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *time.Duration) (*argoappv1.Application, error) {
ctx := context.Background()
var cancel context.CancelFunc
if timeout != nil {
ctx, cancel = context.WithTimeout(ctx, *timeout)
defer cancel()
}
fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name))
listOpts := metav1.ListOptions{FieldSelector: fieldSelector.String()}
watchIf, err := appIf.Watch(listOpts)
if err != nil {
return nil, err
}
defer watchIf.Stop()
for {
select {
case <-ctx.Done():
err := ctx.Err()
if err != nil {
if err == context.DeadlineExceeded {
return nil, fmt.Errorf("Timed out (%v) waiting for application to refresh", timeout)
}
return nil, fmt.Errorf("Error waiting for refresh: %v", err)
}
return nil, fmt.Errorf("Application watch on %s closed", name)
case next := <-watchIf.ResultChan():
if next.Type == watch.Error {
errMsg := "Application watch completed with error"
if status, ok := next.Object.(*metav1.Status); ok {
errMsg = fmt.Sprintf("%s: %v", errMsg, status)
}
return nil, errors.New(errMsg)
}
app, ok := next.Object.(*argoappv1.Application)
if !ok {
return nil, fmt.Errorf("Application event object failed conversion: %v", next)
}
refreshTimestampStr := app.ObjectMeta.Annotations[common.AnnotationKeyRefresh]
refreshTimestamp, err := time.Parse(time.RFC3339, refreshTimestampStr)
if err != nil {
return nil, fmt.Errorf("Unable to parse '%s': %v", common.AnnotationKeyRefresh, err)
}
if app.Status.ComparisonResult.ComparedAt.After(refreshTimestamp) {
return app, nil
}
}
}
return server, namespace
}

61
util/argo/argo_test.go Normal file
View File

@@ -0,0 +1,61 @@
package argo
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"github.com/argoproj/argo-cd/common"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
testcore "k8s.io/client-go/testing"
)
func TestRefreshApp(t *testing.T) {
var testApp argoappv1.Application
testApp.Name = "test-app"
testApp.Namespace = "default"
appClientset := appclientset.NewSimpleClientset(&testApp)
appIf := appClientset.ArgoprojV1alpha1().Applications("default")
_, err := RefreshApp(appIf, "test-app")
assert.Nil(t, err)
// For some reason, the fake Application inferface doesn't reflect the patch status after Patch(),
// so can't verify it was set in unit tests.
//_, ok := newApp.Annotations[common.AnnotationKeyRefresh]
//assert.True(t, ok)
}
func TestWaitForRefresh(t *testing.T) {
appClientset := appclientset.NewSimpleClientset()
// Verify timeout
appIf := appClientset.ArgoprojV1alpha1().Applications("default")
oneHundredMs := 100 * time.Millisecond
app, err := WaitForRefresh(appIf, "test-app", &oneHundredMs)
assert.NotNil(t, err)
assert.Nil(t, app)
assert.Contains(t, strings.ToLower(err.Error()), "timed out")
// Verify success
var testApp argoappv1.Application
testApp.Name = "test-app"
testApp.Namespace = "default"
testApp.ObjectMeta.Annotations = map[string]string{
common.AnnotationKeyRefresh: time.Now().UTC().Format(time.RFC3339),
}
testApp.Status.ComparisonResult.ComparedAt = metav1.Time{Time: time.Now().UTC()}
appClientset = appclientset.NewSimpleClientset()
appIf = appClientset.ArgoprojV1alpha1().Applications("default")
watcher := watch.NewFake()
appClientset.PrependWatchReactor("applications", testcore.DefaultWatchReactor(watcher, nil))
// simulate add/update/delete watch events
go watcher.Add(&testApp)
app, err = WaitForRefresh(appIf, "test-app", &oneHundredMs)
assert.Nil(t, err)
assert.NotNil(t, app)
}

20
util/cache/cache.go vendored Normal file
View File

@@ -0,0 +1,20 @@
package cache
import (
"errors"
"time"
)
var ErrCacheMiss = errors.New("cache: key is missing")
type Item struct {
Key string
Object interface{}
// Expiration is the cache expiration time.
Expiration time.Duration
}
type Cache interface {
Set(item *Item) error
Get(key string, obj interface{}) error
}

34
util/cache/cache_test.go vendored Normal file
View File

@@ -0,0 +1,34 @@
package cache
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
type testStruct struct {
Foo string
Bar []byte
}
func TestCache(t *testing.T) {
c := NewInMemoryCache(time.Hour)
var obj testStruct
err := c.Get("key", &obj)
assert.Equal(t, err, ErrCacheMiss)
cacheObj := testStruct{
Foo: "foo",
Bar: []byte("bar"),
}
c.Set(&Item{
Key: "key",
Object: &cacheObj,
})
cacheObj.Foo = "baz"
err = c.Get("key", &obj)
assert.Nil(t, err)
assert.EqualValues(t, string(obj.Foo), "foo")
assert.EqualValues(t, string(obj.Bar), "bar")
}

38
util/cache/inmemory.go vendored Normal file
View File

@@ -0,0 +1,38 @@
package cache
import (
"bytes"
"encoding/gob"
"time"
gocache "github.com/patrickmn/go-cache"
)
func NewInMemoryCache(expiration time.Duration) Cache {
return &inMemoryCache{
memCache: gocache.New(expiration, 1*time.Minute),
}
}
type inMemoryCache struct {
memCache *gocache.Cache
}
func (i *inMemoryCache) Set(item *Item) error {
var buf bytes.Buffer
err := gob.NewEncoder(&buf).Encode(item.Object)
if err != nil {
return err
}
i.memCache.Set(item.Key, buf, item.Expiration)
return nil
}
func (i *inMemoryCache) Get(key string, obj interface{}) error {
bufIf, found := i.memCache.Get(key)
if !found {
return ErrCacheMiss
}
buf := bufIf.(bytes.Buffer)
return gob.NewDecoder(&buf).Decode(obj)
}

49
util/cache/redis.go vendored Normal file
View File

@@ -0,0 +1,49 @@
package cache
import (
"time"
rediscache "github.com/go-redis/cache"
"github.com/go-redis/redis"
"github.com/vmihailenco/msgpack"
)
func NewRedisCache(client *redis.Client, expiration time.Duration) Cache {
return &redisCache{
expiration: expiration,
codec: &rediscache.Codec{
Redis: client,
Marshal: func(v interface{}) ([]byte, error) {
return msgpack.Marshal(v)
},
Unmarshal: func(b []byte, v interface{}) error {
return msgpack.Unmarshal(b, v)
},
},
}
}
type redisCache struct {
expiration time.Duration
codec *rediscache.Codec
}
func (r *redisCache) Set(item *Item) error {
expiration := item.Expiration
if expiration == 0 {
expiration = r.expiration
}
return r.codec.Set(&rediscache.Item{
Key: item.Key,
Object: item.Object,
Expiration: expiration,
})
}
func (r *redisCache) Get(key string, obj interface{}) error {
err := r.codec.Get(key, obj)
if err == rediscache.ErrCacheMiss {
return ErrCacheMiss
}
return err
}

View File

@@ -55,24 +55,53 @@ func AddKubectlFlagsToCmd(cmd *cobra.Command) clientcmd.ClientConfig {
return clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin)
}
// PromptCredentials is a helper to prompt the user for a username and password
// PromptCredentials is a helper to prompt the user for a username and password (unless already supplied)
func PromptCredentials(username, password string) (string, string) {
for username == "" {
return PromptUsername(username), PromptPassword(password)
}
// PromptUsername prompts the user for a username value
func PromptUsername(username string) string {
return PromptMessage("Username", username)
}
// PromptMessage prompts the user for a value (unless already supplied)
func PromptMessage(message, value string) string {
for value == "" {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Username: ")
usernameRaw, err := reader.ReadString('\n')
fmt.Print(message + ": ")
valueRaw, err := reader.ReadString('\n')
errors.CheckError(err)
username = strings.TrimSpace(usernameRaw)
value = strings.TrimSpace(valueRaw)
}
return value
}
// PromptPassword prompts the user for a password, without local echo. (unless already supplied)
func PromptPassword(password string) string {
for password == "" {
fmt.Print("Password: ")
passwordRaw, err := terminal.ReadPassword(syscall.Stdin)
errors.CheckError(err)
password = string(passwordRaw)
if password == "" {
fmt.Print("\n")
fmt.Print("\n")
}
return password
}
// AskToProceed prompts the user with a message (typically a yes or no question) and returns whether
// or not they responded in the affirmative or negative.
func AskToProceed(message string) bool {
for {
fmt.Print(message)
reader := bufio.NewReader(os.Stdin)
proceedRaw, err := reader.ReadString('\n')
errors.CheckError(err)
switch strings.ToLower(strings.TrimSpace(proceedRaw)) {
case "y", "yes":
return true
case "n", "no":
return false
}
}
fmt.Print("\n")
return username, password
}

Some files were not shown because too many files have changed in this diff Show More