Compare commits

...

52 Commits

Author SHA1 Message Date
Tom Wieczorek
5732f19d20 Update to 0.10.6 also in kustomization (#816) 2018-11-26 14:36:57 -08:00
Jesse Suen
0dfcd2f7d8 Update version to v0.10.6 2018-11-14 17:20:31 -08:00
Jesse Suen
7e49cff7e9 Fix issue preventing in-cluster app sync due to go-client changes (issue #774) 2018-11-14 17:17:08 -08:00
Conor Fennell
5d185b6584 add argo cluster permission to view logs (#766) 2018-11-13 18:47:49 -08:00
Jesse Suen
7bfa374b40 Bump up the default status/operation processors to 20/10 respectively 2018-11-13 18:18:21 -08:00
Jesse Suen
6c20e0f7d7 Bump version to v0.10.5 2018-11-13 15:28:02 -08:00
Jesse Suen
93d2dd7ed0 Update dependencies to k8s v1.12 and client-go v9.0 (#729)
Update dependencies to k8s v1.12 and client-go v9.0 (resolves #353)
Fix issue where applications could not be deleted on k8s v1.12 (resolves #718)
Refactor k8s dynamic resource libraries to promote code reuse
2018-11-13 14:59:12 -08:00
Jesse Suen
89ece31762 Update version to v0.10.4 2018-11-07 17:25:16 -08:00
Jesse Suen
d565a0a119 Health check is not discerning apiVersion when assessing CRDs (issue #753) (#754) 2018-11-07 17:24:20 -08:00
Mario Duarte
1e2b554f01 Fix nil pointer dereference in util/health (#723) 2018-11-07 17:24:14 -08:00
Alessandro Marrella
8cb2f5d7e4 Updated helm (#749) 2018-11-07 16:35:16 -08:00
Alexander Matyushentsev
c5814d5946 Update version to v0.10.3 2018-10-28 23:31:08 -07:00
Alexander Matyushentsev
a4a81d1de9 Fix applying TLS version settings (#731) 2018-10-28 23:30:00 -07:00
Tom Wieczorek
cb27cec021 Update to kustomize 1.0.10 (#728) (#728)
See also kubernetes-sigs/kustomize#514
2018-10-28 22:47:42 -07:00
Alexander Matyushentsev
e13e13e7ae Update manifests to v0.10.2 2018-10-25 11:52:17 -07:00
Tom Wieczorek
88d41f8efa Update to kustomize 1.0.9 (#722) 2018-10-25 11:51:22 -07:00
dthomson25
dbe09104a1 Fix app refresh err when k8s patch is too slow (#724) 2018-10-25 11:51:18 -07:00
Alexander Matyushentsev
6a18870ec1 Update manifests to v0.10.1 2018-10-24 12:57:02 -07:00
Jesse Suen
ca9f992fc2 Update version to v0.10.1 2018-10-24 11:40:52 -07:00
Jesse Suen
063ff34f00 Handle case where OIDC settings become invalid after dex server restart (issue #710) (#715) 2018-10-24 11:39:53 -07:00
Jesse Suen
a9980c3025 git clean also needs to clean files under gitignore (issue #711) (#712) 2018-10-19 22:11:25 -07:00
Alexander Matyushentsev
3f5967c83e Update manifests to v0.10.0 2018-10-19 14:29:37 -07:00
Jesse Suen
22b77f5b34 RBAC for cluster wide install was missing permissions to list events across namespaces (resolves #704) (#705) 2018-10-19 00:04:03 -07:00
Alexander Matyushentsev
c76db90437 Issue #628 - Remove RollbackOperation in favor of Sync with ParameterOverrides (#706) 2018-10-18 20:23:22 -07:00
Alexander Matyushentsev
080f7ff4e0 Add 0.10 changelog (#700) 2018-10-18 18:21:49 -07:00
Alexander Matyushentsev
c5730c8f5f Issue #672 - Metrics endpoint not reachable through the metrics kubernetes service (#692) 2018-10-17 09:44:12 -07:00
Alexander Matyushentsev
d46f284d9f Issue #690 - Increase GRPC message limit (#691) 2018-10-16 16:34:26 -07:00
Alexander Matyushentsev
221f19ae15 Add argocd-util cluster-kubeconfig command (#689) 2018-10-16 16:17:58 -07:00
Alexander Matyushentsev
550cb277df Issue #686 - Resource is always out of sync if it has only 'ksonnet.io/component' label (#688) 2018-10-15 12:57:30 -07:00
Alexander Matyushentsev
9c79af9340 Issue #682 - Operation stuck in 'in progress' state if application has no resources (#684) 2018-10-11 11:18:17 -04:00
Andrew Merenbach
1ba52c8880 Allow more fine-grained sync (closes #508) (#666) 2018-10-10 10:12:20 -07:00
Andrew Merenbach
92629067f7 Upgrade testify (#667)
* Update Gopkg.toml

* Update Gopkg.lock
2018-10-08 11:01:00 -07:00
Alexander Matyushentsev
93a808e65a Issue #627 - Cluster watch needs to be restarted when CRDs get created (#678) 2018-10-05 13:18:12 -04:00
Alexander Matyushentsev
bf99b251f8 Issue #679 - Default project is created without permission to deploy cluster level resources (#680) 2018-10-05 11:42:54 -04:00
Alexander Matyushentsev
f491540636 Issue #426 - Support public not-connected repo in app creation UI (#675) 2018-10-04 12:46:39 -04:00
dthomson25
7f84f7d541 Add project get permission automatically to roles (#665) 2018-10-01 12:44:06 -07:00
Alexander Matyushentsev
42b01f7126 Add v0.9.2 changelog (#662) 2018-09-28 13:14:06 -04:00
Andrew Merenbach
7e5c17939b Add errgroup dependency for Packr (#648) 2018-09-27 18:27:09 -07:00
Alexander Matyushentsev
d6937ec629 Issue #650 - Temporary ignore service catalog resources (#661) 2018-09-27 20:58:45 -04:00
Andrew Merenbach
f5a32f47d3 Update generated files (#660) 2018-09-27 13:07:16 -07:00
Jesse Suen
316fcc6126 Fix issue where argocd-server logged credentials in plain text during repo add (issue #653) 2018-09-27 12:48:23 -07:00
Jesse Suen
e163177a12 Switch to go-git for all remote git interactions including auth (issue #651) 2018-09-27 12:48:23 -07:00
Jesse Suen
1fe257c71e Do not append .git extension during normalization for Azure hosted git (issue #643) (#645) 2018-09-27 11:54:04 -07:00
Andrew Merenbach
1eaa813f28 Use ksonnet CLI instead of ksonnet libs (#590) (#626) 2018-09-27 11:52:08 -07:00
dthomson25
924dad8980 Normalize policies by always adding space after comma (#659) 2018-09-27 11:24:48 -07:00
dthomson25
1ba10a1a20 Remove default params from app history (#649) 2018-09-27 11:24:25 -07:00
Stephen Haynes
ab02e10791 update to kustomize 1.0.8 (#644) 2018-09-26 14:24:59 -07:00
Jesse Suen
dd94e5e5c3 Add version check during release to ensure compiled version is accurate (#646) 2018-09-26 07:40:42 -07:00
Jesse Suen
1fcb90c4d9 Documentation clarifications and fixes (#642) 2018-09-25 08:01:41 -07:00
Alexander Matyushentsev
523c7ddf82 Update getting_started.md with new version; update releasing steps (#641) 2018-09-24 15:43:06 -07:00
Jesse Suen
3577a68d2d Update documentation with auto-sync and projects (issue #521) (#616) 2018-09-24 15:27:30 -07:00
Alexander Matyushentsev
d963f5fcc5 Issue #639 - Repo server unable to execute ls-remote for private repos (#640) 2018-09-24 14:20:52 -07:00
79 changed files with 2648 additions and 1964 deletions

View File

@@ -1,18 +1,78 @@
# Changelog
## v0.9.0
## v0.10.0 (TBD)
### Changes since v0.9:
+ Allow more fine-grained sync (issue #508)
+ Display init container logs (issue #681)
+ Redirect to /auth/login instead of /login when SSO token is used for authentication (issue #348)
+ Support ability to use a helm values files from a URL (issue #624)
+ Support public not-connected repo in app creation UI (issue #426)
+ Use ksonnet CLI instead of ksonnet libs (issue #626)
+ We should be able to select the order of the `yaml` files while creating a Helm App (#664)
* Remove default params from app history (issue #556)
* Update to ksonnet v0.13.0
* Update to kustomize 1.0.8
- API Server fails to return apps due to grpc max message size limit (issue #690)
- App Creation UI for Helm Apps shows only files prefixed with `values-` (issue #663)
- App creation UI should allow specifying values files outside of helm app directory bug (issue #658)
- argocd-server logs credentials in plain text when adding git repositories (issue #653)
- Azure Repos do not work as a repository (issue #643)
- Better update conflict error handling during app editing (issue #685)
- Cluster watch needs to be restarted when CRDs get created (issue #627)
- Credentials not being accepted for Google Source Repositories (issue #651)
- Default project is created without permission to deploy cluster level resources (issue #679)
- Generate role token click resets policy changes (issue #655)
- Input type text instead of password on Connect repo panel (issue #693)
- Metrics endpoint not reachable through the metrics kubernetes service (issue #672)
- Operation stuck in 'in progress' state if application has no resources (issue #682)
- Project should influence options for cluster and namespace during app creation (issue #592)
- Repo server unable to execute ls-remote for private repos (issue #639)
- Resource is always out of sync if it has only 'ksonnet.io/component' label (issue #686)
- Resource nodes are 'jumping' on app details page (issue #683)
- Sync always suggest using latest revision instead of target UI bug (issue #669)
- Temporary ignore service catalog resources (issue #650)
## v0.9.2 (2018-09-28)
* Update to kustomize 1.0.8
- Fix issue where argocd-server logged credentials in plain text during repo add (issue #653)
- Credentials not being accepted for Google Source Repositories (issue #651)
- Azure Repos do not work as a repository (issue #643)
- Temporary ignore service catalog resources (issue #650)
- Normalize policies by always adding space after comma
## v0.9.1 (2018-09-24)
- Repo server unable to execute ls-remote for private repos (issue #639)
## v0.9.0 (2018-09-24)
### Notes about upgrading from v0.8
* The `server.crt` and `server.key` fields of `argocd-secret` had been renamed to `tls.crt` and `tls.key` for
better integration with cert manager (issue #617). Existing `argocd-secret` should be updated accordingly to
preserve existing TLS certificate.
* Cluster wide resources should be allowed in default project (due to issue #330):
```
argocd project allow-cluster-resource default '*' '*'
```
* Projects now provide the ability to allow or deny deployments of cluster-scoped resources
(e.g. Namespaces, ClusterRoles, CustomResourceDefinitions). When upgrading from v0.8 to v0.9, to
match the behavior of v0.8 (which did not have restrictions on deploying resources) and continue to
allow deployment of cluster-scoped resources, an additional command should be run:
```bash
argocd proj allow-cluster-resource default '*' '*'
```
The above command allows the `default` project to deploy any cluster-scoped resources which matches
the behavior of v0.8.
* The secret keys in the argocd-secret containing the TLS certificate and key, has been renamed from
`server.crt` and `server.key` to the standard `tls.crt` and `tls.key` keys. This enables ArgoCD
to integrate better with Ingress and cert-manager. When upgrading to v0.9, the `server.crt` and
`server.key` keys in argocd-secret should be renamed to the new keys.
### Changes since v0.8:
+ Auto-sync option in application CRD instance (issue #79)
+ Support raw jsonnet as an application source (issue #540)
@@ -42,6 +102,10 @@ argocd project allow-cluster-resource default '*' '*'
- Fix issue where changes were not pulled when tracking a branch (issue #567)
- Lazy enforcement of unknown cluster/namespace restricted resources (issue #599)
- Fix controller hot loop when app source contains bad manifests (issue #568)
- Fix issue where ArgoCD fails to deploy when resources are in a K8s list format (issue #584)
- Fix comparison failure when app contains unregistered custom resource (issue #583)
- Fix issue where helm hooks were being deployed as part of sync (issue #605)
- Fix race conditions in kube.GetResourcesWithLabel and DeleteResourceWithLabel (issue #587)
- [UI] Fix issue where projects filter does not work when application got changed
- [UI] Creating apps from directories is not obvious (issue #565)
- Helm hooks are being deployed as resources (issue #605)

View File

@@ -49,19 +49,19 @@ RUN curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kuberne
# Option 1: build ksonnet ourselves
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /usr/local/bin/ks
# Option 2: use official tagged ksonnet release
ENV KSONNET_VERSION=0.11.0
ENV KSONNET_VERSION=0.13.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks
# Install helm
ENV HELM_VERSION=2.9.1
ENV HELM_VERSION=2.11.0
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /usr/local/bin/helm
# Install kustomize
ENV KUSTOMIZE_VERSION=1.0.7
ENV KUSTOMIZE_VERSION=1.0.10
RUN curl -L -o /usr/local/bin/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/kustomize

237
Gopkg.lock generated
View File

@@ -9,16 +9,6 @@
revision = "767c40d6a2e058483c25fa193e963a22da17236d"
version = "v0.18.0"
[[projects]]
digest = "1:6204a59b379aadf05380cf8cf3ae0f5867588ba028fe84f260312a79ae717272"
name = "github.com/GeertJohan/go.rice"
packages = [
".",
"embedded",
]
pruneopts = ""
revision = "c02ca9a983da5807ddf7d796784928f5be4afd09"
[[projects]]
digest = "1:8ec1618fc3ee146af104d6c13be250f25e5976e34557d4afbfe4b28035ce6c05"
name = "github.com/Knetic/govaluate"
@@ -43,15 +33,15 @@
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
[[projects]]
digest = "1:0c024ed5f8ee58bb5bcafcc1d55678cbaec13884a9798eaadfe6ca0d16ef9392"
branch = "master"
digest = "1:0caf9208419fa5db5a0ca7112affaa9550c54291dda8e2abac0c0e76181c959e"
name = "github.com/argoproj/argo"
packages = [
"pkg/apis/workflow",
"pkg/apis/workflow/v1alpha1",
]
pruneopts = ""
revision = "af636ddd8455660f307d835814d3112b90815dfd"
version = "v2.2.0"
revision = "7ef1cea68c94f7f0e1e2f8bd75bedc5a7df8af90"
[[projects]]
branch = "master"
@@ -80,14 +70,6 @@
pruneopts = ""
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:79421244ba5848aae4b0a5c41e633a04e4894cb0b164a219dc8c15ec7facb7f1"
name = "github.com/blang/semver"
packages = ["."]
pruneopts = ""
revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f"
version = "v3.5.1"
[[projects]]
digest = "1:e04162bd6a6d4950541bae744c968108e14913b1cebccf29f7650b573f44adb3"
name = "github.com/casbin/casbin"
@@ -122,14 +104,6 @@
pruneopts = ""
revision = "1180514eaf4d9f38d0d19eef639a1d695e066e72"
[[projects]]
branch = "master"
digest = "1:5fd5c4d4282935b7a575299494f2c09e9d2cacded7815c83aff7c1602aff3154"
name = "github.com/daaku/go.zipexe"
packages = ["."]
pruneopts = ""
revision = "a5fe2436ffcb3236e175e5149162b41cd28bd27d"
[[projects]]
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
name = "github.com/davecgh/go-spew"
@@ -380,6 +354,14 @@
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:1e5b1e14524ed08301977b7b8e10c719ed853cbf3f24ecb66fae783a46f207a6"
name = "github.com/google/btree"
packages = ["."]
pruneopts = ""
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
[[projects]]
digest = "1:14d826ee25139b4674e9768ac287a135f4e7c14e1134a5b15e4e152edfd49f41"
name = "github.com/google/go-jsonnet"
@@ -411,6 +393,17 @@
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
version = "v0.1.0"
[[projects]]
branch = "master"
digest = "1:009a1928b8c096338b68b5822d838a72b4d8520715c1463614476359f3282ec8"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache",
]
pruneopts = ""
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
[[projects]]
branch = "master"
digest = "1:9dca8c981b8aed7448d94e78bc68a76784867a38b3036d5aabc0b32d92ffd1f4"
@@ -459,14 +452,6 @@
pruneopts = ""
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
branch = "master"
digest = "1:f81c8d7354cc0c6340f2f7a48724ee6c2b3db3e918ecd441c985b4d2d97dd3e7"
name = "github.com/howeyc/gopass"
packages = ["."]
pruneopts = ""
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
[[projects]]
digest = "1:23bc0b496ba341c6e3ba24d6358ff4a40a704d9eb5f9a3bd8e8fbd57ad869013"
name = "github.com/imdario/mergo"
@@ -492,20 +477,11 @@
revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"
[[projects]]
digest = "1:dd5cdbd84daf24b2a009364f3c24859b1e4de1eab87c451fb3bce09935d909fc"
digest = "1:31c6f3c4f1e15fcc24fcfc9f5f24603ff3963c56d6fa162116493b4025fb6acc"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = ""
revision = "e7c7f3b33712573affdcc7a107218e7926b9a05b"
version = "1.0.6"
[[projects]]
branch = "master"
digest = "1:2c5ad58492804c40bdaf5d92039b0cde8b5becd2b7feeb37d7d1cc36a8aa8dbe"
name = "github.com/kardianos/osext"
packages = ["."]
pruneopts = ""
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[projects]]
digest = "1:41e0bed5df4f9fd04c418bf9b6b7179b3671e416ad6175332601ca1c8dc74606"
@@ -516,42 +492,12 @@
version = "0.5"
[[projects]]
digest = "1:2fe45da14d25bce0a58c5a991967149cc5d07f94be327b928a9fd306466815a3"
name = "github.com/ksonnet/ksonnet"
packages = [
"metadata/params",
"pkg/app",
"pkg/component",
"pkg/docparser",
"pkg/lib",
"pkg/log",
"pkg/node",
"pkg/params",
"pkg/prototype",
"pkg/schema",
"pkg/util/jsonnet",
"pkg/util/kslib",
"pkg/util/strings",
]
branch = "master"
digest = "1:448b4a6e39e46d8740b00dc871f26d58dc39341b160e01267b7917132831a136"
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
pruneopts = ""
revision = "e943ae55d4fe256c8330a047ce8426ad9dac110c"
version = "v0.11.0"
[[projects]]
digest = "1:a345c560e5609bd71b1f54993f3b087ca45eb0e6226886c642ce519de81896cb"
name = "github.com/ksonnet/ksonnet-lib"
packages = [
"ksonnet-gen/astext",
"ksonnet-gen/jsonnet",
"ksonnet-gen/ksonnet",
"ksonnet-gen/kubespec",
"ksonnet-gen/kubeversion",
"ksonnet-gen/nodemaker",
"ksonnet-gen/printer",
]
pruneopts = ""
revision = "83f20ee933bcd13fcf4ad1b49a40c92135c5569c"
version = "v0.1.10"
revision = "b729f2633dfe35f4d1d8a32385f6685610ce1cb5"
[[projects]]
branch = "master"
@@ -589,6 +535,22 @@
pruneopts = ""
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
[[projects]]
digest = "1:0c0ff2a89c1bb0d01887e1dac043ad7efbf3ec77482ef058ac423d13497e16fd"
name = "github.com/modern-go/concurrent"
packages = ["."]
pruneopts = ""
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855"
name = "github.com/modern-go/reflect2"
packages = ["."]
pruneopts = ""
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[[projects]]
digest = "1:4c0404dc03d974acd5fcd8b8d3ce687b13bd169db032b89275e8b9d77b98ce8c"
name = "github.com/patrickmn/go-cache"
@@ -605,6 +567,22 @@
revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:c24598ffeadd2762552269271b3b1510df2d83ee6696c1e543a0ff653af494bc"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
pruneopts = ""
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
digest = "1:b46305723171710475f2dd37547edd57b67b9de9f2a6267cafdd98331fd6897f"
name = "github.com/peterbourgon/diskv"
packages = ["."]
pruneopts = ""
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
name = "github.com/pkg/errors"
@@ -693,11 +671,12 @@
version = "v1.0.0"
[[projects]]
digest = "1:c92f01303e3ab3b5da92657841639cb53d1548f0d2733d12ef3b9fd9d47c869e"
digest = "1:01d968ff6535945510c944983eee024e81f1c949043e9bbfe5ab206ebc3588a4"
name = "github.com/sirupsen/logrus"
packages = ["."]
pruneopts = ""
revision = "ea8897e79973357ba785ac2533559a6297e83c44"
revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d"
version = "v1.1.0"
[[projects]]
branch = "master"
@@ -715,16 +694,6 @@
revision = "e09e9389d85d8492d313d73d1469c029e710623f"
version = "v0.1.4"
[[projects]]
digest = "1:a35a4db30a6094deac33fdb99de9ed99fefc39a7bf06b57d9f04bcaa425bb183"
name = "github.com/spf13/afero"
packages = [
".",
"mem",
]
pruneopts = ""
revision = "9be650865eab0c12963d8753212f4f9c66cdcf12"
[[projects]]
digest = "1:2208a80fc3259291e43b30f42f844d18f4218036dff510f42c653ec9890d460a"
name = "github.com/spf13/cobra"
@@ -762,15 +731,15 @@
version = "v0.1"
[[projects]]
digest = "1:a30066593578732a356dc7e5d7f78d69184ca65aeeff5939241a3ab10559bb06"
digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
name = "github.com/stretchr/testify"
packages = [
"assert",
"mock",
]
pruneopts = ""
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
digest = "1:51cf0fca93f4866709ceaf01b750e51d997c299a7bd2edf7ccd79e3b428754ae"
@@ -867,6 +836,14 @@
pruneopts = ""
revision = "cce311a261e6fcf29de72ca96827bdb0b7d9c9e6"
[[projects]]
branch = "master"
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
name = "golang.org/x/sync"
packages = ["errgroup"]
pruneopts = ""
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
[[projects]]
branch = "master"
digest = "1:ed900376500543ca05f2a2383e1f541b4606f19cd22f34acb81b17a0b90c7f3e"
@@ -1112,8 +1089,8 @@
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
[[projects]]
branch = "release-1.10"
digest = "1:5beb32094452970c0d73a2bdacd79aa9cfaa4947a774d521c1bed4b4c2705f15"
branch = "release-1.12"
digest = "1:ed04c5203ecbf6358fb6a774b0ecd40ea992d6dcc42adc1d3b7cf9eceb66b6c8"
name = "k8s.io/api"
packages = [
"admission/v1beta1",
@@ -1128,10 +1105,12 @@
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"autoscaling/v2beta2",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"coordination/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
@@ -1142,17 +1121,18 @@
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"scheduling/v1beta1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1",
]
pruneopts = ""
revision = "8b7507fac302640dd5f1efbf9643199952cc58db"
revision = "475331a8afff5587f47d0470a93f79c60c573c03"
[[projects]]
branch = "release-1.10"
digest = "1:7cb811fe9560718bd0ada29f2091acab5c4b4380ed23ef2824f64ce7038d899e"
branch = "release-1.12"
digest = "1:39be82077450762b5e14b5268e679a14ac0e9c7d3286e2fcface437556a29e4c"
name = "k8s.io/apiextensions-apiserver"
packages = [
"pkg/apis/apiextensions",
@@ -1162,20 +1142,17 @@
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
]
pruneopts = ""
revision = "b13a681559816a9c14f93086bbeeed1c7baf2bcb"
revision = "ca1024863b48cf0701229109df75ac5f0bb4907e"
[[projects]]
branch = "release-1.10"
digest = "1:b9c6e8e91bab6a419c58a63377532782a9f5616552164c38a9527f91c9309bbe"
branch = "release-1.12"
digest = "1:5899da40e41bcc8c1df101b72954096bba9d85b763bc17efc846062ccc111c7b"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/equality",
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apimachinery",
"pkg/apimachinery/announced",
"pkg/apimachinery/registered",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
@@ -1202,6 +1179,7 @@
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/naming",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
@@ -1216,11 +1194,11 @@
"third_party/forked/golang/reflect",
]
pruneopts = ""
revision = "f6313580a4d36c7c74a3d845dda6e116642c4f90"
revision = "f71dbbc36e126f5a371b85f6cca96bc8c57db2b6"
[[projects]]
branch = "release-7.0"
digest = "1:3a45889089f89cc371fb45b3f8a478248b755e4af17a8cf592e49bdf3481a0b3"
branch = "release-9.0"
digest = "1:77bf3d9f18ec82e08ac6c4c7e2d9d1a2ef8d16b25d3ff72fcefcf9256d751573"
name = "k8s.io/client-go"
packages = [
"discovery",
@@ -1238,12 +1216,15 @@
"informers/autoscaling",
"informers/autoscaling/v1",
"informers/autoscaling/v2beta1",
"informers/autoscaling/v2beta2",
"informers/batch",
"informers/batch/v1",
"informers/batch/v1beta1",
"informers/batch/v2alpha1",
"informers/certificates",
"informers/certificates/v1beta1",
"informers/coordination",
"informers/coordination/v1beta1",
"informers/core",
"informers/core/v1",
"informers/events",
@@ -1261,6 +1242,7 @@
"informers/rbac/v1beta1",
"informers/scheduling",
"informers/scheduling/v1alpha1",
"informers/scheduling/v1beta1",
"informers/settings",
"informers/settings/v1alpha1",
"informers/storage",
@@ -1292,6 +1274,8 @@
"kubernetes/typed/autoscaling/v1/fake",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta1/fake",
"kubernetes/typed/autoscaling/v2beta2",
"kubernetes/typed/autoscaling/v2beta2/fake",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1/fake",
"kubernetes/typed/batch/v1beta1",
@@ -1300,6 +1284,8 @@
"kubernetes/typed/batch/v2alpha1/fake",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/certificates/v1beta1/fake",
"kubernetes/typed/coordination/v1beta1",
"kubernetes/typed/coordination/v1beta1/fake",
"kubernetes/typed/core/v1",
"kubernetes/typed/core/v1/fake",
"kubernetes/typed/events/v1beta1",
@@ -1318,6 +1304,8 @@
"kubernetes/typed/rbac/v1beta1/fake",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1alpha1/fake",
"kubernetes/typed/scheduling/v1beta1",
"kubernetes/typed/scheduling/v1beta1/fake",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/settings/v1alpha1/fake",
"kubernetes/typed/storage/v1",
@@ -1333,10 +1321,12 @@
"listers/apps/v1beta2",
"listers/autoscaling/v1",
"listers/autoscaling/v2beta1",
"listers/autoscaling/v2beta2",
"listers/batch/v1",
"listers/batch/v1beta1",
"listers/batch/v2alpha1",
"listers/certificates/v1beta1",
"listers/coordination/v1beta1",
"listers/core/v1",
"listers/events/v1beta1",
"listers/extensions/v1beta1",
@@ -1346,12 +1336,14 @@
"listers/rbac/v1alpha1",
"listers/rbac/v1beta1",
"listers/scheduling/v1alpha1",
"listers/scheduling/v1beta1",
"listers/settings/v1alpha1",
"listers/storage/v1",
"listers/storage/v1alpha1",
"listers/storage/v1beta1",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"plugin/pkg/client/auth/gcp",
@@ -1372,6 +1364,7 @@
"transport",
"util/buffer",
"util/cert",
"util/connrotation",
"util/flowcontrol",
"util/homedir",
"util/integer",
@@ -1380,11 +1373,11 @@
"util/workqueue",
]
pruneopts = ""
revision = "26a26f55b28aa1b338fbaf6fbbe0bcd76aed05e0"
revision = "13596e875accbd333e0b5bd5fd9462185acd9958"
[[projects]]
branch = "release-1.10"
digest = "1:34b0b3400ffdc2533ed4ea23721956638c2776ba49ca4c5def71dddcf0cdfd9b"
branch = "release-1.12"
digest = "1:e6fffdf0dfeb0d189a7c6d735e76e7564685d3b6513f8b19d3651191cb6b084b"
name = "k8s.io/code-generator"
packages = [
"cmd/go-to-protobuf",
@@ -1393,7 +1386,7 @@
"third_party/forked/golang/reflect",
]
pruneopts = ""
revision = "9de8e796a74d16d2a285165727d04c185ebca6dc"
revision = "3dcf91f64f638563e5106f21f50c31fa361c918d"
[[projects]]
branch = "master"
@@ -1421,7 +1414,7 @@
revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf"
[[projects]]
digest = "1:ad247ab9725165a7f289779d46747da832e33a4efe8ae264461afc571f65dac8"
digest = "1:6061aa42761235df375f20fa4a1aa6d1845cba3687575f3adb2ef3f3bc540af5"
name = "k8s.io/kubernetes"
packages = [
"pkg/apis/apps",
@@ -1430,11 +1423,12 @@
"pkg/apis/core",
"pkg/apis/extensions",
"pkg/apis/networking",
"pkg/apis/policy",
"pkg/kubectl/scheme",
]
pruneopts = ""
revision = "81753b10df112992bf51bbc2c2f85208aad78335"
version = "v1.10.2"
revision = "17c77c7898218073f14c8d573582e8d2313dc740"
version = "v1.12.2"
[solve-meta]
analyzer-name = "dep"
@@ -1459,7 +1453,6 @@
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/protoc-gen-gofast",
"github.com/gogo/protobuf/protoc-gen-gogofast",
"github.com/golang/glog",
"github.com/golang/protobuf/proto",
"github.com/golang/protobuf/protoc-gen-go",
"github.com/golang/protobuf/ptypes/empty",
@@ -1473,8 +1466,6 @@
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger",
"github.com/grpc-ecosystem/grpc-gateway/runtime",
"github.com/grpc-ecosystem/grpc-gateway/utilities",
"github.com/ksonnet/ksonnet/pkg/app",
"github.com/ksonnet/ksonnet/pkg/component",
"github.com/patrickmn/go-cache",
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
@@ -1483,7 +1474,6 @@
"github.com/sirupsen/logrus",
"github.com/skratchdot/open-golang/open",
"github.com/soheilhy/cmux",
"github.com/spf13/afero",
"github.com/spf13/cobra",
"github.com/spf13/pflag",
"github.com/stretchr/testify/assert",
@@ -1496,6 +1486,7 @@
"golang.org/x/crypto/ssh/terminal",
"golang.org/x/net/context",
"golang.org/x/oauth2",
"golang.org/x/sync/errgroup",
"google.golang.org/genproto/googleapis/api/annotations",
"google.golang.org/grpc",
"google.golang.org/grpc/codes",

View File

@@ -9,6 +9,7 @@ required = [
"k8s.io/code-generator/cmd/go-to-protobuf",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger",
"golang.org/x/sync/errgroup",
]
[[constraint]]
@@ -34,43 +35,29 @@ required = [
revision = "7858729281ec582767b20e0d696b6041d995d5e0"
[[constraint]]
branch = "release-1.10"
branch = "release-1.12"
name = "k8s.io/api"
# override ksonnet dependency
[[override]]
branch = "release-1.10"
name = "k8s.io/apimachinery"
[[constraint]]
name = "k8s.io/apiextensions-apiserver"
branch = "release-1.10"
branch = "release-1.12"
[[constraint]]
branch = "release-1.10"
branch = "release-1.12"
name = "k8s.io/code-generator"
[[constraint]]
branch = "release-7.0"
branch = "release-9.0"
name = "k8s.io/client-go"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.1"
[[constraint]]
name = "github.com/ksonnet/ksonnet"
version = "v0.11.0"
version = "1.2.2"
[[constraint]]
name = "github.com/gobuffalo/packr"
version = "v1.11.0"
# override ksonnet's logrus dependency
[[override]]
name = "github.com/sirupsen/logrus"
revision = "ea8897e79973357ba785ac2533559a6297e83c44"
[[constraint]]
branch = "master"
name = "github.com/argoproj/pkg"

View File

@@ -151,6 +151,7 @@ precheckin: test lint
release-precheck: manifests
@if [ "$(GIT_TREE_STATE)" != "clean" ]; then echo 'git tree state is $(GIT_TREE_STATE)' ; exit 1; fi
@if [ -z "$(GIT_TAG)" ]; then echo 'commit must be tagged to perform release' ; exit 1; fi
@if [ "$(GIT_TAG)" != "v`cat VERSION`" ]; then echo 'VERSION does not match git tag'; exit 1; fi
.PHONY: release
release: release-precheck precheckin cli-darwin cli-linux server-image controller-image repo-server-image cli-image

View File

@@ -72,6 +72,5 @@ For additional details, see [architecture overview](docs/architecture.md).
* Argo CD is being used in production to deploy SaaS services at Intuit
## Roadmap
* Auto-sync toggle to directly apply git state changes to live state
* Revamped UI, and feature parity with CLI
* Customizable application actions

View File

@@ -1 +1 @@
0.9.0
0.10.6

View File

@@ -16,6 +16,7 @@ import (
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/dex"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/settings"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
@@ -59,6 +60,7 @@ func NewCommand() *cobra.Command {
command.AddCommand(NewImportCommand())
command.AddCommand(NewExportCommand())
command.AddCommand(NewSettingsCommand())
command.AddCommand(NewClusterConfig())
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
return command
@@ -377,6 +379,42 @@ func NewSettingsCommand() *cobra.Command {
return command
}
// NewClusterConfig returns a new instance of `argocd-util cluster-kubeconfig` command
func NewClusterConfig() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
)
var command = &cobra.Command{
Use: "cluster-kubeconfig CLUSTER_URL OUTPUT_PATH",
Short: "Generates kubeconfig for the specified cluster",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
os.Exit(1)
}
serverUrl := args[0]
output := args[1]
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
namespace, wasSpecified, err := clientConfig.Namespace()
errors.CheckError(err)
if !(wasSpecified) {
namespace = "argocd"
}
kubeclientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
cluster, err := db.NewDB(namespace, kubeclientset).GetCluster(context.Background(), serverUrl)
errors.CheckError(err)
err = kube.WriteKubeConfig(cluster.RESTConfig(), namespace, output)
errors.CheckError(err)
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
return command
}
func main() {
if err := NewCommand().Execute(); err != nil {
fmt.Println(err)

View File

@@ -702,7 +702,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
_, err := waitOnApplicationStatus(appIf, appName, timeout, watchSync, watchHealth, watchOperations)
_, err := waitOnApplicationStatus(appIf, appName, timeout, watchSync, watchHealth, watchOperations, nil)
errors.CheckError(err)
},
}
@@ -778,8 +778,6 @@ func printAppResources(w io.Writer, app *argoappv1.Application, showOperation bo
if opState != nil {
if opState.SyncResult != nil {
syncRes = opState.SyncResult
} else if opState.RollbackResult != nil {
syncRes = opState.RollbackResult
}
}
if syncRes != nil {
@@ -823,12 +821,17 @@ func printAppResources(w io.Writer, app *argoappv1.Application, showOperation bo
// NewApplicationSyncCommand returns a new instance of an `argocd app sync` command
func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
revision string
prune bool
dryRun bool
timeout uint
strategy string
force bool
revision string
resources *[]string
prune bool
dryRun bool
timeout uint
strategy string
force bool
)
const (
resourceFieldDelimiter = ":"
resourceFieldCount = 3
)
var command = &cobra.Command{
Use: "sync APPNAME",
@@ -841,11 +844,28 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
var syncResources []argoappv1.SyncOperationResource
if resources != nil {
syncResources = []argoappv1.SyncOperationResource{}
for _, r := range *resources {
fields := strings.Split(r, resourceFieldDelimiter)
if len(fields) != resourceFieldCount {
log.Fatalf("Resource should have GROUP%sKIND%sNAME, but instead got: %s", resourceFieldDelimiter, resourceFieldDelimiter, r)
}
rsrc := argoappv1.SyncOperationResource{
Group: fields[0],
Kind: fields[1],
Name: fields[2],
}
syncResources = append(syncResources, rsrc)
}
}
syncReq := application.ApplicationSyncRequest{
Name: &appName,
DryRun: dryRun,
Revision: revision,
Prune: prune,
Name: &appName,
DryRun: dryRun,
Revision: revision,
Resources: syncResources,
Prune: prune,
}
switch strategy {
case "apply":
@@ -861,7 +881,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
_, err := appIf.Sync(ctx, &syncReq)
errors.CheckError(err)
app, err := waitOnApplicationStatus(appIf, appName, timeout, false, false, true)
app, err := waitOnApplicationStatus(appIf, appName, timeout, false, false, true, syncResources)
errors.CheckError(err)
pruningRequired := 0
@@ -882,6 +902,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
command.Flags().BoolVar(&dryRun, "dry-run", false, "Preview apply without affecting cluster")
command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources")
command.Flags().StringVar(&revision, "revision", "", "Sync to a specific revision. Preserves parameter overrides")
resources = command.Flags().StringArray("resource", nil, fmt.Sprintf("Sync only specific resources as GROUP%sKIND%sNAME. Fields may be blank. This option may be specified repeatedly", resourceFieldDelimiter, resourceFieldDelimiter))
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
command.Flags().StringVar(&strategy, "strategy", "", "Sync strategy (one of: apply|hook)")
command.Flags().BoolVar(&force, "force", false, "Use a force apply")
@@ -936,7 +957,7 @@ func (rs *resourceState) Merge(newState *resourceState) bool {
return updated
}
func calculateResourceStates(app *argoappv1.Application) map[string]*resourceState {
func calculateResourceStates(app *argoappv1.Application, syncResources []argoappv1.SyncOperationResource) map[string]*resourceState {
resStates := make(map[string]*resourceState)
for _, res := range app.Status.ComparisonResult.Resources {
obj, err := argoappv1.UnmarshalToUnstructured(res.TargetState)
@@ -945,6 +966,9 @@ func calculateResourceStates(app *argoappv1.Application) map[string]*resourceSta
obj, err = argoappv1.UnmarshalToUnstructured(res.LiveState)
errors.CheckError(err)
}
if syncResources != nil && !argo.ContainsSyncResource(obj, syncResources) {
continue
}
newState := newResourceState(obj.GetKind(), obj.GetName(), string(res.Status), res.Health.Status, "", "")
key := newState.Key()
if prev, ok := resStates[key]; ok {
@@ -958,8 +982,6 @@ func calculateResourceStates(app *argoappv1.Application) map[string]*resourceSta
if app.Status.OperationState != nil {
if app.Status.OperationState.SyncResult != nil {
opResult = app.Status.OperationState.SyncResult
} else if app.Status.OperationState.RollbackResult != nil {
opResult = app.Status.OperationState.SyncResult
}
}
if opResult == nil {
@@ -988,7 +1010,7 @@ func calculateResourceStates(app *argoappv1.Application) map[string]*resourceSta
return resStates
}
func waitOnApplicationStatus(appClient application.ApplicationServiceClient, appName string, timeout uint, watchSync, watchHealth, watchOperation bool) (*argoappv1.Application, error) {
func waitOnApplicationStatus(appClient application.ApplicationServiceClient, appName string, timeout uint, watchSync bool, watchHealth bool, watchOperation bool, syncResources []argoappv1.SyncOperationResource) (*argoappv1.Application, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -1045,7 +1067,7 @@ func waitOnApplicationStatus(appClient application.ApplicationServiceClient, app
return app, nil
}
newStates := calculateResourceStates(app)
newStates := calculateResourceStates(app, syncResources)
for _, newState := range newStates {
var doPrint bool
stateKey := newState.Key()
@@ -1146,7 +1168,9 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra
for _, depInfo := range app.Status.History {
switch output {
case "wide":
paramStr := paramString(depInfo.Params)
manifest, err := appIf.GetManifests(context.Background(), &application.ApplicationManifestQuery{Name: &appName, Revision: depInfo.Revision})
errors.CheckError(err)
paramStr := paramString(manifest.GetParams())
fmt.Fprintf(w, "%d\t%s\t%s\t%s\n", depInfo.ID, depInfo.DeployedAt, depInfo.Revision, paramStr)
default:
fmt.Fprintf(w, "%d\t%s\t%s\n", depInfo.ID, depInfo.DeployedAt, depInfo.Revision)
@@ -1159,7 +1183,7 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra
return command
}
func paramString(params []argoappv1.ComponentParameter) string {
func paramString(params []*argoappv1.ComponentParameter) string {
if len(params) == 0 {
return ""
}
@@ -1210,7 +1234,7 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
})
errors.CheckError(err)
_, err = waitOnApplicationStatus(appIf, appName, timeout, false, false, true)
_, err = waitOnApplicationStatus(appIf, appName, timeout, false, false, true, nil)
errors.CheckError(err)
},
}
@@ -1225,8 +1249,6 @@ const defaultCheckTimeoutSeconds = 0
func printOperationResult(opState *argoappv1.OperationState) {
if opState.SyncResult != nil {
fmt.Printf(printOpFmtStr, "Operation:", "Sync")
} else if opState.RollbackResult != nil {
fmt.Printf(printOpFmtStr, "Operation:", "Rollback")
}
fmt.Printf(printOpFmtStr, "Phase:", opState.Phase)
fmt.Printf(printOpFmtStr, "Start:", opState.StartedAt)

View File

@@ -633,8 +633,8 @@ func modifyProjectResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.C
// NewProjectAllowNamespaceResourceCommand returns a new instance of an `deny-cluster-resources` command
func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-namespace-resource PROJECT group kind"
desc := "Removes namespaced resource from black list"
use := "allow-namespace-resource PROJECT GROUP KIND"
desc := "Removes a namespaced API resource from the blacklist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
index := -1
for i, item := range proj.Spec.NamespaceResourceBlacklist {
@@ -654,8 +654,8 @@ func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOpti
// NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command
func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-namespace-resource PROJECT group kind"
desc := "Adds namespaced resource to black list"
use := "deny-namespace-resource PROJECT GROUP KIND"
desc := "Adds a namespaced API resource to the blacklist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
for _, item := range proj.Spec.NamespaceResourceBlacklist {
if item.Group == group && item.Kind == kind {
@@ -670,8 +670,8 @@ func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptio
// NewProjectDenyClusterResourceCommand returns a new instance of an `deny-cluster-resource` command
func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-cluster-resource PROJECT group kind"
desc := "Adds cluster wide resource to white list"
use := "deny-cluster-resource PROJECT GROUP KIND"
desc := "Removes a cluster-scoped API resource from the whitelist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
index := -1
for i, item := range proj.Spec.ClusterResourceWhitelist {
@@ -691,8 +691,8 @@ func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions
// NewProjectAllowClusterResourceCommand returns a new instance of an `argocd proj allow-cluster-resource` command
func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-cluster-resource PROJECT group kind"
desc := "Removed cluster wide resource from white list"
use := "allow-cluster-resource PROJECT GROUP KIND"
desc := "Adds a cluster-scoped API resource to the whitelist"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
for _, item := range proj.Spec.ClusterResourceWhitelist {
if item.Group == group && item.Kind == kind {

View File

@@ -87,7 +87,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
}
command.Flags().StringVar(&repo.Username, "username", "", "username to the repository")
command.Flags().StringVar(&repo.Password, "password", "", "password to the repository")
command.Flags().StringVar(&sshPrivateKeyPath, "sshPrivateKeyPath", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().StringVar(&sshPrivateKeyPath, "ssh-private-key-path", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
return command
}

View File

@@ -14,6 +14,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -148,12 +149,28 @@ func (ctrl *ApplicationController) watchClusterResources(ctx context.Context, it
}
}()
config := item.RESTConfig()
ch, err := kube.WatchResourcesWithLabel(ctx, config, "", common.LabelApplicationName)
watchStartTime := time.Now()
ch, err := ctrl.kubectl.WatchResources(ctx, config, "", func(gvk schema.GroupVersionKind) metav1.ListOptions {
ops := metav1.ListOptions{}
if !kube.IsCRDGroupVersionKind(gvk) {
ops.LabelSelector = common.LabelApplicationName
}
return ops
})
if err != nil {
return err
}
for event := range ch {
eventObj := event.Object.(*unstructured.Unstructured)
if kube.IsCRD(eventObj) {
// restart if new CRD has been created after watch started
if event.Type == watch.Added && watchStartTime.Before(eventObj.GetCreationTimestamp().Time) {
return fmt.Errorf("Restarting the watch because a new CRD was added.")
} else if event.Type == watch.Deleted {
return fmt.Errorf("Restarting the watch because a CRD was deleted.")
}
}
objLabels := eventObj.GetLabels()
if objLabels == nil {
objLabels = make(map[string]string)
@@ -313,7 +330,7 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
if err == nil {
config := clst.RESTConfig()
err = kube.DeleteResourceWithLabel(config, app.Spec.Destination.Namespace, common.LabelApplicationName, app.Name)
err = kube.DeleteResourcesWithLabel(config, app.Spec.Destination.Namespace, common.LabelApplicationName, app.Name)
if err == nil {
app.SetCascadedDeletion(false)
var patch []byte

View File

@@ -3,16 +3,9 @@ package controller
import (
"context"
"encoding/json"
"runtime/debug"
"time"
"runtime/debug"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,6 +18,12 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/git"
)
type SecretController struct {
@@ -93,22 +92,13 @@ func (ctrl *SecretController) getRepoConnectionState(repo *v1alpha1.Repository)
ModifiedAt: repo.ConnectionState.ModifiedAt,
Status: v1alpha1.ConnectionStatusUnknown,
}
closer, client, err := ctrl.repoClientset.NewRepositoryClient()
if err != nil {
log.Errorf("Unable to create repository client: %v", err)
return state
}
defer util.Close(closer)
_, err = client.ListDir(context.Background(), &repository.ListDirRequest{Repo: repo, Path: ".gitignore"})
err := git.TestRepo(repo.Repo, repo.Username, repo.Password, repo.SSHPrivateKey)
if err == nil {
state.Status = v1alpha1.ConnectionStatusSuccessful
} else {
state.Status = v1alpha1.ConnectionStatusFailed
state.Message = err.Error()
}
return state
}

View File

@@ -171,7 +171,10 @@ func (s *appStateManager) getLiveObjs(app *v1alpha1.Application, targetObjs []*u
controlledLiveObj := make([]*unstructured.Unstructured, len(targetObjs))
// Move live resources which have corresponding target object to controlledLiveObj
dynClientPool := dynamic.NewDynamicClientPool(restConfig)
dynamicIf, err := dynamic.NewForConfig(restConfig)
if err != nil {
return nil, nil, err
}
disco, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
return nil, nil, err
@@ -185,10 +188,6 @@ func (s *appStateManager) getLiveObjs(app *v1alpha1.Application, targetObjs []*u
// of ArgoCD. In order to determine that it is truly missing, we fall back to perform a
// direct lookup of the resource by name. See issue #141
gvk := targetObj.GroupVersionKind()
dclient, err := dynClientPool.ClientForGroupVersionKind(gvk)
if err != nil {
return nil, nil, err
}
apiResource, err := kubeutil.ServerResourceForGroupVersionKind(disco, gvk)
if err != nil {
if !apierr.IsNotFound(err) {
@@ -196,7 +195,7 @@ func (s *appStateManager) getLiveObjs(app *v1alpha1.Application, targetObjs []*u
}
// If we get here, the app is comprised of a custom resource which has yet to be registered
} else {
liveObj, err = kubeutil.GetLiveResource(dclient, targetObj, apiResource, app.Spec.Destination.Namespace)
liveObj, err = kubeutil.GetLiveResource(dynamicIf, targetObj, apiResource, app.Spec.Destination.Namespace)
if err != nil {
return nil, nil, err
}
@@ -394,7 +393,6 @@ func (s *appStateManager) persistDeploymentInfo(
history := append(app.Status.History, v1alpha1.DeploymentInfo{
ComponentParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
Revision: revision,
Params: params,
DeployedAt: metav1.NewTime(time.Now().UTC()),
ID: nextID,
})

View File

@@ -33,12 +33,13 @@ type syncContext struct {
proj *appv1.AppProject
comparison *appv1.ComparisonResult
config *rest.Config
dynClientPool dynamic.ClientPool
dynamicIf dynamic.Interface
disco discovery.DiscoveryInterface
kubectl kube.Kubectl
namespace string
syncOp *appv1.SyncOperation
syncRes *appv1.SyncOperationResult
syncResources []appv1.SyncOperationResource
opState *appv1.OperationState
manifestInfo *repository.ManifestResponse
log *log.Entry
@@ -55,10 +56,12 @@ func (s *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
var revision string
var syncOp appv1.SyncOperation
var syncRes *appv1.SyncOperationResult
var syncResources []appv1.SyncOperationResource
var overrides []appv1.ComponentParameter
if state.Operation.Sync != nil {
syncOp = *state.Operation.Sync
syncResources = syncOp.Resources
overrides = []appv1.ComponentParameter(state.Operation.Sync.ParameterOverrides)
if state.SyncResult != nil {
syncRes = state.SyncResult
@@ -67,34 +70,6 @@ func (s *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
syncRes = &appv1.SyncOperationResult{}
state.SyncResult = syncRes
}
} else if state.Operation.Rollback != nil {
var deploymentInfo *appv1.DeploymentInfo
for _, info := range app.Status.History {
if info.ID == app.Operation.Rollback.ID {
deploymentInfo = &info
break
}
}
if deploymentInfo == nil {
state.Phase = appv1.OperationFailed
state.Message = fmt.Sprintf("application %s does not have deployment with id %v", app.Name, app.Operation.Rollback.ID)
return
}
// Rollback is just a convenience around Sync
syncOp = appv1.SyncOperation{
Revision: deploymentInfo.Revision,
DryRun: state.Operation.Rollback.DryRun,
Prune: state.Operation.Rollback.Prune,
SyncStrategy: &appv1.SyncStrategy{Apply: &appv1.SyncStrategyApply{}},
}
overrides = deploymentInfo.ComponentParameterOverrides
if state.RollbackResult != nil {
syncRes = state.RollbackResult
revision = state.RollbackResult.Revision
} else {
syncRes = &appv1.SyncOperationResult{}
state.RollbackResult = syncRes
}
} else {
state.Phase = appv1.OperationFailed
state.Message = "Invalid operation request: no operation specified"
@@ -107,6 +82,7 @@ func (s *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
// Take the value in the requested operation. We will resolve this to a SHA later.
revision = syncOp.Revision
}
comparison, manifestInfo, conditions, err := s.CompareAppState(app, revision, overrides)
if err != nil {
state.Phase = appv1.OperationError
@@ -136,13 +112,18 @@ func (s *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
}
restConfig := clst.RESTConfig()
dynClientPool := dynamic.NewDynamicClientPool(restConfig)
disco, err := discovery.NewDiscoveryClientForConfig(restConfig)
dynamicIf, err := dynamic.NewForConfig(restConfig)
if err != nil {
state.Phase = appv1.OperationError
state.Message = fmt.Sprintf("Failed to initialize dynamic client: %v", err)
return
}
disco, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
state.Phase = appv1.OperationError
state.Message = fmt.Sprintf("Failed to initialize discovery client: %v", err)
return
}
proj, err := argo.GetAppProject(&app.Spec, s.appclientset, s.namespace)
if err != nil {
@@ -156,12 +137,13 @@ func (s *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
proj: proj,
comparison: comparison,
config: restConfig,
dynClientPool: dynClientPool,
dynamicIf: dynamicIf,
disco: disco,
kubectl: s.kubectl,
namespace: app.Spec.Destination.Namespace,
syncOp: &syncOp,
syncRes: syncRes,
syncResources: syncResources,
opState: state,
manifestInfo: manifestInfo,
log: log.WithFields(log.Fields{"application": app.Name}),
@@ -173,7 +155,7 @@ func (s *appStateManager) SyncAppState(app *appv1.Application, state *appv1.Oper
syncCtx.sync()
}
if !syncOp.DryRun && syncCtx.opState.Phase.Successful() {
if !syncOp.DryRun && len(syncOp.Resources) == 0 && syncCtx.opState.Phase.Successful() {
err := s.persistDeploymentInfo(app, manifestInfo.Revision, manifestInfo.Params, nil)
if err != nil {
state.Phase = appv1.OperationError
@@ -262,13 +244,22 @@ func (sc *syncContext) generateSyncTasks() ([]syncTask, bool) {
sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("Failed to unmarshal target object: %v", err))
return nil, false
}
syncTask := syncTask{
liveObj: liveObj,
targetObj: targetObj,
if sc.syncResources == nil || argo.ContainsSyncResource(liveObj, sc.syncResources) || argo.ContainsSyncResource(targetObj, sc.syncResources) {
syncTask := syncTask{
liveObj: liveObj,
targetObj: targetObj,
}
syncTasks = append(syncTasks, syncTask)
}
syncTasks = append(syncTasks, syncTask)
}
return syncTasks, true
if len(syncTasks) == 0 {
if len(sc.comparison.Resources) == 0 {
sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("Application has no resources"))
} else {
sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("Specified resources filter does not match any application resource"))
}
}
return syncTasks, len(syncTasks) > 0
}
// startedPreSyncPhase detects if we already started the PreSync stage of a sync operation.
@@ -641,15 +632,12 @@ func (sc *syncContext) runHook(hook *unstructured.Unstructured, hookType appv1.H
}
gvk := hook.GroupVersionKind()
dclient, err := sc.dynClientPool.ClientForGroupVersionKind(gvk)
if err != nil {
return false, err
}
apiResource, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
if err != nil {
return false, err
}
resIf := dclient.Resource(apiResource, sc.namespace)
resource := kube.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
resIf := kube.ToResourceInterface(sc.dynamicIf, apiResource, resource, sc.namespace)
var liveObj *unstructured.Unstructured
existing, err := resIf.Get(hook.GetName(), metav1.GetOptions{})
@@ -952,15 +940,12 @@ func (sc *syncContext) deleteHook(name, kind, apiVersion string) error {
Version: groupVersion[1],
Kind: kind,
}
dclient, err := sc.dynClientPool.ClientForGroupVersionKind(gvk)
if err != nil {
return err
}
apiResource, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
if err != nil {
return err
}
resIf := dclient.Resource(apiResource, sc.namespace)
resource := kube.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
resIf := kube.ToResourceInterface(sc.dynamicIf, apiResource, resource, sc.namespace)
return resIf.Delete(name, &metav1.DeleteOptions{})
}

View File

@@ -1,10 +1,14 @@
package controller
import (
"context"
"fmt"
"sort"
"testing"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
@@ -25,6 +29,13 @@ type kubectlOutput struct {
type mockKubectlCmd struct {
commands map[string]kubectlOutput
events chan watch.Event
}
func (k mockKubectlCmd) WatchResources(
ctx context.Context, config *rest.Config, namespace string, selector func(kind schema.GroupVersionKind) v1.ListOptions) (chan watch.Event, error) {
return k.events, nil
}
func (k mockKubectlCmd) DeleteResource(config *rest.Config, obj *unstructured.Unstructured, namespace string) error {

View File

@@ -9,8 +9,14 @@
## Features
* [Application Sources](application_sources.md)
* [Application Parameters](parameters.md)
* [Projects](projects.md)
* [Automated Sync](auto_sync.md)
* [Resource Health](health.md)
* [Resource Hooks](resource_hooks.md)
* [Single Sign On](sso.md)
* [Webhooks](webhook.md)
* [RBAC](rbac.md)
* [RBAC](rbac.md)
## Other
* [Configuring Ingress](ingress.md)
* [F.A.Q.](faq.md)

View File

@@ -5,7 +5,7 @@ ArgoCD supports several different ways in which kubernetes manifests can be defi
* [ksonnet](https://ksonnet.io) applications
* [kustomize](https://kustomize.io) applications
* [helm](https://helm.sh) charts
* Simple directory of YAML/json manifests
* Directory of YAML/json/jsonnet manifests
Some additional considerations should be made when deploying apps of a particular type:

51
docs/auto_sync.md Normal file
View File

@@ -0,0 +1,51 @@
# Automated Sync Policy
ArgoCD has the ability to automatically sync an application when it detects differences between
the desired manifests in git, and the live state in the cluster. A benefit of automatic sync is that
CI/CD pipelines no longer need direct access to the ArgoCD API server to perform the deployment.
Instead, the pipeline simply commits and pushes the manifest changes to the tracking git
repository.
To configure automated sync run:
```bash
argocd app set <APPNAME> --sync-policy automated
```
Alternatively, if creating the application using an application manifest, specify a syncPolicy with an
`automated` policy.
```yaml
spec:
syncPolicy:
automated: {}
```
## Automatic Pruning
By default (and as a safety mechanism), automated sync will not delete resources when ArgoCD detects
the resource is no longer defined in git. To prune the resources, a manual sync can always be
performed (with pruning checked). Pruning can also be enabled to happen automatically as part of the
automated sync by running:
```bash
argocd app set <APPNAME> --auto-prune
```
Or by setting the prune option to true in the automated sync policy:
```yaml
spec:
syncPolicy:
automated:
prune: true
```
## Automated Sync Semantics
* An automated sync will only be performed if the application is OutOfSync. Applications in a
Synced or error state will not attempt automated sync.
* Automated sync will only attempt one synchronization per unique combination of commit SHA1 and
application parameters. If the most recent successful sync in the history was already performed
against the same commit-SHA and parameters, a second sync will not be attempted.
* Automatic sync will not reattempt a sync if the previous sync attempt against the same commit-SHA
and parameters had failed.
* Rollback cannot be performed against an application with automated sync enabled.

16
docs/faq.md Normal file
View File

@@ -0,0 +1,16 @@
# FAQ
## Why is my application still `OutOfSync` immediately after a successful Sync?
It is possible for an application to still be `OutOfSync` even immediately after a successful Sync
operation. Some reasons for this might be:
* There may be problems in the manifests themselves, which may contain extra/unknown fields beyond the
actual K8s spec. These extra fields would get dropped when querying Kubernetes for the live state,
resulting in an `OutOfSync` status indicating a missing field was detected.
* The sync was performed (with pruning disabled), and there are resources which need to be deleted.
* A mutating webhook altered the manifest after it was submitted to Kubernetes
To debug `OutOfSync` issues, run the `app diff` command to see the differences between git and live:
```
argocd app diff APPNAME
```

View File

@@ -9,7 +9,7 @@ An example guestbook application is provided to demonstrate how ArgoCD works.
## 1. Install ArgoCD
```bash
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/v0.8.2/manifests/install.yaml
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/v0.9.2/manifests/install.yaml
```
This will create a new namespace, `argocd`, where ArgoCD services and application resources will live.
@@ -31,42 +31,39 @@ brew install argoproj/tap/argocd
On Linux:
```bash
curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v0.8.2/argocd-linux-amd64
curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v0.9.2/argocd-linux-amd64
chmod +x /usr/local/bin/argocd
```
## 3. Open access to ArgoCD API server
## 3. Access the ArgoCD API server
By default, the ArgoCD API server is not exposed with an external IP. To expose the API server,
change the service type to `LoadBalancer`:
By default, the ArgoCD API server is not exposed with an external IP. To access the API server,
choose one of the following means to expose the ArgoCD API server:
### Service Type LoadBalancer
Change the argocd-server service type to `LoadBalancer`:
```bash
kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
```
### Notes about Ingress and AWS Load Balancers
* If using Ingress objects without TLS from the ingress-controller to ArgoCD API server, you will
need to add the `--insecure` command line flag to the argocd-server deployment.
* AWS Classic ELB (in HTTP mode) and ALB do not have full support for HTTP2/gRPC which is the
protocol used by the `argocd` CLI. When using an AWS load balancer, either Classic ELB in
passthrough mode is needed, or NLBs.
### Ingress
Follow the [ingress documentation](ingress.md) on how to configure ArgoCD with ingress.
### Port Forwarding
`kubectl port-forward` can also be used to connect to the API server without exposing the service.
The API server can be accessed using the localhost address/port.
## 4. Login to the server from the CLI
## 4. Login using the CLI
Login with using the `admin` user. The initial password is autogenerated to be the pod name of the
Login as the `admin` user. The initial password is autogenerated to be the pod name of the
ArgoCD API server. This can be retrieved with the command:
```bash
kubectl get pods -n argocd -l app=argocd-server -o name | cut -d'/' -f 2
```
Using the above password, login to ArgoCD's external IP:
On Minikube:
```bash
argocd login $(minikube service argocd-server -n argocd --url | cut -d'/' -f 3) --name minikube
```
Other clusters:
```bash
kubectl get svc -n argocd argocd-server
argocd login <EXTERNAL-IP>
@@ -79,39 +76,34 @@ argocd relogin
```
## 5. Register a cluster to deploy apps to
## 5. Register a cluster to deploy apps to (optional)
We will now register a cluster to deploy applications to. First list all clusters contexts in your
kubconfig:
This step registers a cluster's credentials to ArgoCD, and is only necessary when deploying to
an external cluster. When deploying internally (to the same cluster that ArgoCD is running in),
https://kubernetes.default.svc should be used as the application's K8s API server address.
First list all cluster contexts in your current kubeconfig:
```bash
argocd cluster add
```
Choose a context name from the list and supply it to `argocd cluster add CONTEXTNAME`. For example,
for minikube context, run:
for docker-for-desktop context, run:
```bash
argocd cluster add minikube --in-cluster
argocd cluster add docker-for-desktop
```
The above command installs an `argocd-manager` ServiceAccount and ClusterRole into the cluster
associated with the supplied kubectl context. ArgoCD uses the service account token to perform its
associated with the supplied kubectl context. ArgoCD uses this service account token to perform its
management tasks (i.e. deploy/monitoring).
The `--in-cluster` option indicates that the cluster we are registering, is the same cluster that
ArgoCD is running in. This allows ArgoCD to connect to the cluster using the internal kubernetes
hostname (kubernetes.default.svc). When registering a cluster external to ArgoCD, the `--in-cluster`
flag should be omitted.
## 6. Create the application from a git repository
## 6. Create an application from a git repository location
### Creating apps via UI
Open a browser to the ArgoCD external UI, and login using the credentials set in step 4.
On Minikube:
```bash
minikube service argocd-server -n argocd
```
Open a browser to the ArgoCD external UI, and login using the credentials set in step 4, and the
external IP/hostname set in step 4.
Connect a git repository containing your apps. An example repository containing a sample
guestbook application is available at https://github.com/argoproj/argocd-example-apps.git.
@@ -131,7 +123,7 @@ After connecting a git repository, select the guestbook application for creation
Applications can be also be created using the ArgoCD CLI:
```bash
argocd app create guestbook-default --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --env default
argocd app create guestbook-default --repo https://github.com/argoproj/argocd-example-apps.git --path ksonnet-guestbook
```
## 7. Sync (deploy) the application
@@ -139,7 +131,7 @@ argocd app create guestbook-default --repo https://github.com/argoproj/argocd-ex
Once the guestbook application is created, you can now view its status:
From UI:
![create app](assets/guestbook-app.png)
![guestbook app](assets/guestbook-app.png)
From CLI:
```bash
@@ -174,12 +166,12 @@ Deployment guestbook-ui deployment.apps "guestbook-ui" created
```
This command retrieves the manifests from git repository and performs a `kubectl apply` of the
manifests. The guestbook app is now running and you can now view its resource
components, logs, events, and assessed health:
manifests. The guestbook app is now running and you can now view its resource components, logs,
events, and assessed health status:
![view app](assets/guestbook-tree.png)
## 8. Next Steps
ArgoCD supports additional features such as SSO, WebHooks, RBAC, Projects. See the rest of
the [documentation](./) for details.
ArgoCD supports additional features such as automated sync, SSO, WebHooks, RBAC, Projects. See the
rest of the [documentation](./) for details.

View File

@@ -6,7 +6,6 @@ surfaced to the overall Application health status as a whole. The following chec
specific types of kubernetes resources:
### Deployment, ReplicaSet, StatefulSet DaemonSet
* Observed generation is equal to desired generation.
* Number of **updated** replicas equals the number of desired replicas.
@@ -16,3 +15,6 @@ with at least one value for `hostname` or `IP`.
### Ingress
* The `status.loadBalancer.ingress` list is non-empty, with at least one value for `hostname` or `IP`.
### PersistentVolumeClaim
* The `status.phase` is `Bound`

126
docs/ingress.md Normal file
View File

@@ -0,0 +1,126 @@
# Ingress Configuration
ArgoCD runs both a gRPC server (used by the CLI), as well as a HTTP/HTTPS server (used by the UI).
Both protocols are exposed by the argocd-server service object on the following ports:
* 443 - gRPC/HTTPS
* 80 - HTTP (redirects to HTTPS)
There are several ways in which Ingress can be configured.
## [kubernetes/ingress-nginx](https://github.com/kubernetes/ingress-nginx)
### Option 1: ssl-passthrough
Because ArgoCD serves multiple protocols (gRPC/HTTPS) on the same port (443), this provides a
challenge when attempting to define a single nginx ingress object and rule for the argocd-service,
since the `nginx.ingress.kubernetes.io/backend-protocol` [annotation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#backend-protocol)
accepts only a single value for the backend protocol (e.g. HTTP, HTTPS, GRPC, GRPCS).
In order to expose the ArgoCD API server with a single ingress rule and hostname, the
`nginx.ingress.kubernetes.io/ssl-passthrough` [annotation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#ssl-passthrough)
must be used to passthrough TLS connections and terminate TLS at the ArgoCD API server.
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server-ingress
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
rules:
- host: argocd.example.com
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
```
The above rule terminates TLS at the ArgoCD API server, which detects the protocol being used,
and responds appropriately. Note that the `nginx.ingress.kubernetes.io/ssl-passthrough` annotation
requires that the `--enable-ssl-passthrough` flag be added to the command line arguments to
`nginx-ingress-controller`.
### Option 2: Multiple ingress objects and hosts
Since ingress-nginx Ingress supports only a single protocol per Ingress object, an alternative
way would be to define two Ingress objects. One for HTTP/HTTPS, and the other for gRPC:
HTTP/HTTPS Ingress:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server-http-ingress
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
rules:
- http:
paths:
- backend:
serviceName: argocd-server
servicePort: http
host: argocd.example.com
tls:
- hosts:
- argocd.example.com
secretName: argocd-secret
```
gRPC Ingress:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server-grpc-ingress
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
spec:
rules:
- http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
host: grpc.argocd.example.com
tls:
- hosts:
- grpc.argocd.example.com
secretName: argocd-secret
```
The API server should then be run with TLS disabled. Edit the `argocd-server` deployment to add the
`--insecure` flag to the argocd-server command:
```yaml
spec:
template:
spec:
name: argocd-server
containers:
- command:
- /argocd-server
- --staticassets
- /shared/app
- --repo-server
- argocd-repo-server:8081
- --insecure
```
The obvious disadvantage of this approach is that it requires two separate hostnames for
the API server -- one for gRPC and the other for HTTP/HTTPS. However, it allows TLS termination to
happen at the ingress controller.
## AWS Application Load Balancers (ALBs) and Classic ELB (HTTP mode)
Neither ALBs nor Classic ELBs in HTTP mode have full support for HTTP2/gRPC, which is the
protocol used by the `argocd` CLI. Thus, when using an AWS load balancer, either a Classic ELB in
passthrough mode or an NLB is needed.

View File

@@ -3,8 +3,9 @@
1. Tag, build, and push argo-cd-ui
```bash
cd argo-cd-ui
git checkout -b release-X.Y
git tag vX.Y.Z
git push upstream vX.Y.Z
git push upstream release-X.Y --tags
IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z DOCKER_PUSH=true yarn docker
```
@@ -14,12 +15,13 @@ git checkout -b release-X.Y
git push upstream release-X.Y
```
3. Update manifests with new version
3. Update VERSION and manifests with new version
```bash
vi VERSION # ensure value is desired X.Y.Z semantic version
vi manifests/base/kustomization.yaml # update with new image tags
make manifests
git commit -a -m "Update manifests to vX.Y.Z"
git push upstream master
git push upstream release-X.Y
```
4. Tag, build, and push release to docker hub

181
docs/projects.md Normal file
View File

@@ -0,0 +1,181 @@
## Projects
Projects provide a logical grouping of applications, which is useful when ArgoCD is used by multiple
teams. Projects provide the following features:
* ability to restrict *what* may be deployed (the git source repositories)
* ability to restrict *where* apps may be deployed (the destination clusters and namespaces)
* ability to control what type of objects may be deployed (e.g. RBAC, CRDs, DaemonSets, NetworkPolicy etc...)
### The default project
Every application belongs to a single project. If unspecified, an application belongs to the
`default` project, which is created automatically and by default, permits deployments from any
source repo, to any cluster, and all resource Kinds. The default project can be modified, but not
deleted. When initially created, its specification is configured to be the most permissive:
```yaml
spec:
sourceRepos:
- '*'
destinations:
- namespace: '*'
server: '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'
```
### Creating Projects
Additional projects can be created to give separate teams different levels of access to namespaces.
The following command creates a new project `myproject` which can deploy applications to namespace
`mynamespace` of cluster `https://kubernetes.default.svc`. The permitted git source repository is
set to `https://github.com/argoproj/argocd-example-apps.git` repository.
```
argocd proj create myproject -d https://kubernetes.default.svc,mynamespace -s https://github.com/argoproj/argocd-example-apps.git
```
### Managing Projects
Permitted source git repositories are managed using commands:
```bash
argocd project add-source <PROJECT> <REPO>
argocd project remove-source <PROJECT> <REPO>
```
Permitted destination clusters and namespaces are managed with the commands:
```
argocd project add-destination <PROJECT> <CLUSTER>,<NAMESPACE>
argocd project remove-destination <PROJECT> <CLUSTER>,<NAMESPACE>
```
Permitted destination K8s resource kinds are managed with the following commands. Note that namespace-scoped
resources are restricted via a blacklist, whereas cluster-scoped resources are restricted via
whitelist.
```
argocd project allow-cluster-resource <PROJECT> <GROUP> <KIND>
argocd project allow-namespace-resource <PROJECT> <GROUP> <KIND>
argocd project deny-cluster-resource <PROJECT> <GROUP> <KIND>
argocd project deny-namespace-resource <PROJECT> <GROUP> <KIND>
```
### Assign application to a project
The application project can be changed using `app set` command. In order to change the project of
an app, the user must have permissions to access the new project.
```
argocd app set guestbook-default --project myproject
```
### Configuring RBAC with projects
Once projects have been defined, RBAC rules can be written to restrict access to the applications
in the project. The following example configures RBAC for two GitHub teams: `team1` and `team2`,
both in the GitHub org, `some-github-org`. There are two projects, `project-a` and `project-b`.
`team1` can only manage applications in `project-a`, while `team2` can only manage applications in
`project-b`. Both `team1` and `team2` have the ability to manage repositories.
*ConfigMap `argocd-rbac-cm` example:*
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
data:
policy.default: ""
policy.csv: |
p, some-github-org:team1, applications, *, project-a/*, allow
    p, some-github-org:team2, applications, *, project-b/*, allow
p, role:org-admin, repositories, get, *, allow
p, role:org-admin, repositories, create, *, allow
p, role:org-admin, repositories, update, *, allow
p, role:org-admin, repositories, delete, *, allow
g, some-github-org:team1, org-admin
g, some-github-org:team2, org-admin
```
## Project Roles
Projects include a feature called roles that enable automated access to a project's applications.
These can be used to give a CI pipeline a restricted set of permissions. For example, a CI system
may only be able to sync a single app (but not change its source or destination).
Projects can have multiple roles, and those roles can have different access granted to them. These
permissions are called policies, and they are stored within the role as a list of policy strings.
A role's policy can only grant access to that role and is limited to applications within the role's
project. However, the policies have an option for granting wildcard access to any application
within a project.
In order to create roles in a project and add policies to a role, a user will need permission to
update a project. The following commands can be used to manage a role.
```bash
argoproj proj role list
argoproj proj role get
argoproj proj role create
argoproj proj role delete
argoproj proj role add-policy
argoproj proj role remove-policy
```
Project roles by themselves are not useful without generating a token to associate with that role. ArgoCD
supports JWT tokens as the means to authenticate to a role. Since the JWT token is
associated with a role's policies, any changes to the role's policies will immediately take effect
for that JWT token.
The following commands are used to manage the JWT tokens.
```bash
argoproj proj role create-token PROJECT ROLE-NAME
argoproj proj role delete-token PROJECT ROLE-NAME ISSUED-AT
```
Since the JWT tokens aren't stored in ArgoCD, they can only be retrieved when they are created. A
user can leverage them in the cli by either passing them in using the `--auth-token` flag or setting
the ARGOCD_AUTH_TOKEN environment variable. The JWT tokens can be used until they expire or are
revoked. JWT tokens can be created with or without an expiration, but by default the CLI
creates them without an expiration date. Even if a token has not expired, it cannot be used if
it has been revoked.
Below is an example of leveraging a JWT token to access a guestbook application. It makes the
assumption that the user already has a project named myproject and an application called
guestbook-default.
```bash
PROJ=myproject
APP=guestbook-default
ROLE=get-role
argocd proj role create $PROJ $ROLE
argocd proj role create-token $PROJ $ROLE -e 10m
JWT=<value from command above>
argocd proj role list $PROJ
argocd proj role get $PROJ $ROLE
# This command will fail because the JWT Token associated with the project role does not have a policy to allow access to the application
argocd app get $APP --auth-token $JWT
# Adding a policy to grant access to the application for the new role
argocd proj role add-policy $PROJ $ROLE --action get --permission allow --object $APP
argocd app get $PROJ-$ROLE --auth-token $JWT
# Removing the policy we added and adding one with a wildcard.
argocd proj role remove-policy $PROJ $TOKEN -a get -o $PROJ-$TOKEN
argocd proj role remove-policy $PROJ $TOKEN -a get -o '*'
# The wildcard allows us to access the application due to the wildcard.
argocd app get $PROJ-$TOKEN --auth-token $JWT
argocd proj role get $PROJ
argocd proj role get $PROJ $ROLE
# Revoking the JWT token
argocd proj role delete-token $PROJ $ROLE <id field from the last command>
# This will fail since the JWT Token was deleted for the project role.
argocd app get $APP --auth-token $JWT
```

View File

@@ -14,149 +14,27 @@ RBAC configuration allows defining roles and groups. ArgoCD has two pre-defined
* `role:admin` - unrestricted access to all resources
These role definitions can be seen in [builtin-policy.csv](../util/rbac/builtin-policy.csv)
Additional roles and groups can be configured in `argocd-rbac-cm` ConfigMap. The example below custom role `org-admin`. The role is assigned to any user which belongs to
`your-github-org:your-team` group. All other users get `role:readonly` and cannot modify ArgoCD settings.
Additional roles and groups can be configured in `argocd-rbac-cm` ConfigMap. The example below
configures a custom role, named `org-admin`. The role is assigned to any user which belongs to
`your-github-org:your-team` group. All other users get the default policy of `role:readonly`,
which cannot modify ArgoCD settings.
*ConfigMap `argocd-rbac-cm` example:*
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
data:
policy.default: role:readonly
policy.csv: |
p, role:org-admin, applications, *, */*, allow
p, role:org-admin, applications/*, *, */*, allow
p, role:org-admin, clusters, get, *, allow
p, role:org-admin, repositories, get, *, allow
p, role:org-admin, repositories/apps, get, *, allow
p, role:org-admin, repositories, create, *, allow
p, role:org-admin, repositories, update, *, allow
p, role:org-admin, repositories, delete, *, allow
g, your-github-org:your-team, role:org-admin
kind: ConfigMap
metadata:
name: argocd-rbac-cm
```
## Configure Projects
Argo projects allow grouping applications which is useful if ArgoCD is used by multiple teams. Additionally, projects restrict source repositories and destination
Kubernetes clusters which can be used by applications belonging to the project.
### 1. Create new project
Following command creates project `myproject` which can deploy applications to namespace `default` of cluster `https://kubernetes.default.svc`. The valid application source is defined in the `https://github.com/argoproj/argocd-example-apps.git` repository.
```
argocd proj create myproject -d https://kubernetes.default.svc,default -s https://github.com/argoproj/argocd-example-apps.git
```
Project sources and destinations can be managed using commands
```
argocd project add-destination
argocd project remove-destination
argocd project add-source
argocd project remove-source
```
### 2. Assign application to a project
Each application belongs to a project. By default, all applications belong to the default project, which provides access to any source repo/cluster. The application project can be
changed using the `app set` command:
```
argocd app set guestbook-default --project myproject
```
### 3. Update RBAC rules
The following example configures admin access for two teams. Each team has access only to the applications of one project (`team1` can access the `default` project and `team2` can access
the `myproject` project).
*ConfigMap `argocd-rbac-cm` example:*
```yaml
apiVersion: v1
data:
policy.default: ""
policy.csv: |
p, role:team1-admin, applications, *, default/*, allow
p, role:team1-admin, applications/*, *, default/*, allow
p, role:team1-admin, applications, *, myproject/*, allow
p, role:team1-admin, applications/*, *, myproject/*, allow
p, role:org-admin, clusters, get, *, allow
p, role:org-admin, repositories, get, *, allow
p, role:org-admin, repositories/apps, get, *, allow
p, role:org-admin, repositories, create, *, allow
p, role:org-admin, repositories, update, *, allow
p, role:org-admin, repositories, delete, *, allow
g, role:team1-admin, org-admin
g, role:team2-admin, org-admin
g, your-github-org:your-team1, role:team1-admin
g, your-github-org:your-team2, role:team2-admin
kind: ConfigMap
metadata:
name: argocd-rbac-cm
```
## Project Roles
Projects include a feature called roles that allow users to define access to a project's applications. A project can have multiple roles, and those roles can have different access granted to them. These permissions are called policies, and they are stored within the role as a list of casbin strings. A role's policy can only grant access to that role and is limited to applications within the role's project. However, the policies have an option for granting wildcard access to any application within a project.
In order to create roles in a project and add policies to a role, a user will need permission to update a project. The following commands can be used to manage a role.
```
argoproj proj role list
argoproj proj role get
argoproj proj role create
argoproj proj role delete
argoproj proj role add-policy
argoproj proj role remove-policy
```
Project roles cannot be used unless a user creates an entity that is associated with that project role. ArgoCD supports creating JWT tokens with a role associated with them. Since the JWT token is associated with a role's policies, any changes to the role's policies will immediately take effect for that JWT token.
A user will need permission to update a project in order to create a JWT token for a role, and they can use the following commands to manage the JWT tokens.
```
argoproj proj role create-token
argoproj proj role delete-token
```
Since the JWT tokens aren't stored in ArgoCD, they can only be retrieved when they are created. A user can leverage them in the cli by either passing them in using the `--auth-token` flag or setting the ARGOCD_AUTH_TOKEN environment variable. The JWT tokens can be used until they expire or are revoked. JWT tokens can be created with or without an expiration, but by default the CLI creates them without an expiration date. Even if a token has not expired, it cannot be used if it has been revoked.
Below is an example of leveraging a JWT token to access the guestbook application. It makes the assumption that the user already has a project named myproject and an application called guestbook-default.
```
PROJ=myproject
APP=guestbook-default
ROLE=get-role
argocd proj role create $PROJ $ROLE
argocd proj role create-token $PROJ $ROLE -e 10m
JWT=<value from command above>
argocd proj role list $PROJ
argocd proj role get $PROJ $ROLE
#This command will fail because the JWT Token associated with the project role does not have a policy to allow access to the application
argocd app get $APP --auth-token $JWT
# Adding a policy to grant access to the application for the new role
argocd proj role add-policy $PROJ $ROLE --action get --permission allow --object $APP
argocd app get $PROJ-$ROLE --auth-token $JWT
# Removing the policy we added and adding one with a wildcard.
argocd proj role remove-policy $PROJ $TOKEN -a get -o $PROJ-$TOKEN
argocd proj role remove-policy $PROJ $TOKEN -a get -o '*'
# The wildcard allows us to access the application due to the wildcard.
argocd app get $PROJ-$TOKEN --auth-token $JWT
argocd proj role get $PROJ
argocd proj role get $PROJ $ROLE
# Revoking the JWT token
argocd proj role delete-token $PROJ $ROLE <id field from the last command>
# This will fail since the JWT Token was deleted for the project role.
argocd app get $APP --auth-token $JWT
```

View File

@@ -4,13 +4,14 @@ An ArgoCD application spec provides several different ways of track kubernetes r
git. This document describes the different techniques and the means of deploying those manifests to
the target environment.
## Branch Tracking
## HEAD / Branch Tracking
If a branch name is specified, ArgoCD will continually compare live state against the resource
manifests defined at the tip of the specified branch.
If a branch name, or a symbolic reference (like HEAD) is specified, ArgoCD will continually compare
live state against the resource manifests defined at the tip of the specified branch or the
dereferenced commit of the symbolic reference.
To redeploy an application, a user makes changes to the manifests, and commits/pushes those
changes to the tracked branch, which will then be detected by ArgoCD controller.
changes to the tracked branch/symbolic reference, which will then be detected by ArgoCD controller.
## Tag Tracking
@@ -33,9 +34,9 @@ which is pinned to a commit, is by updating the tracking revision in the applica
commit containing the new manifests. Note that [parameter overrides](parameters.md) can still be set
on an application which is pinned to a revision.
## Auto-Sync [(Not Yet Implemented)]((https://github.com/argoproj/argo-cd/issues/79))
## Automated Sync
In all tracking strategies, the application will have the option to sync automatically. If auto-sync
In all tracking strategies, the application has the option to sync automatically. If [auto-sync](auto_sync.md)
is configured, the new resources manifests will be applied automatically -- as soon as a difference
is detected between the target state (git) and live state. If auto-sync is disabled, a manual sync
will be needed using the Argo UI, CLI, or API.

View File

@@ -12,7 +12,14 @@ spec:
app: application-controller
spec:
containers:
- command: [/argocd-application-controller, --repo-server, 'argocd-repo-server:8081']
- command:
- /argocd-application-controller
- --repo-server
- argocd-repo-server:8081
- --status-processors
- "20"
- --operation-processors
- "10"
image: argoproj/argocd-application-controller:latest
name: application-controller
serviceAccountName: application-controller

View File

@@ -24,8 +24,10 @@ resources:
imageTags:
- name: argoproj/argocd-server
newTag: latest
newTag: v0.10.6
- name: argoproj/argocd-ui
newTag: v0.10.6
- name: argoproj/argocd-repo-server
newTag: latest
- name: argoproj/application-controller
newTag: latest
newTag: v0.10.6
- name: argoproj/argocd-application-controller
newTag: v0.10.6

View File

@@ -9,3 +9,16 @@ rules:
- '*'
verbs:
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- list
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get

View File

@@ -160,6 +160,19 @@ rules:
- '*'
verbs:
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- list
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
@@ -315,7 +328,11 @@ spec:
- /argocd-application-controller
- --repo-server
- argocd-repo-server:8081
image: argoproj/argocd-application-controller:latest
- --status-processors
- "20"
- --operation-processors
- "10"
image: argoproj/argocd-application-controller:v0.10.6
name: application-controller
serviceAccountName: application-controller
---
@@ -336,7 +353,7 @@ spec:
containers:
- command:
- /argocd-repo-server
image: argoproj/argocd-repo-server:latest
image: argoproj/argocd-repo-server:v0.10.6
name: argocd-repo-server
ports:
- containerPort: 8081
@@ -366,7 +383,7 @@ spec:
- /shared/app
- --repo-server
- argocd-repo-server:8081
image: argoproj/argocd-server:latest
image: argoproj/argocd-server:v0.10.6
name: argocd-server
readinessProbe:
httpGet:
@@ -383,7 +400,7 @@ spec:
- -r
- /app
- /shared
image: argoproj/argocd-ui:latest
image: argoproj/argocd-ui:v0.10.6
name: ui
volumeMounts:
- mountPath: /shared
@@ -423,7 +440,7 @@ spec:
- cp
- /argocd-util
- /shared
image: argoproj/argocd-server:latest
image: argoproj/argocd-server:v0.10.6
name: copyutil
volumeMounts:
- mountPath: /shared

View File

@@ -261,7 +261,11 @@ spec:
- /argocd-application-controller
- --repo-server
- argocd-repo-server:8081
image: argoproj/argocd-application-controller:latest
- --status-processors
- "20"
- --operation-processors
- "10"
image: argoproj/argocd-application-controller:v0.10.6
name: application-controller
serviceAccountName: application-controller
---
@@ -282,7 +286,7 @@ spec:
containers:
- command:
- /argocd-repo-server
image: argoproj/argocd-repo-server:latest
image: argoproj/argocd-repo-server:v0.10.6
name: argocd-repo-server
ports:
- containerPort: 8081
@@ -312,7 +316,7 @@ spec:
- /shared/app
- --repo-server
- argocd-repo-server:8081
image: argoproj/argocd-server:latest
image: argoproj/argocd-server:v0.10.6
name: argocd-server
readinessProbe:
httpGet:
@@ -329,7 +333,7 @@ spec:
- -r
- /app
- /shared
image: argoproj/argocd-ui:latest
image: argoproj/argocd-ui:v0.10.6
name: ui
volumeMounts:
- mountPath: /shared
@@ -369,7 +373,7 @@ spec:
- cp
- /argocd-util
- /shared
image: argoproj/argocd-server:latest
image: argoproj/argocd-server:v0.10.6
name: copyutil
volumeMounts:
- mountPath: /shared

View File

@@ -40,6 +40,8 @@ const (
EnvArgoCDServer = "ARGOCD_SERVER"
// EnvArgoCDAuthToken is the environment variable to look for an ArgoCD auth token
EnvArgoCDAuthToken = "ARGOCD_AUTH_TOKEN"
// MaxGRPCMessageSize contains max grpc message size
MaxGRPCMessageSize = 100 * 1024 * 1024
)
// Client defines an interface for interaction with an Argo CD server.
@@ -277,7 +279,8 @@ func (c *client) NewConn() (*grpc.ClientConn, error) {
endpointCredentials := jwtCredentials{
Token: c.AuthToken,
}
return grpc_util.BlockingDial(context.Background(), "tcp", c.ServerAddr, creds, grpc.WithPerRPCCredentials(endpointCredentials))
return grpc_util.BlockingDial(context.Background(), "tcp", c.ServerAddr, creds,
grpc.WithPerRPCCredentials(endpointCredentials), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxGRPCMessageSize)))
}
func (c *client) tlsConfig() (*tls.Config, error) {

File diff suppressed because it is too large Load Diff

View File

@@ -8,7 +8,6 @@ package github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1;
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1alpha1";
@@ -239,8 +238,6 @@ message ConnectionState {
// DeploymentInfo contains information relevant to an application deployment
message DeploymentInfo {
repeated ComponentParameter params = 1;
optional string revision = 2;
repeated ComponentParameter componentParameterOverrides = 3;
@@ -287,8 +284,6 @@ message JWTToken {
// Operation contains requested operation parameters.
message Operation {
optional SyncOperation sync = 1;
optional RollbackOperation rollback = 2;
}
// OperationState contains information about state of currently performing operation on application.
@@ -305,9 +300,6 @@ message OperationState {
// SyncResult is the result of a Sync operation
optional SyncOperationResult syncResult = 4;
// RollbackResult is the result of a Rollback operation
optional SyncOperationResult rollbackResult = 5;
// StartedAt contains time of operation start
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 6;
@@ -388,14 +380,6 @@ message ResourceState {
optional HealthStatus health = 5;
}
message RollbackOperation {
optional int64 id = 1;
optional bool prune = 2;
optional bool dryRun = 3;
}
// SyncOperation contains sync operation details.
message SyncOperation {
// Revision is the git revision in which to sync the application to.
@@ -415,6 +399,18 @@ message SyncOperation {
// If nil, uses the parameter override set in application.
// If empty, sets no parameter overrides
optional ParameterOverrides parameterOverrides = 5;
// Resources describes which resources to sync
repeated SyncOperationResource resources = 6;
}
// SyncOperationResource contains resources to sync.
message SyncOperationResource {
optional string group = 1;
optional string kind = 2;
optional string name = 3;
}
// SyncOperationResult represent result of sync operation

View File

@@ -17,6 +17,22 @@ import (
"github.com/argoproj/argo-cd/util/git"
)
// SyncOperationResource contains resources to sync.
type SyncOperationResource struct {
Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
}
// HasIdentity determines whether a sync operation is identified by a manifest.
func (r SyncOperationResource) HasIdentity(u *unstructured.Unstructured) bool {
gvk := u.GroupVersionKind()
if u.GetName() == r.Name && gvk.Kind == r.Kind && gvk.Group == r.Group {
return true
}
return false
}
// SyncOperation contains sync operation details.
type SyncOperation struct {
// Revision is the git revision in which to sync the application to.
@@ -32,6 +48,8 @@ type SyncOperation struct {
// If nil, uses the parameter override set in application.
// If empty, sets no parameter overrides
ParameterOverrides ParameterOverrides `json:"parameterOverrides" protobuf:"bytes,5,opt,name=parameterOverrides"`
// Resources describes which resources to sync
Resources []SyncOperationResource `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"`
}
// ParameterOverrides masks the value so protobuf can generate
@@ -43,16 +61,9 @@ func (po ParameterOverrides) String() string {
return fmt.Sprintf("%v", []ComponentParameter(po))
}
type RollbackOperation struct {
ID int64 `json:"id" protobuf:"bytes,1,opt,name=id"`
Prune bool `json:"prune,omitempty" protobuf:"bytes,2,opt,name=prune"`
DryRun bool `json:"dryRun,omitempty" protobuf:"bytes,3,opt,name=dryRun"`
}
// Operation contains requested operation parameters.
type Operation struct {
Sync *SyncOperation `json:"sync,omitempty" protobuf:"bytes,1,opt,name=sync"`
Rollback *RollbackOperation `json:"rollback,omitempty" protobuf:"bytes,2,opt,name=rollback"`
Sync *SyncOperation `json:"sync,omitempty" protobuf:"bytes,1,opt,name=sync"`
}
type OperationPhase string
@@ -87,8 +98,6 @@ type OperationState struct {
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// SyncResult is the result of a Sync operation
SyncResult *SyncOperationResult `json:"syncResult,omitempty" protobuf:"bytes,4,opt,name=syncResult"`
// RollbackResult is the result of a Rollback operation
RollbackResult *SyncOperationResult `json:"rollbackResult,omitempty" protobuf:"bytes,5,opt,name=rollbackResult"`
// StartedAt contains time of operation start
StartedAt metav1.Time `json:"startedAt" protobuf:"bytes,6,opt,name=startedAt"`
// FinishedAt contains time of operation completion
@@ -200,7 +209,6 @@ type ResourceDetails struct {
// DeploymentInfo contains information relevant to an application deployment
type DeploymentInfo struct {
Params []ComponentParameter `json:"params" protobuf:"bytes,1,name=params"`
Revision string `json:"revision" protobuf:"bytes,2,opt,name=revision"`
ComponentParameterOverrides []ComponentParameter `json:"componentParameterOverrides,omitempty" protobuf:"bytes,3,opt,name=componentParameterOverrides"`
DeployedAt metav1.Time `json:"deployedAt" protobuf:"bytes,4,opt,name=deployedAt"`
@@ -493,6 +501,8 @@ type AppProject struct {
func (proj *AppProject) ProjectPoliciesString() string {
var policies []string
for _, role := range proj.Spec.Roles {
projectPolicy := fmt.Sprintf("p, proj:%s:%s, projects, get, %s, allow", proj.ObjectMeta.Name, role.Name, proj.ObjectMeta.Name)
policies = append(policies, projectPolicy)
policies = append(policies, role.Policies...)
}
return strings.Join(policies, "\n")

View File

@@ -481,11 +481,6 @@ func (in *ConnectionState) DeepCopy() *ConnectionState {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentInfo) DeepCopyInto(out *DeploymentInfo) {
*out = *in
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make([]ComponentParameter, len(*in))
copy(*out, *in)
}
if in.ComponentParameterOverrides != nil {
in, out := &in.ComponentParameterOverrides, &out.ComponentParameterOverrides
*out = make([]ComponentParameter, len(*in))
@@ -565,15 +560,6 @@ func (in *Operation) DeepCopyInto(out *Operation) {
(*in).DeepCopyInto(*out)
}
}
if in.Rollback != nil {
in, out := &in.Rollback, &out.Rollback
if *in == nil {
*out = nil
} else {
*out = new(RollbackOperation)
**out = **in
}
}
return
}
@@ -600,15 +586,6 @@ func (in *OperationState) DeepCopyInto(out *OperationState) {
(*in).DeepCopyInto(*out)
}
}
if in.RollbackResult != nil {
in, out := &in.RollbackResult, &out.RollbackResult
if *in == nil {
*out = nil
} else {
*out = new(SyncOperationResult)
(*in).DeepCopyInto(*out)
}
}
in.StartedAt.DeepCopyInto(&out.StartedAt)
if in.FinishedAt != nil {
in, out := &in.FinishedAt, &out.FinishedAt
@@ -762,22 +739,6 @@ func (in *ResourceState) DeepCopy() *ResourceState {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollbackOperation) DeepCopyInto(out *RollbackOperation) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackOperation.
func (in *RollbackOperation) DeepCopy() *RollbackOperation {
if in == nil {
return nil
}
out := new(RollbackOperation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncOperation) DeepCopyInto(out *SyncOperation) {
*out = *in
@@ -790,6 +751,16 @@ func (in *SyncOperation) DeepCopyInto(out *SyncOperation) {
(*in).DeepCopyInto(*out)
}
}
if in.ParameterOverrides != nil {
in, out := &in.ParameterOverrides, &out.ParameterOverrides
*out = make(ParameterOverrides, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]SyncOperationResource, len(*in))
copy(*out, *in)
}
return
}
@@ -803,6 +774,22 @@ func (in *SyncOperation) DeepCopy() *SyncOperation {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncOperationResource) DeepCopyInto(out *SyncOperationResource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncOperationResource.
func (in *SyncOperationResource) DeepCopy() *SyncOperationResource {
if in == nil {
return nil
}
out := new(SyncOperationResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncOperationResult) DeepCopyInto(out *SyncOperationResult) {
*out = *in

View File

@@ -2,7 +2,6 @@ package versioned
import (
argoprojv1alpha1 "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/typed/application/v1alpha1"
glog "github.com/golang/glog"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -56,7 +55,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
glog.Errorf("failed to create the DiscoveryClient: %v", err)
return nil, err
}
return &cs, nil

View File

@@ -23,9 +23,10 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
}
}
fakePtr := testing.Fake{}
fakePtr.AddReactor("*", "*", testing.ObjectReaction(o))
fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
cs := &Clientset{}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
@@ -35,7 +36,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
return true, watch, nil
})
return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}}
return cs
}
// Clientset implements clientset.Interface. Meant to be embedded into a

View File

@@ -6,15 +6,14 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
util_runtime "k8s.io/apimachinery/pkg/util/runtime"
)
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
AddToScheme(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
argoprojv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
@@ -27,10 +26,13 @@ func init() {
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
argoprojv1alpha1.AddToScheme(scheme)
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
util_runtime.Must(AddToScheme(scheme))
}

View File

@@ -6,15 +6,14 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
util_runtime "k8s.io/apimachinery/pkg/util/runtime"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
AddToScheme(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
argoprojv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
@@ -27,10 +26,13 @@ func init() {
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
argoprojv1alpha1.AddToScheme(scheme)
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
util_runtime.Must(AddToScheme(Scheme))
}

View File

@@ -44,7 +44,7 @@ func (c *FakeApplications) List(opts v1.ListOptions) (result *v1alpha1.Applicati
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.ApplicationList{}
list := &v1alpha1.ApplicationList{ListMeta: obj.(*v1alpha1.ApplicationList).ListMeta}
for _, item := range obj.(*v1alpha1.ApplicationList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)

View File

@@ -44,7 +44,7 @@ func (c *FakeAppProjects) List(opts v1.ListOptions) (result *v1alpha1.AppProject
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.AppProjectList{}
list := &v1alpha1.AppProjectList{ListMeta: obj.(*v1alpha1.AppProjectList).ListMeta}
for _, item := range obj.(*v1alpha1.AppProjectList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)

View File

@@ -14,12 +14,16 @@ import (
cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
@@ -27,23 +31,62 @@ type sharedInformerFactory struct {
startedInformers map[reflect.Type]bool
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil)
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return &sharedInformerFactory{
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: namespace,
tweakListOptions: tweakListOptions,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
// Start initializes all requested informers.
@@ -92,7 +135,13 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
if exists {
return informer
}
informer = newFunc(f.client, f.defaultResync)
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer

View File

@@ -12,7 +12,6 @@ import (
"time"
"github.com/google/go-jsonnet"
"github.com/ksonnet/ksonnet/pkg/app"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -76,10 +75,6 @@ func (s *Service) ListDir(ctx context.Context, q *ListDirRequest) (*FileList, er
s.repoLock.Lock(gitClient.Root())
defer s.repoLock.Unlock(gitClient.Root())
err = gitClient.Init()
if err != nil {
return nil, err
}
commitSHA, err = checkoutRevision(gitClient, commitSHA)
if err != nil {
return nil, err
@@ -119,10 +114,6 @@ func (s *Service) GetFile(ctx context.Context, q *GetFileRequest) (*GetFileRespo
s.repoLock.Lock(gitClient.Root())
defer s.repoLock.Unlock(gitClient.Root())
err = gitClient.Init()
if err != nil {
return nil, err
}
commitSHA, err = checkoutRevision(gitClient, commitSHA)
if err != nil {
return nil, err
@@ -165,10 +156,6 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
s.repoLock.Lock(gitClient.Root())
defer s.repoLock.Unlock(gitClient.Root())
err = gitClient.Init()
if err != nil {
return nil, err
}
commitSHA, err = checkoutRevision(gitClient, commitSHA)
if err != nil {
return nil, err
@@ -196,13 +183,13 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, error) {
var targetObjs []*unstructured.Unstructured
var params []*v1alpha1.ComponentParameter
var env *app.EnvironmentSpec
var dest *v1alpha1.ApplicationDestination
var err error
appSourceType := IdentifyAppSourceTypeByAppDir(appPath)
switch appSourceType {
case AppSourceKsonnet:
targetObjs, params, env, err = ksShow(appPath, q.Environment, q.ComponentParameterOverrides)
targetObjs, params, dest, err = ksShow(appPath, q.Environment, q.ComponentParameterOverrides)
case AppSourceHelm:
h := helm.NewHelmApp(appPath)
err = h.DependencyBuild()
@@ -266,9 +253,9 @@ func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, e
Manifests: manifests,
Params: params,
}
if env != nil {
res.Namespace = env.Destination.Namespace
res.Server = env.Destination.Server
if dest != nil {
res.Namespace = dest.Namespace
res.Server = dest.Server
}
return &res, nil
}
@@ -309,13 +296,17 @@ func IdentifyAppSourceTypeByAppPath(appFilePath string) AppSourceType {
// checkoutRevision is a convenience function to initialize a repo, fetch, and checkout a revision
// Returns the 40 character commit SHA after the checkout has been performed
func checkoutRevision(gitClient git.Client, commitSHA string) (string, error) {
err := gitClient.Fetch()
err := gitClient.Init()
if err != nil {
return "", err
return "", status.Errorf(codes.Internal, "Failed to initialize git repo: %v", err)
}
err = gitClient.Fetch()
if err != nil {
return "", status.Errorf(codes.Internal, "Failed to fetch git repo: %v", err)
}
err = gitClient.Checkout(commitSHA)
if err != nil {
return "", err
return "", status.Errorf(codes.Internal, "Failed to checkout %s: %v", commitSHA, err)
}
return gitClient.CommitSHA()
}
@@ -335,7 +326,7 @@ func getFileCacheKey(commitSHA string, q *GetFileRequest) string {
}
// ksShow runs `ks show` in an app directory after setting any component parameter overrides
func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, []*v1alpha1.ComponentParameter, *app.EnvironmentSpec, error) {
func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, []*v1alpha1.ComponentParameter, *v1alpha1.ApplicationDestination, error) {
ksApp, err := ksonnet.NewKsonnetApp(appPath)
if err != nil {
return nil, nil, nil, status.Errorf(codes.FailedPrecondition, "unable to load application from %s: %v", appPath, err)
@@ -352,8 +343,7 @@ func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) (
}
}
}
appSpec := ksApp.App()
env, err := appSpec.Environment(envName)
dest, err := ksApp.Destination(envName)
if err != nil {
return nil, nil, nil, status.Errorf(codes.NotFound, "environment %q does not exist in ksonnet app", envName)
}
@@ -361,7 +351,7 @@ func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) (
if err != nil {
return nil, nil, nil, err
}
return targetObjs, params, env, nil
return targetObjs, params, dest, nil
}
var manifestFile = regexp.MustCompile(`^.*\.(yaml|yml|json|jsonnet)$`)

View File

@@ -748,6 +748,7 @@ func (s *Server) Sync(ctx context.Context, syncReq *ApplicationSyncRequest) (*ap
DryRun: syncReq.DryRun,
SyncStrategy: syncReq.Strategy,
ParameterOverrides: parameterOverrides,
Resources: syncReq.Resources,
},
}
a, err = argo.SetAppOperation(ctx, appIf, s.auditLogger, *syncReq.Name, &op)
@@ -774,11 +775,25 @@ func (s *Server) Rollback(ctx context.Context, rollbackReq *ApplicationRollbackR
if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil {
return nil, status.Errorf(codes.FailedPrecondition, "Rollback cannot be initiated when auto-sync is enabled")
}
var deploymentInfo *appv1.DeploymentInfo
for _, info := range a.Status.History {
if info.ID == rollbackReq.ID {
deploymentInfo = &info
break
}
}
if deploymentInfo == nil {
return nil, fmt.Errorf("application %s does not have deployment with id %v", a.Name, rollbackReq.ID)
}
// Rollback is just a convenience around Sync
op := appv1.Operation{
Rollback: &appv1.RollbackOperation{
ID: rollbackReq.ID,
Prune: rollbackReq.Prune,
DryRun: rollbackReq.DryRun,
Sync: &appv1.SyncOperation{
Revision: deploymentInfo.Revision,
DryRun: rollbackReq.DryRun,
Prune: rollbackReq.Prune,
SyncStrategy: &appv1.SyncStrategy{Apply: &appv1.SyncStrategyApply{}},
ParameterOverrides: deploymentInfo.ComponentParameterOverrides,
},
}
a, err = argo.SetAppOperation(ctx, appIf, s.auditLogger, *rollbackReq.Name, &op)
@@ -809,7 +824,7 @@ func (s *Server) TerminateOperation(ctx context.Context, termOpReq *OperationTer
if !apierr.IsConflict(err) {
return nil, err
}
log.Warnf("Failed to set operation for app '%s' due to update conflict. Retrying again...", termOpReq.Name)
log.Warnf("Failed to set operation for app '%s' due to update conflict. Retrying again...", *termOpReq.Name)
time.Sleep(100 * time.Millisecond)
a, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*termOpReq.Name, metav1.GetOptions{})
if err != nil {

View File

@@ -51,7 +51,7 @@ func (m *ApplicationQuery) Reset() { *m = ApplicationQuery{} }
func (m *ApplicationQuery) String() string { return proto.CompactTextString(m) }
func (*ApplicationQuery) ProtoMessage() {}
func (*ApplicationQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{0}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{0}
}
func (m *ApplicationQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -115,7 +115,7 @@ func (m *ApplicationResourceEventsQuery) Reset() { *m = ApplicationResou
func (m *ApplicationResourceEventsQuery) String() string { return proto.CompactTextString(m) }
func (*ApplicationResourceEventsQuery) ProtoMessage() {}
func (*ApplicationResourceEventsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{1}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{1}
}
func (m *ApplicationResourceEventsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -178,7 +178,7 @@ func (m *ApplicationManifestQuery) Reset() { *m = ApplicationManifestQue
func (m *ApplicationManifestQuery) String() string { return proto.CompactTextString(m) }
func (*ApplicationManifestQuery) ProtoMessage() {}
func (*ApplicationManifestQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{2}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{2}
}
func (m *ApplicationManifestQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -231,7 +231,7 @@ func (m *ApplicationResponse) Reset() { *m = ApplicationResponse{} }
func (m *ApplicationResponse) String() string { return proto.CompactTextString(m) }
func (*ApplicationResponse) ProtoMessage() {}
func (*ApplicationResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{3}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{3}
}
func (m *ApplicationResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -272,7 +272,7 @@ func (m *ApplicationCreateRequest) Reset() { *m = ApplicationCreateReque
func (m *ApplicationCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationCreateRequest) ProtoMessage() {}
func (*ApplicationCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{4}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{4}
}
func (m *ApplicationCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -326,7 +326,7 @@ func (m *ApplicationUpdateRequest) Reset() { *m = ApplicationUpdateReque
func (m *ApplicationUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationUpdateRequest) ProtoMessage() {}
func (*ApplicationUpdateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{5}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{5}
}
func (m *ApplicationUpdateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -374,7 +374,7 @@ func (m *ApplicationDeleteRequest) Reset() { *m = ApplicationDeleteReque
func (m *ApplicationDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationDeleteRequest) ProtoMessage() {}
func (*ApplicationDeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{6}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{6}
}
func (m *ApplicationDeleteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -419,22 +419,23 @@ func (m *ApplicationDeleteRequest) GetCascade() bool {
// ApplicationSyncRequest is a request to apply the config state to live state
type ApplicationSyncRequest struct {
Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision" json:"revision"`
DryRun bool `protobuf:"varint,3,opt,name=dryRun" json:"dryRun"`
Prune bool `protobuf:"varint,4,opt,name=prune" json:"prune"`
Strategy *v1alpha1.SyncStrategy `protobuf:"bytes,5,opt,name=strategy" json:"strategy,omitempty"`
Parameter *ParameterOverrides `protobuf:"bytes,6,opt,name=parameter" json:"parameter,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision" json:"revision"`
DryRun bool `protobuf:"varint,3,opt,name=dryRun" json:"dryRun"`
Prune bool `protobuf:"varint,4,opt,name=prune" json:"prune"`
Strategy *v1alpha1.SyncStrategy `protobuf:"bytes,5,opt,name=strategy" json:"strategy,omitempty"`
Parameter *ParameterOverrides `protobuf:"bytes,6,opt,name=parameter" json:"parameter,omitempty"`
Resources []v1alpha1.SyncOperationResource `protobuf:"bytes,7,rep,name=resources" json:"resources"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ApplicationSyncRequest) Reset() { *m = ApplicationSyncRequest{} }
func (m *ApplicationSyncRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationSyncRequest) ProtoMessage() {}
func (*ApplicationSyncRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{7}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{7}
}
func (m *ApplicationSyncRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -505,6 +506,15 @@ func (m *ApplicationSyncRequest) GetParameter() *ParameterOverrides {
return nil
}
func (m *ApplicationSyncRequest) GetResources() []v1alpha1.SyncOperationResource {
if m != nil {
return m.Resources
}
return nil
}
// ParameterOverrides is a wrapper on a list of parameters. If omitted, the application's overrides
// in the spec will be used. If set, will use the supplied list of overrides
type ParameterOverrides struct {
Overrides []*Parameter `protobuf:"bytes,1,rep,name=overrides" json:"overrides,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@@ -516,7 +526,7 @@ func (m *ParameterOverrides) Reset() { *m = ParameterOverrides{} }
func (m *ParameterOverrides) String() string { return proto.CompactTextString(m) }
func (*ParameterOverrides) ProtoMessage() {}
func (*ParameterOverrides) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{8}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{8}
}
func (m *ParameterOverrides) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -565,7 +575,7 @@ func (m *Parameter) Reset() { *m = Parameter{} }
func (m *Parameter) String() string { return proto.CompactTextString(m) }
func (*Parameter) ProtoMessage() {}
func (*Parameter) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{9}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{9}
}
func (m *Parameter) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -628,7 +638,7 @@ func (m *ApplicationUpdateSpecRequest) Reset() { *m = ApplicationUpdateS
func (m *ApplicationUpdateSpecRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationUpdateSpecRequest) ProtoMessage() {}
func (*ApplicationUpdateSpecRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{10}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{10}
}
func (m *ApplicationUpdateSpecRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -685,7 +695,7 @@ func (m *ApplicationRollbackRequest) Reset() { *m = ApplicationRollbackR
func (m *ApplicationRollbackRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationRollbackRequest) ProtoMessage() {}
func (*ApplicationRollbackRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{11}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{11}
}
func (m *ApplicationRollbackRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -756,7 +766,7 @@ func (m *ApplicationDeleteResourceRequest) Reset() { *m = ApplicationDel
func (m *ApplicationDeleteResourceRequest) String() string { return proto.CompactTextString(m) }
func (*ApplicationDeleteResourceRequest) ProtoMessage() {}
func (*ApplicationDeleteResourceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{12}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{12}
}
func (m *ApplicationDeleteResourceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -830,7 +840,7 @@ func (m *ApplicationPodLogsQuery) Reset() { *m = ApplicationPodLogsQuery
func (m *ApplicationPodLogsQuery) String() string { return proto.CompactTextString(m) }
func (*ApplicationPodLogsQuery) ProtoMessage() {}
func (*ApplicationPodLogsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{13}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{13}
}
func (m *ApplicationPodLogsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -920,7 +930,7 @@ func (m *LogEntry) Reset() { *m = LogEntry{} }
func (m *LogEntry) String() string { return proto.CompactTextString(m) }
func (*LogEntry) ProtoMessage() {}
func (*LogEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{14}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{14}
}
func (m *LogEntry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -974,7 +984,7 @@ func (m *OperationTerminateRequest) Reset() { *m = OperationTerminateReq
func (m *OperationTerminateRequest) String() string { return proto.CompactTextString(m) }
func (*OperationTerminateRequest) ProtoMessage() {}
func (*OperationTerminateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{15}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{15}
}
func (m *OperationTerminateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1020,7 +1030,7 @@ func (m *OperationTerminateResponse) Reset() { *m = OperationTerminateRe
func (m *OperationTerminateResponse) String() string { return proto.CompactTextString(m) }
func (*OperationTerminateResponse) ProtoMessage() {}
func (*OperationTerminateResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_application_139f86dca173329e, []int{16}
return fileDescriptor_application_a0bd93d5f02bc9d6, []int{16}
}
func (m *OperationTerminateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1968,6 +1978,18 @@ func (m *ApplicationSyncRequest) MarshalTo(dAtA []byte) (int, error) {
}
i += n4
}
if len(m.Resources) > 0 {
for _, msg := range m.Resources {
dAtA[i] = 0x3a
i++
i = encodeVarintApplication(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
@@ -2446,6 +2468,12 @@ func (m *ApplicationSyncRequest) Size() (n int) {
l = m.Parameter.Size()
n += 1 + l + sovApplication(uint64(l))
}
if len(m.Resources) > 0 {
for _, e := range m.Resources {
l = e.Size()
n += 1 + l + sovApplication(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -3553,6 +3581,37 @@ func (m *ApplicationSyncRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowApplication
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthApplication
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Resources = append(m.Resources, v1alpha1.SyncOperationResource{})
if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipApplication(dAtA[iNdEx:])
@@ -4875,97 +4934,99 @@ var (
)
func init() {
proto.RegisterFile("server/application/application.proto", fileDescriptor_application_139f86dca173329e)
proto.RegisterFile("server/application/application.proto", fileDescriptor_application_a0bd93d5f02bc9d6)
}
var fileDescriptor_application_139f86dca173329e = []byte{
// 1407 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xcf, 0x6f, 0xdc, 0x44,
0x14, 0x66, 0x76, 0x37, 0x9b, 0xec, 0x4b, 0x85, 0x60, 0x68, 0x83, 0x31, 0x69, 0xb2, 0x9a, 0xa6,
0xe9, 0x36, 0xa5, 0x76, 0x13, 0x55, 0x02, 0x55, 0x20, 0xd4, 0xd0, 0xd2, 0xa6, 0x0a, 0x6d, 0x70,
0x5a, 0x90, 0xb8, 0xa0, 0xa9, 0x3d, 0xdd, 0x35, 0xd9, 0xf5, 0x98, 0xb1, 0x77, 0xd1, 0x52, 0xf5,
0x40, 0x41, 0x9c, 0x90, 0x2a, 0x04, 0x07, 0x6e, 0x40, 0xcf, 0x15, 0x17, 0xee, 0x9c, 0x7b, 0x44,
0xe2, 0x5e, 0xa1, 0x88, 0x3f, 0x04, 0xcd, 0xd8, 0x5e, 0x8f, 0x9b, 0x5d, 0xa7, 0xc0, 0x72, 0x1b,
0xbf, 0x79, 0xf3, 0xde, 0xf7, 0x7e, 0xcc, 0xbc, 0x4f, 0x86, 0x95, 0x88, 0x89, 0x01, 0x13, 0x36,
0x0d, 0xc3, 0xae, 0xef, 0xd2, 0xd8, 0xe7, 0x81, 0xbe, 0xb6, 0x42, 0xc1, 0x63, 0x8e, 0xe7, 0x35,
0x91, 0x79, 0xb4, 0xcd, 0xdb, 0x5c, 0xc9, 0x6d, 0xb9, 0x4a, 0x54, 0xcc, 0xc5, 0x36, 0xe7, 0xed,
0x2e, 0xb3, 0x69, 0xe8, 0xdb, 0x34, 0x08, 0x78, 0xac, 0x94, 0xa3, 0x74, 0x97, 0xec, 0xbd, 0x11,
0x59, 0x3e, 0x57, 0xbb, 0x2e, 0x17, 0xcc, 0x1e, 0xac, 0xdb, 0x6d, 0x16, 0x30, 0x41, 0x63, 0xe6,
0xa5, 0x3a, 0xe7, 0x73, 0x9d, 0x1e, 0x75, 0x3b, 0x7e, 0xc0, 0xc4, 0xd0, 0x0e, 0xf7, 0xda, 0x52,
0x10, 0xd9, 0x3d, 0x16, 0xd3, 0x71, 0xa7, 0xb6, 0xda, 0x7e, 0xdc, 0xe9, 0xdf, 0xb6, 0x5c, 0xde,
0xb3, 0xa9, 0x50, 0xc0, 0x3e, 0x51, 0x8b, 0xb3, 0xae, 0x97, 0x9f, 0xd6, 0xc3, 0x1b, 0xac, 0xd3,
0x6e, 0xd8, 0xa1, 0x07, 0x4d, 0x6d, 0x96, 0x99, 0x12, 0x2c, 0xe4, 0x69, 0xae, 0xd4, 0xd2, 0x8f,
0xb9, 0x18, 0x6a, 0xcb, 0xc4, 0x06, 0x09, 0xe0, 0x85, 0x8b, 0xb9, 0xaf, 0xf7, 0xfb, 0x4c, 0x0c,
0x31, 0x86, 0x5a, 0x40, 0x7b, 0xcc, 0x40, 0x4d, 0xd4, 0x6a, 0x38, 0x6a, 0x8d, 0x97, 0x60, 0x56,
0xb0, 0x3b, 0x82, 0x45, 0x1d, 0xa3, 0xd2, 0x44, 0xad, 0xb9, 0xcd, 0xda, 0xe3, 0x27, 0xcb, 0xcf,
0x39, 0x99, 0x10, 0xaf, 0xc2, 0xac, 0x74, 0xcf, 0xdc, 0xd8, 0xa8, 0x36, 0xab, 0xad, 0xc6, 0xe6,
0x91, 0xfd, 0x27, 0xcb, 0x73, 0x3b, 0x89, 0x28, 0x72, 0xb2, 0x4d, 0xf2, 0x35, 0x82, 0x25, 0xcd,
0xa1, 0xc3, 0x22, 0xde, 0x17, 0x2e, 0xbb, 0x3c, 0x60, 0x41, 0x1c, 0x3d, 0xed, 0xbe, 0x32, 0x72,
0xdf, 0x82, 0x23, 0x22, 0x55, 0xbd, 0x2e, 0xf7, 0x2a, 0x72, 0x2f, 0xc5, 0x50, 0xd8, 0xc1, 0xab,
0x30, 0x9f, 0x7d, 0xdf, 0xda, 0xba, 0x64, 0x54, 0x35, 0x45, 0x7d, 0x83, 0xec, 0x80, 0xa1, 0xe1,
0x78, 0x8f, 0x06, 0xfe, 0x1d, 0x16, 0xc5, 0x93, 0x11, 0x34, 0x61, 0x4e, 0xb0, 0x81, 0x1f, 0xf9,
0x3c, 0x50, 0x19, 0xc8, 0x8c, 0x8e, 0xa4, 0xe4, 0x18, 0xbc, 0x54, 0x8c, 0x2c, 0xe4, 0x41, 0xc4,
0xc8, 0x43, 0x54, 0xf0, 0xf4, 0x8e, 0x60, 0x34, 0x66, 0x0e, 0xfb, 0xb4, 0xcf, 0xa2, 0x18, 0x07,
0xa0, 0xb7, 0xaa, 0x72, 0x38, 0xbf, 0xf1, 0xae, 0x95, 0x17, 0xd6, 0xca, 0x0a, 0xab, 0x16, 0x1f,
0xbb, 0x9e, 0x15, 0xee, 0xb5, 0x2d, 0xd9, 0x23, 0x96, 0xde, 0xf6, 0x59, 0x8f, 0x58, 0x9a, 0xa7,
0x2c, 0x6a, 0x4d, 0x0f, 0x2f, 0x40, 0xbd, 0x1f, 0x46, 0x4c, 0xc4, 0x49, 0x15, 0x9d, 0xf4, 0x8b,
0x7c, 0x55, 0x04, 0x79, 0x2b, 0xf4, 0x34, 0x90, 0x9d, 0xff, 0x11, 0x64, 0x01, 0x1e, 0xb9, 0x5a,
0x40, 0x71, 0x89, 0x75, 0x59, 0x8e, 0x62, 0x5c, 0x51, 0x0c, 0x98, 0x75, 0x69, 0xe4, 0x52, 0x8f,
0xa5, 0xf1, 0x64, 0x9f, 0xe4, 0x51, 0x05, 0x16, 0x34, 0x53, 0xbb, 0xc3, 0xc0, 0x2d, 0x33, 0x74,
0x68, 0x75, 0xf1, 0x22, 0xd4, 0x3d, 0x31, 0x74, 0xfa, 0x81, 0x51, 0xd5, 0xfa, 0x3f, 0x95, 0x61,
0x13, 0x66, 0x42, 0xd1, 0x0f, 0x98, 0x51, 0xd3, 0x36, 0x13, 0x11, 0x76, 0x61, 0x2e, 0x8a, 0xe5,
0xbd, 0x6d, 0x0f, 0x8d, 0x99, 0x26, 0x6a, 0xcd, 0x6f, 0x5c, 0xf9, 0x0f, 0xb9, 0x93, 0x91, 0xec,
0xa6, 0xe6, 0x9c, 0x91, 0x61, 0xfc, 0x16, 0x34, 0x42, 0x2a, 0x68, 0x8f, 0xc5, 0x4c, 0x18, 0x75,
0xe5, 0x65, 0xb9, 0x60, 0x60, 0x27, 0xdb, 0xbd, 0x31, 0x60, 0x42, 0xf8, 0x1e, 0x8b, 0x9c, 0xfc,
0x04, 0xb9, 0x06, 0xf8, 0xa0, 0x02, 0x3e, 0x0f, 0x0d, 0x9e, 0x7d, 0x18, 0xa8, 0x59, 0x6d, 0xcd,
0x6f, 0x2c, 0x8c, 0x37, 0xea, 0xe4, 0x8a, 0x84, 0x41, 0x63, 0x24, 0xc7, 0x86, 0x9e, 0xec, 0x34,
0x2f, 0x49, 0xca, 0x4d, 0x98, 0x19, 0xd0, 0x6e, 0x9f, 0x15, 0xf2, 0x9d, 0x88, 0x30, 0x81, 0x86,
0xcb, 0x7b, 0x21, 0x0f, 0x58, 0x10, 0xab, 0x7c, 0x67, 0xfb, 0xb9, 0x98, 0xfc, 0x80, 0x60, 0xf1,
0x40, 0xcb, 0xee, 0x86, 0xac, 0xb4, 0xce, 0x1e, 0xd4, 0xa2, 0x90, 0xb9, 0xea, 0xfd, 0x98, 0xdf,
0xb8, 0x36, 0x9d, 0x1e, 0x96, 0x4e, 0xb3, 0xd0, 0xa4, 0x75, 0xf9, 0xc8, 0x99, 0x7a, 0x8f, 0xf3,
0x6e, 0xf7, 0x36, 0x75, 0xf7, 0xca, 0x80, 0x99, 0x50, 0xf1, 0x3d, 0x05, 0xab, 0xba, 0x09, 0xd2,
0xd4, 0xfe, 0x93, 0xe5, 0xca, 0xd6, 0x25, 0xa7, 0xe2, 0x7b, 0xff, 0xbe, 0xf5, 0xc8, 0x2f, 0x08,
0x9a, 0x63, 0x2e, 0x54, 0xf2, 0x0a, 0x96, 0xc1, 0x79, 0xf6, 0xf7, 0x76, 0x03, 0x80, 0x86, 0xfe,
0x07, 0x4c, 0xa8, 0xbb, 0x93, 0x3c, 0xb7, 0x38, 0x0d, 0x00, 0x2e, 0xee, 0x6c, 0xa5, 0x3b, 0x8e,
0xa6, 0x25, 0x9b, 0x62, 0xcf, 0x0f, 0x3c, 0xa3, 0xa6, 0x37, 0x85, 0x94, 0x90, 0x9f, 0x2b, 0xf0,
0xb2, 0x06, 0x78, 0x87, 0x7b, 0xdb, 0xbc, 0x5d, 0x32, 0x17, 0x0c, 0x98, 0x0d, 0xb9, 0x97, 0x43,
0x74, 0xb2, 0xcf, 0xa4, 0x85, 0x82, 0x98, 0xca, 0xb1, 0x5c, 0x98, 0x02, 0xb9, 0x58, 0x46, 0x19,
0xf9, 0x81, 0xcb, 0x76, 0x99, 0xcb, 0x03, 0x2f, 0x52, 0x78, 0xaa, 0x59, 0x94, 0xfa, 0x0e, 0xbe,
0x0a, 0x0d, 0xf5, 0x7d, 0xd3, 0xef, 0xb1, 0xf4, 0x12, 0xaf, 0x59, 0xc9, 0xfc, 0xb7, 0xf4, 0xf9,
0x9f, 0x37, 0x8d, 0x9c, 0xff, 0xd6, 0x60, 0xdd, 0x92, 0x27, 0x9c, 0xfc, 0xb0, 0xc4, 0x15, 0x53,
0xbf, 0xbb, 0xed, 0x07, 0x2c, 0x32, 0xea, 0x9a, 0xc3, 0x5c, 0x2c, 0x0b, 0x7e, 0x87, 0x77, 0xbb,
0xfc, 0x33, 0x63, 0xb6, 0x59, 0xc9, 0x0b, 0x9e, 0xc8, 0xc8, 0xe7, 0x30, 0xb7, 0xcd, 0xdb, 0x97,
0x83, 0x58, 0x0c, 0xe5, 0x58, 0x96, 0xe1, 0xc8, 0x6b, 0xa2, 0xdf, 0xb0, 0x4c, 0x88, 0xaf, 0x43,
0x23, 0xf6, 0x7b, 0x6c, 0x37, 0xa6, 0xbd, 0x30, 0x6d, 0xfa, 0x7f, 0x80, 0x7b, 0x84, 0x2c, 0x33,
0x41, 0x6c, 0x78, 0xe5, 0x46, 0x28, 0x49, 0x88, 0xcf, 0x83, 0x9b, 0x4c, 0xf4, 0xfc, 0x80, 0x96,
0xbe, 0xd0, 0x64, 0x11, 0xcc, 0x71, 0x07, 0x92, 0xd9, 0xb8, 0xf1, 0xe5, 0x8b, 0x80, 0xf5, 0x8b,
0xc4, 0xc4, 0xc0, 0x77, 0x19, 0x7e, 0x80, 0xa0, 0xb6, 0xed, 0x47, 0x31, 0x3e, 0x5e, 0xb8, 0x7b,
0x4f, 0x13, 0x15, 0x73, 0x4a, 0xf7, 0x57, 0xba, 0x22, 0x8b, 0xf7, 0xff, 0xf8, 0xeb, 0xbb, 0xca,
0x02, 0x3e, 0xaa, 0x38, 0xdf, 0x60, 0x5d, 0xa7, 0x60, 0x11, 0xfe, 0x06, 0x01, 0x96, 0x6a, 0x45,
0xbe, 0x82, 0xcf, 0x4c, 0xc2, 0x37, 0x86, 0xd7, 0x98, 0xc7, 0xb5, 0xc4, 0x5b, 0x92, 0x54, 0xca,
0x34, 0x2b, 0x05, 0x05, 0x60, 0x4d, 0x01, 0x58, 0xc1, 0x64, 0x1c, 0x00, 0xfb, 0xae, 0xcc, 0xe6,
0x3d, 0x9b, 0x25, 0x7e, 0x7f, 0x44, 0x30, 0xf3, 0x21, 0x8d, 0xdd, 0xce, 0x61, 0x19, 0xda, 0x99,
0x4e, 0x86, 0x94, 0x2f, 0x05, 0x95, 0x9c, 0x50, 0x30, 0x8f, 0xe3, 0x57, 0x33, 0x98, 0x51, 0x2c,
0x18, 0xed, 0x15, 0xd0, 0x9e, 0x43, 0xf8, 0x21, 0x82, 0x7a, 0x42, 0x75, 0xf0, 0xc9, 0x49, 0x10,
0x0b, 0x54, 0xc8, 0x9c, 0x12, 0xa1, 0x20, 0xa7, 0x15, 0xc0, 0x13, 0x64, 0x6c, 0x21, 0x2f, 0x14,
0xd8, 0xd0, 0xb7, 0x08, 0xaa, 0x57, 0xd8, 0xa1, 0x6d, 0x36, 0x2d, 0x64, 0x07, 0x52, 0x37, 0xa6,
0xc2, 0xf8, 0x3e, 0x82, 0x23, 0x57, 0x58, 0x9c, 0x11, 0xd2, 0x68, 0x72, 0xfa, 0x0a, 0x9c, 0xd5,
0x5c, 0xb4, 0x34, 0x6e, 0x9f, 0x6d, 0x8d, 0x48, 0xe8, 0x59, 0xe5, 0xfa, 0x14, 0x3e, 0x59, 0xd6,
0x5c, 0xbd, 0x91, 0xcf, 0xdf, 0x10, 0xd4, 0x93, 0x81, 0x3a, 0xd9, 0x7d, 0x81, 0x23, 0x4e, 0x2d,
0x47, 0x97, 0x15, 0xd0, 0xb7, 0xcd, 0x73, 0xe3, 0x81, 0xea, 0xe7, 0xe5, 0x4b, 0xe5, 0xd1, 0x98,
0x5a, 0x0a, 0x7d, 0xb1, 0xb2, 0xbf, 0x22, 0x80, 0x9c, 0x11, 0xe0, 0xd3, 0xe5, 0x41, 0x68, 0xac,
0xc1, 0x9c, 0x22, 0x27, 0x20, 0x96, 0x0a, 0xa6, 0x65, 0x36, 0xcb, 0xb2, 0x2e, 0x19, 0xc3, 0x05,
0xc5, 0x1b, 0xf0, 0x00, 0xea, 0xc9, 0x88, 0x9e, 0x9c, 0xf5, 0x02, 0x27, 0x36, 0x9b, 0x25, 0xef,
0x4f, 0x52, 0xf8, 0xb4, 0xe7, 0xd6, 0x4a, 0x7b, 0xee, 0x27, 0x04, 0x35, 0xc9, 0x2b, 0xf1, 0x89,
0x49, 0xf6, 0x34, 0xfe, 0x3c, 0xb5, 0x52, 0x9f, 0x51, 0xd0, 0x4e, 0x92, 0xf2, 0xec, 0x0c, 0x03,
0xf7, 0x02, 0x5a, 0xc3, 0x8f, 0x10, 0xcc, 0x65, 0x3c, 0x0a, 0x9f, 0x9a, 0x18, 0x76, 0x91, 0x69,
0x4d, 0x0d, 0xaa, 0xad, 0xa0, 0x9e, 0x26, 0x2b, 0x65, 0x50, 0x45, 0xea, 0x5c, 0xc2, 0xfd, 0x1e,
0x01, 0x1e, 0x8d, 0xbb, 0xd1, 0x00, 0xc4, 0xab, 0x05, 0x57, 0x13, 0x27, 0xa9, 0x79, 0xea, 0x50,
0xbd, 0xe2, 0xbd, 0x5e, 0x2b, 0xbd, 0xd7, 0x7c, 0xe4, 0xff, 0x01, 0x82, 0xe7, 0x8b, 0x24, 0x10,
0x9f, 0x3d, 0xac, 0xd3, 0x0a, 0x64, 0xf1, 0x19, 0x3a, 0xee, 0x35, 0x05, 0x69, 0x75, 0xad, 0x3c,
0x57, 0x99, 0xfb, 0x2f, 0x10, 0xcc, 0xa6, 0x2c, 0x0f, 0xaf, 0x4c, 0xb2, 0xad, 0xd3, 0x40, 0xf3,
0x58, 0x41, 0x2b, 0x63, 0x42, 0xe4, 0x75, 0xe5, 0x76, 0x1d, 0xdb, 0x65, 0x6e, 0x43, 0xee, 0x45,
0xf6, 0xdd, 0x94, 0x22, 0xde, 0xb3, 0xbb, 0xbc, 0x1d, 0x9d, 0x43, 0x9b, 0x6f, 0x3e, 0xde, 0x5f,
0x42, 0xbf, 0xef, 0x2f, 0xa1, 0x3f, 0xf7, 0x97, 0xd0, 0x47, 0x56, 0xd9, 0x5f, 0x95, 0x83, 0x7f,
0x9f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x3d, 0xe7, 0xbe, 0x98, 0x92, 0x12, 0x00, 0x00,
var fileDescriptor_application_a0bd93d5f02bc9d6 = []byte{
// 1427 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xcf, 0x6f, 0x1b, 0x45,
0x14, 0x66, 0xec, 0xc4, 0x8e, 0x5f, 0x2a, 0x04, 0x43, 0x1b, 0x96, 0x25, 0x4d, 0xac, 0x6d, 0x9a,
0xba, 0x29, 0xdd, 0x6d, 0xa2, 0x4a, 0xa0, 0x0a, 0x84, 0x1a, 0x5a, 0xda, 0x54, 0xa1, 0x35, 0x9b,
0x16, 0x24, 0x2e, 0x68, 0xba, 0x3b, 0x75, 0x96, 0xd8, 0x3b, 0xcb, 0xec, 0xd8, 0xc8, 0x54, 0x3d,
0x50, 0x10, 0x27, 0xa4, 0x0a, 0xc1, 0x81, 0x1b, 0xd0, 0x33, 0xe2, 0xc2, 0x9d, 0x73, 0x8f, 0x48,
0xdc, 0x2b, 0x14, 0xf1, 0x87, 0xa0, 0x99, 0xdd, 0xf5, 0xce, 0x36, 0xf6, 0xa6, 0x50, 0x73, 0x9b,
0x7d, 0xf3, 0xe6, 0xbd, 0xef, 0xfd, 0x9a, 0xf9, 0x6c, 0x58, 0x89, 0x29, 0x1f, 0x50, 0xee, 0x90,
0x28, 0xea, 0x06, 0x1e, 0x11, 0x01, 0x0b, 0xf5, 0xb5, 0x1d, 0x71, 0x26, 0x18, 0x9e, 0xd7, 0x44,
0xe6, 0xd1, 0x0e, 0xeb, 0x30, 0x25, 0x77, 0xe4, 0x2a, 0x51, 0x31, 0x17, 0x3b, 0x8c, 0x75, 0xba,
0xd4, 0x21, 0x51, 0xe0, 0x90, 0x30, 0x64, 0x42, 0x29, 0xc7, 0xe9, 0xae, 0xb5, 0xf7, 0x46, 0x6c,
0x07, 0x4c, 0xed, 0x7a, 0x8c, 0x53, 0x67, 0xb0, 0xee, 0x74, 0x68, 0x48, 0x39, 0x11, 0xd4, 0x4f,
0x75, 0xce, 0xe7, 0x3a, 0x3d, 0xe2, 0xed, 0x06, 0x21, 0xe5, 0x43, 0x27, 0xda, 0xeb, 0x48, 0x41,
0xec, 0xf4, 0xa8, 0x20, 0xe3, 0x4e, 0x6d, 0x75, 0x02, 0xb1, 0xdb, 0xbf, 0x6d, 0x7b, 0xac, 0xe7,
0x10, 0xae, 0x80, 0x7d, 0xa2, 0x16, 0x67, 0x3d, 0x3f, 0x3f, 0xad, 0x87, 0x37, 0x58, 0x27, 0xdd,
0x68, 0x97, 0x1c, 0x34, 0xb5, 0x59, 0x66, 0x8a, 0xd3, 0x88, 0xa5, 0xb9, 0x52, 0xcb, 0x40, 0x30,
0x3e, 0xd4, 0x96, 0x89, 0x0d, 0x2b, 0x84, 0x17, 0x2e, 0xe6, 0xbe, 0xde, 0xef, 0x53, 0x3e, 0xc4,
0x18, 0x66, 0x42, 0xd2, 0xa3, 0x06, 0x6a, 0xa2, 0x56, 0xc3, 0x55, 0x6b, 0xbc, 0x04, 0x75, 0x4e,
0xef, 0x70, 0x1a, 0xef, 0x1a, 0x95, 0x26, 0x6a, 0xcd, 0x6d, 0xce, 0x3c, 0x7a, 0xbc, 0xfc, 0x9c,
0x9b, 0x09, 0xf1, 0x2a, 0xd4, 0xa5, 0x7b, 0xea, 0x09, 0xa3, 0xda, 0xac, 0xb6, 0x1a, 0x9b, 0x47,
0xf6, 0x1f, 0x2f, 0xcf, 0xb5, 0x13, 0x51, 0xec, 0x66, 0x9b, 0xd6, 0xd7, 0x08, 0x96, 0x34, 0x87,
0x2e, 0x8d, 0x59, 0x9f, 0x7b, 0xf4, 0xf2, 0x80, 0x86, 0x22, 0x7e, 0xd2, 0x7d, 0x65, 0xe4, 0xbe,
0x05, 0x47, 0x78, 0xaa, 0x7a, 0x5d, 0xee, 0x55, 0xe4, 0x5e, 0x8a, 0xa1, 0xb0, 0x83, 0x57, 0x61,
0x3e, 0xfb, 0xbe, 0xb5, 0x75, 0xc9, 0xa8, 0x6a, 0x8a, 0xfa, 0x86, 0xd5, 0x06, 0x43, 0xc3, 0xf1,
0x1e, 0x09, 0x83, 0x3b, 0x34, 0x16, 0x93, 0x11, 0x34, 0x61, 0x8e, 0xd3, 0x41, 0x10, 0x07, 0x2c,
0x54, 0x19, 0xc8, 0x8c, 0x8e, 0xa4, 0xd6, 0x31, 0x78, 0xa9, 0x18, 0x59, 0xc4, 0xc2, 0x98, 0x5a,
0x0f, 0x51, 0xc1, 0xd3, 0x3b, 0x9c, 0x12, 0x41, 0x5d, 0xfa, 0x69, 0x9f, 0xc6, 0x02, 0x87, 0xa0,
0xb7, 0xaa, 0x72, 0x38, 0xbf, 0xf1, 0xae, 0x9d, 0x17, 0xd6, 0xce, 0x0a, 0xab, 0x16, 0x1f, 0x7b,
0xbe, 0x1d, 0xed, 0x75, 0x6c, 0xd9, 0x23, 0xb6, 0xde, 0xf6, 0x59, 0x8f, 0xd8, 0x9a, 0xa7, 0x2c,
0x6a, 0x4d, 0x0f, 0x2f, 0x40, 0xad, 0x1f, 0xc5, 0x94, 0x8b, 0xa4, 0x8a, 0x6e, 0xfa, 0x65, 0x7d,
0x55, 0x04, 0x79, 0x2b, 0xf2, 0x35, 0x90, 0xbb, 0xff, 0x23, 0xc8, 0x02, 0x3c, 0xeb, 0x6a, 0x01,
0xc5, 0x25, 0xda, 0xa5, 0x39, 0x8a, 0x71, 0x45, 0x31, 0xa0, 0xee, 0x91, 0xd8, 0x23, 0x3e, 0x4d,
0xe3, 0xc9, 0x3e, 0xad, 0x87, 0x55, 0x58, 0xd0, 0x4c, 0xed, 0x0c, 0x43, 0xaf, 0xcc, 0xd0, 0xa1,
0xd5, 0xc5, 0x8b, 0x50, 0xf3, 0xf9, 0xd0, 0xed, 0x87, 0x46, 0x55, 0xeb, 0xff, 0x54, 0x86, 0x4d,
0x98, 0x8d, 0x78, 0x3f, 0xa4, 0xc6, 0x8c, 0xb6, 0x99, 0x88, 0xb0, 0x07, 0x73, 0xb1, 0x90, 0x73,
0xdb, 0x19, 0x1a, 0xb3, 0x4d, 0xd4, 0x9a, 0xdf, 0xb8, 0xf2, 0x0c, 0xb9, 0x93, 0x91, 0xec, 0xa4,
0xe6, 0xdc, 0x91, 0x61, 0xfc, 0x16, 0x34, 0x22, 0xc2, 0x49, 0x8f, 0x0a, 0xca, 0x8d, 0x9a, 0xf2,
0xb2, 0x5c, 0x30, 0xd0, 0xce, 0x76, 0x6f, 0x0c, 0x28, 0xe7, 0x81, 0x4f, 0x63, 0x37, 0x3f, 0x81,
0x05, 0x34, 0xb2, 0xe1, 0x88, 0x8d, 0x7a, 0xb3, 0xda, 0x9a, 0xdf, 0x68, 0x3f, 0x23, 0xc8, 0x1b,
0x91, 0xbc, 0xac, 0xb4, 0x19, 0x4f, 0xb3, 0x92, 0x3b, 0xb2, 0xae, 0x01, 0x3e, 0x08, 0x0b, 0x9f,
0x87, 0x06, 0xcb, 0x3e, 0x0c, 0xa4, 0xb0, 0x2c, 0x8c, 0x0f, 0xc5, 0xcd, 0x15, 0x2d, 0x0a, 0x8d,
0x91, 0x1c, 0x1b, 0x7a, 0x89, 0x53, 0xbf, 0x49, 0xa1, 0x4d, 0x98, 0x1d, 0x90, 0x6e, 0x9f, 0x16,
0xaa, 0x9c, 0x88, 0xb0, 0x05, 0x0d, 0x8f, 0xf5, 0x22, 0x16, 0xd2, 0x50, 0xa8, 0x2a, 0x67, 0xfb,
0xb9, 0xd8, 0xfa, 0x01, 0xc1, 0xe2, 0x81, 0x41, 0xd9, 0x89, 0x68, 0x69, 0x77, 0xf9, 0x30, 0x13,
0x47, 0xd4, 0x53, 0xb7, 0xd6, 0xfc, 0xc6, 0xb5, 0xe9, 0x4c, 0x8e, 0x74, 0x9a, 0x85, 0x26, 0xad,
0xcb, 0xab, 0xd5, 0xd4, 0x27, 0x8b, 0x75, 0xbb, 0xb7, 0x89, 0xb7, 0x57, 0x06, 0xcc, 0x84, 0x4a,
0xe0, 0x2b, 0x58, 0xd5, 0x4d, 0x90, 0xa6, 0xf6, 0x1f, 0x2f, 0x57, 0xb6, 0x2e, 0xb9, 0x95, 0xc0,
0xff, 0xef, 0x0d, 0x6f, 0xfd, 0x8a, 0xa0, 0x39, 0x66, 0x8c, 0x93, 0xaa, 0x97, 0xc1, 0x79, 0xfa,
0x5b, 0x7e, 0x03, 0x80, 0x44, 0xc1, 0x07, 0x94, 0xab, 0x89, 0x4d, 0x2e, 0x79, 0x9c, 0x06, 0x00,
0x17, 0xdb, 0x5b, 0xe9, 0x8e, 0xab, 0x69, 0xc9, 0xa6, 0xd8, 0x0b, 0x42, 0xdf, 0x98, 0xd1, 0x9b,
0x42, 0x4a, 0xac, 0x9f, 0x2b, 0xf0, 0xb2, 0x06, 0xb8, 0xcd, 0xfc, 0x6d, 0xd6, 0x29, 0x79, 0x8d,
0x0c, 0xa8, 0x47, 0xcc, 0xcf, 0x21, 0xba, 0xd9, 0x67, 0xd2, 0x42, 0xa1, 0x20, 0x92, 0x0c, 0x14,
0xde, 0x9e, 0x5c, 0x2c, 0xa3, 0x8c, 0x83, 0xd0, 0xa3, 0x3b, 0xd4, 0x63, 0xa1, 0x1f, 0x2b, 0x3c,
0xd5, 0x2c, 0x4a, 0x7d, 0x07, 0x5f, 0x85, 0x86, 0xfa, 0xbe, 0x19, 0xf4, 0x68, 0x7a, 0x75, 0xac,
0xd9, 0x09, 0xeb, 0xb0, 0x75, 0xd6, 0x91, 0x37, 0x8d, 0x64, 0x1d, 0xf6, 0x60, 0xdd, 0x96, 0x27,
0xdc, 0xfc, 0xb0, 0xc4, 0x25, 0x48, 0xd0, 0xdd, 0x0e, 0x42, 0x1a, 0x1b, 0x35, 0xcd, 0x61, 0x2e,
0x96, 0x05, 0xbf, 0xc3, 0xba, 0x5d, 0xf6, 0x99, 0x51, 0x6f, 0x56, 0xf2, 0x82, 0x27, 0x32, 0xeb,
0x73, 0x98, 0xdb, 0x66, 0x9d, 0xcb, 0xa1, 0xe0, 0x43, 0x49, 0x06, 0x64, 0x38, 0x72, 0x4c, 0xf4,
0x09, 0xcb, 0x84, 0xf8, 0x3a, 0x34, 0x44, 0xd0, 0xa3, 0x3b, 0x82, 0xf4, 0xa2, 0xb4, 0xe9, 0xff,
0x05, 0xee, 0x11, 0xb2, 0xcc, 0x84, 0xe5, 0xc0, 0x2b, 0xa3, 0xdb, 0xe4, 0x26, 0xe5, 0xbd, 0x20,
0x24, 0xa5, 0xef, 0x82, 0xb5, 0x08, 0xe6, 0xb8, 0x03, 0xc9, 0x8b, 0xbc, 0xf1, 0xe5, 0x8b, 0x80,
0xf5, 0x41, 0xa2, 0x7c, 0x10, 0x78, 0x14, 0x3f, 0x40, 0x30, 0xb3, 0x1d, 0xc4, 0x02, 0x1f, 0x2f,
0xcc, 0xde, 0x93, 0xf4, 0xc8, 0x9c, 0xd2, 0xfc, 0x4a, 0x57, 0xd6, 0xe2, 0xfd, 0x3f, 0xff, 0xfe,
0xae, 0xb2, 0x80, 0x8f, 0x2a, 0xa6, 0x39, 0x58, 0xd7, 0x89, 0x5f, 0x8c, 0xbf, 0x41, 0x80, 0xa5,
0x5a, 0x91, 0x25, 0xe1, 0x33, 0x93, 0xf0, 0x8d, 0x61, 0x53, 0xe6, 0x71, 0x2d, 0xf1, 0xb6, 0xa4,
0xb2, 0x32, 0xcd, 0x4a, 0x41, 0x01, 0x58, 0x53, 0x00, 0x56, 0xb0, 0x35, 0x0e, 0x80, 0x73, 0x57,
0x66, 0xf3, 0x9e, 0x43, 0x13, 0xbf, 0x3f, 0x22, 0x98, 0xfd, 0x90, 0x08, 0x6f, 0xf7, 0xb0, 0x0c,
0xb5, 0xa7, 0x93, 0x21, 0xe5, 0x4b, 0x41, 0xb5, 0x4e, 0x28, 0x98, 0xc7, 0xf1, 0xab, 0x19, 0xcc,
0x58, 0x70, 0x4a, 0x7a, 0x05, 0xb4, 0xe7, 0x10, 0x7e, 0x88, 0xa0, 0x96, 0x10, 0x2c, 0x7c, 0x72,
0x12, 0xc4, 0x02, 0x01, 0x33, 0xa7, 0x44, 0x63, 0xac, 0xd3, 0x0a, 0xe0, 0x09, 0x6b, 0x6c, 0x21,
0x2f, 0x14, 0x38, 0xd8, 0xb7, 0x08, 0xaa, 0x57, 0xe8, 0xa1, 0x6d, 0x36, 0x2d, 0x64, 0x07, 0x52,
0x37, 0xa6, 0xc2, 0xf8, 0x3e, 0x82, 0x23, 0x57, 0xa8, 0xc8, 0x68, 0x70, 0x3c, 0x39, 0x7d, 0x05,
0xa6, 0x6c, 0x2e, 0xda, 0xda, 0x2f, 0x8a, 0x6c, 0x6b, 0x44, 0x7d, 0xcf, 0x2a, 0xd7, 0xa7, 0xf0,
0xc9, 0xb2, 0xe6, 0xea, 0x8d, 0x7c, 0xfe, 0x8e, 0xa0, 0x96, 0x3c, 0xa8, 0x93, 0xdd, 0x17, 0x98,
0xe9, 0xd4, 0x72, 0x74, 0x59, 0x01, 0x7d, 0xdb, 0x3c, 0x37, 0x1e, 0xa8, 0x7e, 0x5e, 0xde, 0x54,
0x3e, 0x11, 0xc4, 0x56, 0xe8, 0x8b, 0x95, 0xfd, 0x0d, 0x01, 0xe4, 0x8c, 0x00, 0x9f, 0x2e, 0x0f,
0x42, 0x63, 0x0d, 0xe6, 0x14, 0x39, 0x81, 0x65, 0xab, 0x60, 0x5a, 0x66, 0xb3, 0x2c, 0xeb, 0x92,
0x31, 0x5c, 0x50, 0xbc, 0x01, 0x0f, 0xa0, 0x96, 0x3c, 0xd1, 0x93, 0xb3, 0x5e, 0x60, 0xe2, 0x66,
0xb3, 0xe4, 0xfe, 0x49, 0x0a, 0x9f, 0xf6, 0xdc, 0x5a, 0x69, 0xcf, 0xfd, 0x84, 0x60, 0x46, 0x12,
0x45, 0x7c, 0x62, 0x92, 0x3d, 0x8d, 0xb5, 0x4f, 0xad, 0xd4, 0x67, 0x14, 0xb4, 0x93, 0x56, 0x79,
0x76, 0x86, 0xa1, 0x77, 0x01, 0xad, 0xe1, 0x5f, 0x10, 0xcc, 0x65, 0x3c, 0x0a, 0x9f, 0x9a, 0x18,
0x76, 0x91, 0x69, 0x4d, 0x0d, 0xaa, 0xa3, 0xa0, 0x9e, 0xb6, 0x56, 0xca, 0xa0, 0xf2, 0xd4, 0xb9,
0x84, 0xfb, 0x3d, 0x02, 0x3c, 0x7a, 0xee, 0x46, 0x0f, 0x20, 0x5e, 0x2d, 0xb8, 0x9a, 0xf8, 0x92,
0x9a, 0xa7, 0x0e, 0xd5, 0x2b, 0xce, 0xf5, 0x5a, 0xe9, 0x5c, 0xb3, 0x91, 0xff, 0x07, 0x08, 0x9e,
0x2f, 0x92, 0x40, 0x7c, 0xf6, 0xb0, 0x4e, 0x2b, 0x90, 0xc5, 0xa7, 0xe8, 0xb8, 0xd7, 0x14, 0xa4,
0xd5, 0xb5, 0xf2, 0x5c, 0x65, 0xee, 0xbf, 0x40, 0x50, 0x4f, 0x59, 0x1e, 0x5e, 0x99, 0x64, 0x5b,
0xa7, 0x81, 0xe6, 0xb1, 0x82, 0x56, 0xc6, 0x84, 0xac, 0xd7, 0x95, 0xdb, 0x75, 0xec, 0x94, 0xb9,
0x8d, 0x98, 0x1f, 0x3b, 0x77, 0x53, 0x8a, 0x78, 0xcf, 0xe9, 0xb2, 0x4e, 0x7c, 0x0e, 0x6d, 0xbe,
0xf9, 0x68, 0x7f, 0x09, 0xfd, 0xb1, 0xbf, 0x84, 0xfe, 0xda, 0x5f, 0x42, 0x1f, 0xd9, 0x65, 0xff,
0xe5, 0x1c, 0xfc, 0xcf, 0xeb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x08, 0x8a, 0xbf, 0x08,
0x13, 0x00, 0x00,
}

View File

@@ -58,6 +58,7 @@ message ApplicationSyncRequest {
optional bool prune = 4 [(gogoproto.nullable) = false];
optional github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.SyncStrategy strategy = 5;
optional ParameterOverrides parameter = 6;
repeated github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.SyncOperationResource resources = 7 [(gogoproto.nullable) = false];
}
// ParameterOverrides is a wrapper on a list of parameters. If omitted, the application's overrides

View File

@@ -53,7 +53,7 @@ func NewMetricsServer(port int, appLister applister.ApplicationLister) *http.Ser
appRegistry := NewAppRegistry(appLister)
mux.Handle(MetricsPath, promhttp.HandlerFor(appRegistry, promhttp.HandlerOpts{}))
return &http.Server{
Addr: fmt.Sprintf("localhost:%d", port),
Addr: fmt.Sprintf("0.0.0.0:%d", port),
Handler: mux,
}
}
@@ -88,7 +88,7 @@ func (c *appCollector) Describe(ch chan<- *prometheus.Desc) {
func (c *appCollector) Collect(ch chan<- prometheus.Metric) {
apps, err := c.store.List(labels.NewSelector())
if err != nil {
log.Warn("Failed to collect applications: %v", err)
log.Warnf("Failed to collect applications: %v", err)
return
}
for _, app := range apps {

View File

@@ -313,7 +313,6 @@ func validateProject(p *v1alpha1.AppProject) error {
} else {
return status.Errorf(codes.AlreadyExists, "can't have duplicate roles: role '%s' already exists", role.Name)
}
}
return nil
@@ -367,6 +366,13 @@ func (s *Server) Update(ctx context.Context, q *ProjectUpdateRequest) (*v1alpha1
return nil, status.Errorf(
codes.InvalidArgument, "following source repos are used by one or more application and cannot be removed: %s", strings.Join(removedSrcUsed, ";"))
}
for i, role := range q.Project.Spec.Roles {
var normalizedPolicies []string
for _, policy := range role.Policies {
normalizedPolicies = append(normalizedPolicies, normalizePolicy(policy))
}
q.Project.Spec.Roles[i].Policies = normalizedPolicies
}
res, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Update(q.Project)
if err == nil {
@@ -375,6 +381,19 @@ func (s *Server) Update(ctx context.Context, q *ProjectUpdateRequest) (*v1alpha1
return res, err
}
func normalizePolicy(policy string) string {
policyComponents := strings.Split(policy, ",")
normalizedPolicy := ""
for _, component := range policyComponents {
if normalizedPolicy == "" {
normalizedPolicy = component
} else {
normalizedPolicy = fmt.Sprintf("%s, %s", normalizedPolicy, strings.Trim(component, " "))
}
}
return normalizedPolicy
}
// Delete deletes a project
func (s *Server) Delete(ctx context.Context, q *ProjectQuery) (*EmptyResponse, error) {
if q.Name == common.DefaultAppProjectName {

View File

@@ -3,6 +3,7 @@ package project
import (
"context"
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -326,4 +327,25 @@ func TestProjectServer(t *testing.T) {
expectedErr := fmt.Sprintf("rpc error: code = InvalidArgument desc = incorrect policy format for '%s' as effect can only have value 'allow' or 'deny'", invalidPolicy)
assert.EqualError(t, err, expectedErr)
})
t.Run("TestNormalizeProjectRolePolicies", func(t *testing.T) {
action := "create"
object := "testApplication"
roleName := "testRole"
effect := "allow"
projWithRole := existingProj.DeepCopy()
role := v1alpha1.ProjectRole{Name: roleName, JWTTokens: []v1alpha1.JWTToken{{IssuedAt: 1}}}
noSpacesPolicyTemplate := strings.Replace(policyTemplate, " ", "", -1)
invalidPolicy := fmt.Sprintf(noSpacesPolicyTemplate, projWithRole.Name, roleName, action, projWithRole.Name, object, effect)
role.Policies = append(role.Policies, invalidPolicy)
projWithRole.Spec.Roles = append(projWithRole.Spec.Roles, role)
projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projWithRole), enforcer, util.NewKeyLock(), nil)
request := &ProjectUpdateRequest{Project: projWithRole}
updateProj, err := projectServer.Update(context.Background(), request)
assert.Nil(t, err)
expectedPolicy := fmt.Sprintf(policyTemplate, projWithRole.Name, roleName, action, projWithRole.Name, object, effect)
assert.Equal(t, expectedPolicy, updateProj.Spec.Roles[0].Policies[0])
})
}

View File

@@ -62,7 +62,13 @@ func (s *Server) ListApps(ctx context.Context, q *RepoAppsQuery) (*RepoAppsRespo
}
repo, err := s.db.GetRepository(ctx, q.Repo)
if err != nil {
return nil, err
if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.NotFound {
repo = &appsv1.Repository{
Repo: q.Repo,
}
} else {
return nil, err
}
}
// Test the repo
@@ -115,7 +121,13 @@ func (s *Server) GetAppDetails(ctx context.Context, q *RepoAppDetailsQuery) (*Re
}
repo, err := s.db.GetRepository(ctx, q.Repo)
if err != nil {
return nil, err
if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.NotFound {
repo = &appsv1.Repository{
Repo: q.Repo,
}
} else {
return nil, err
}
}
// Test the repo

View File

@@ -133,8 +133,9 @@ func initializeDefaultProject(opts ArgoCDServerOpts) error {
defaultProj := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: common.DefaultAppProjectName, Namespace: opts.Namespace},
Spec: v1alpha1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []v1alpha1.ApplicationDestination{{Server: "*", Namespace: "*"}},
SourceRepos: []string{"*"},
Destinations: []v1alpha1.ApplicationDestination{{Server: "*", Namespace: "*"}},
ClusterResourceWhitelist: []metav1.GroupKind{{Group: "*", Kind: "*"}},
},
}
@@ -242,6 +243,7 @@ func (a *ArgoCDServer) Run(ctx context.Context, port int) {
tlsConfig := tls.Config{
Certificates: []tls.Certificate{*a.settings.Certificate},
}
a.TLSConfigCustomizer(&tlsConfig)
tlsl = tls.NewListener(tlsl, &tlsConfig)
// Now, we build another mux recursively to match HTTPS and gRPC.
@@ -363,10 +365,19 @@ func (a *ArgoCDServer) useTLS() bool {
}
func (a *ArgoCDServer) newGRPCServer() *grpc.Server {
var sOpts []grpc.ServerOption
sOpts := []grpc.ServerOption{
// Set the both send and receive the bytes limit to be 100MB
// The proper way to achieve high performance is to have pagination
// while we work toward that, we can have high limit first
grpc.MaxRecvMsgSize(apiclient.MaxGRPCMessageSize),
grpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize),
grpc.ConnectionTimeout(300 * time.Second),
}
sensitiveMethods := map[string]bool{
"/session.SessionService/Create": true,
"/account.AccountService/UpdatePassword": true,
"/repository.RepositoryService/Create": true,
"/repository.RepositoryService/Update": true,
}
// NOTE: notice we do not configure the gRPC server here with TLS (e.g. grpc.Creds(creds))
// This is because TLS handshaking occurs in cmux handling
@@ -435,7 +446,7 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int) *http.Server
Addr: endpoint,
Handler: &bug21955Workaround{handler: mux},
}
var dOpts []grpc.DialOption
dOpts := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(apiclient.MaxGRPCMessageSize))}
if a.useTLS() {
// The following sets up the dial Options for grpc-gateway to talk to gRPC server over TLS.
// grpc-gateway is just translating HTTP/HTTPS requests as gRPC requests over localhost,

View File

@@ -80,6 +80,7 @@ func TestEnforceProjectToken(t *testing.T) {
s := NewServer(ArgoCDServerOpts{Namespace: fakeNamespace, KubeClientset: kubeclientset, AppClientset: apps.NewSimpleClientset(&existingProj)})
s.newGRPCServer()
claims := jwt.MapClaims{"sub": defaultSub, "iat": defaultIssuedAt}
assert.True(t, s.enf.EnforceClaims(claims, "projects", "get", existingProj.ObjectMeta.Name))
assert.True(t, s.enf.EnforceClaims(claims, "applications", "get", defaultTestObject))
})

View File

@@ -1198,87 +1198,6 @@
"accountUpdatePasswordResponse": {
"type": "object"
},
"apismetav1ObjectMeta": {
"description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create.",
"type": "object",
"properties": {
"annotations": {
"type": "object",
"title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional",
"additionalProperties": {
"type": "string"
}
},
"clusterName": {
"type": "string",
"title": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional"
},
"creationTimestamp": {
"$ref": "#/definitions/v1Time"
},
"deletionGracePeriodSeconds": {
"type": "string",
"format": "int64",
"title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional"
},
"deletionTimestamp": {
"$ref": "#/definitions/v1Time"
},
"finalizers": {
"type": "array",
"title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\n+optional\n+patchStrategy=merge",
"items": {
"type": "string"
}
},
"generateName": {
"description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency\n+optional",
"type": "string"
},
"generation": {
"type": "string",
"format": "int64",
"title": "A sequence number representing a specific generation of the desired state.\nPopulated by the system. Read-only.\n+optional"
},
"initializers": {
"$ref": "#/definitions/v1Initializers"
},
"labels": {
"type": "object",
"title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional",
"additionalProperties": {
"type": "string"
}
},
"name": {
"type": "string",
"title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional"
},
"namespace": {
"description": "Namespace defines the space within each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n+optional",
"type": "string"
},
"ownerReferences": {
"type": "array",
"title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge",
"items": {
"$ref": "#/definitions/v1OwnerReference"
}
},
"resourceVersion": {
"description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and passed unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients and .\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency\n+optional",
"type": "string"
},
"selfLink": {
"type": "string",
"title": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n+optional"
},
"uid": {
"description": "UID is the unique in time and space value for this object. It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional",
"type": "string"
}
}
},
"applicationApplicationResponse": {
"type": "object"
},
@@ -1320,6 +1239,12 @@
"type": "boolean",
"format": "boolean"
},
"resources": {
"type": "array",
"items": {
"$ref": "#/definitions/v1alpha1SyncOperationResource"
}
},
"revision": {
"type": "string"
},
@@ -1358,6 +1283,7 @@
},
"applicationParameterOverrides": {
"type": "object",
"title": "ParameterOverrides is a wrapper on a list of parameters. If omitted, the application's overrides\nin the spec will be used. If set, will use the supplied list of overrides",
"properties": {
"overrides": {
"type": "array",
@@ -1686,7 +1612,7 @@
"title": "A human-readable description of the status of this operation.\nTODO: decide on maximum length.\n+optional"
},
"metadata": {
"$ref": "#/definitions/apismetav1ObjectMeta"
"$ref": "#/definitions/v1ObjectMeta"
},
"reason": {
"type": "string",
@@ -1807,7 +1733,7 @@
"type": "object",
"properties": {
"continue": {
"description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nlist may not be possible if the server configuration has changed or more than a few minutes have\npassed. The resourceVersion field returned when using this continue value will be identical to\nthe value in the first response.",
"description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nconsistent list may not be possible if the server configuration has changed or more than a few\nminutes have passed. The resourceVersion field returned when using this continue value will be\nidentical to the value in the first response, unless you have received this token from an error\nmessage.",
"type": "string"
},
"resourceVersion": {
@@ -1836,6 +1762,87 @@
}
}
},
"v1ObjectMeta": {
"description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create.",
"type": "object",
"properties": {
"annotations": {
"type": "object",
"title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional",
"additionalProperties": {
"type": "string"
}
},
"clusterName": {
"type": "string",
"title": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional"
},
"creationTimestamp": {
"$ref": "#/definitions/v1Time"
},
"deletionGracePeriodSeconds": {
"type": "string",
"format": "int64",
"title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional"
},
"deletionTimestamp": {
"$ref": "#/definitions/v1Time"
},
"finalizers": {
"type": "array",
"title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\n+optional\n+patchStrategy=merge",
"items": {
"type": "string"
}
},
"generateName": {
"description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency\n+optional",
"type": "string"
},
"generation": {
"type": "string",
"format": "int64",
"title": "A sequence number representing a specific generation of the desired state.\nPopulated by the system. Read-only.\n+optional"
},
"initializers": {
"$ref": "#/definitions/v1Initializers"
},
"labels": {
"type": "object",
"title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional",
"additionalProperties": {
"type": "string"
}
},
"name": {
"type": "string",
"title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional"
},
"namespace": {
"description": "Namespace defines the space within each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n+optional",
"type": "string"
},
"ownerReferences": {
"type": "array",
"title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge",
"items": {
"$ref": "#/definitions/v1OwnerReference"
}
},
"resourceVersion": {
"description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and passed unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients and .\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency\n+optional",
"type": "string"
},
"selfLink": {
"type": "string",
"title": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n+optional"
},
"uid": {
"description": "UID is the unique in time and space value for this object. It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional",
"type": "string"
}
}
},
"v1ObjectReference": {
"type": "object",
"title": "ObjectReference contains enough information to let you inspect or modify the referred object.\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
@@ -2018,7 +2025,7 @@
"title": "AppProject is a definition of AppProject resource.\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"properties": {
"metadata": {
"$ref": "#/definitions/apismetav1ObjectMeta"
"$ref": "#/definitions/v1ObjectMeta"
},
"spec": {
"$ref": "#/definitions/v1alpha1AppProjectSpec"
@@ -2089,7 +2096,7 @@
"title": "Application is a definition of Application resource.\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"properties": {
"metadata": {
"$ref": "#/definitions/apismetav1ObjectMeta"
"$ref": "#/definitions/v1ObjectMeta"
},
"operation": {
"$ref": "#/definitions/v1alpha1Operation"
@@ -2374,12 +2381,6 @@
"type": "string",
"format": "int64"
},
"params": {
"type": "array",
"items": {
"$ref": "#/definitions/v1alpha1ComponentParameter"
}
},
"revision": {
"type": "string"
}
@@ -2444,9 +2445,6 @@
"description": "Operation contains requested operation parameters.",
"type": "object",
"properties": {
"rollback": {
"$ref": "#/definitions/v1alpha1RollbackOperation"
},
"sync": {
"$ref": "#/definitions/v1alpha1SyncOperation"
}
@@ -2470,9 +2468,6 @@
"type": "string",
"title": "Phase is the current phase of the operation"
},
"rollbackResult": {
"$ref": "#/definitions/v1alpha1SyncOperationResult"
},
"startedAt": {
"$ref": "#/definitions/v1Time"
},
@@ -2601,23 +2596,6 @@
}
}
},
"v1alpha1RollbackOperation": {
"type": "object",
"properties": {
"dryRun": {
"type": "boolean",
"format": "boolean"
},
"id": {
"type": "string",
"format": "int64"
},
"prune": {
"type": "boolean",
"format": "boolean"
}
}
},
"v1alpha1SyncOperation": {
"description": "SyncOperation contains sync operation details.",
"type": "object",
@@ -2635,6 +2613,13 @@
"format": "boolean",
"title": "Prune deletes resources that are no longer tracked in git"
},
"resources": {
"type": "array",
"title": "Resources describes which resources to sync",
"items": {
"$ref": "#/definitions/v1alpha1SyncOperationResource"
}
},
"revision": {
"description": "Revision is the git revision in which to sync the application to.\nIf omitted, will use the revision specified in app spec.",
"type": "string"
@@ -2644,6 +2629,21 @@
}
}
},
"v1alpha1SyncOperationResource": {
"description": "SyncOperationResource contains resources to sync.",
"type": "object",
"properties": {
"group": {
"type": "string"
},
"kind": {
"type": "string"
},
"name": {
"type": "string"
}
}
},
"v1alpha1SyncOperationResult": {
"type": "object",
"title": "SyncOperationResult represent result of sync operation",

View File

@@ -157,8 +157,8 @@ func TestAppManagement(t *testing.T) {
return err == nil && app.Status.ComparisonResult.Status == v1alpha1.ComparisonStatusSynced, err
})
assert.Equal(t, v1alpha1.ComparisonStatusSynced, app.Status.ComparisonResult.Status)
assert.True(t, app.Status.OperationState.RollbackResult != nil)
assert.Equal(t, 2, len(app.Status.OperationState.RollbackResult.Resources))
assert.True(t, app.Status.OperationState.SyncResult != nil)
assert.Equal(t, 2, len(app.Status.OperationState.SyncResult.Resources))
assert.True(t, app.Status.OperationState.Phase == v1alpha1.OperationSucceeded)
assert.Equal(t, 3, len(app.Status.History))
})

View File

@@ -9,13 +9,12 @@ import (
"strings"
"time"
"github.com/ghodss/yaml"
"github.com/ksonnet/ksonnet/pkg/app"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
@@ -29,6 +28,7 @@ import (
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/ksonnet"
)
const (
@@ -122,6 +122,7 @@ func RefreshApp(appIf v1alpha1.ApplicationInterface, name string) (*argoappv1.Ap
}
// WaitForRefresh watches an application until its comparison timestamp is after the refresh timestamp
// If refresh timestamp is not present, will use current timestamp at time of call
func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *time.Duration) (*argoappv1.Application, error) {
ctx := context.Background()
var cancel context.CancelFunc
@@ -136,6 +137,7 @@ func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *t
return nil, err
}
defer watchIf.Stop()
now := time.Now().UTC()
for {
select {
@@ -161,6 +163,9 @@ func WaitForRefresh(appIf v1alpha1.ApplicationInterface, name string, timeout *t
return nil, fmt.Errorf("Application event object failed conversion: %v", next)
}
refreshTimestampStr := app.ObjectMeta.Annotations[common.AnnotationKeyRefresh]
if refreshTimestampStr == "" {
refreshTimestampStr = now.String()
}
refreshTimestamp, err := time.Parse(time.RFC3339, refreshTimestampStr)
if err != nil {
return nil, fmt.Errorf("Unable to parse '%s': %v", common.AnnotationKeyRefresh, err)
@@ -223,9 +228,13 @@ func GetSpecErrors(
} else {
switch appSourceType {
case repository.AppSourceKsonnet:
appYamlConditions := verifyAppYAML(ctx, repoRes, spec, repoClient)
if len(appYamlConditions) > 0 {
conditions = append(conditions, appYamlConditions...)
err := verifyAppYAML(ctx, repoRes, spec, repoClient)
if err != nil {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: err.Error(),
})
}
case repository.AppSourceHelm:
helmConditions := verifyHelmChart(ctx, repoRes, spec, repoClient)
@@ -321,7 +330,12 @@ func queryAppSourceType(ctx context.Context, spec *argoappv1.ApplicationSpec, re
}
// verifyAppYAML verifies that a ksonnet app.yaml is functional
func verifyAppYAML(ctx context.Context, repoRes *argoappv1.Repository, spec *argoappv1.ApplicationSpec, repoClient repository.RepositoryServiceClient) []argoappv1.ApplicationCondition {
func verifyAppYAML(ctx context.Context, repoRes *argoappv1.Repository, spec *argoappv1.ApplicationSpec, repoClient repository.RepositoryServiceClient) error {
// Default revision to HEAD if unspecified
if spec.Source.TargetRevision == "" {
spec.Source.TargetRevision = "HEAD"
}
req := repository.GetFileRequest{
Repo: &argoappv1.Repository{
Repo: spec.Source.RepoURL,
@@ -335,47 +349,25 @@ func verifyAppYAML(ctx context.Context, repoRes *argoappv1.Repository, spec *arg
req.Repo.SSHPrivateKey = repoRes.SSHPrivateKey
}
getRes, err := repoClient.GetFile(ctx, &req)
var conditions []argoappv1.ApplicationCondition
if err != nil {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: fmt.Sprintf("Unable to load app.yaml: %v", err),
})
} else {
var appSpec app.Spec
err = yaml.Unmarshal(getRes.Data, &appSpec)
if err != nil {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: "app.yaml is not a valid ksonnet app spec",
})
} else {
// Default revision to HEAD if unspecified
if spec.Source.TargetRevision == "" {
spec.Source.TargetRevision = "HEAD"
}
// Verify the specified environment is defined in it
envSpec, ok := appSpec.Environments[spec.Source.Environment]
if !ok || envSpec == nil {
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,
Message: fmt.Sprintf("environment '%s' does not exist in ksonnet app", spec.Source.Environment),
})
}
if envSpec != nil {
// If server and namespace are not supplied, pull it from the app.yaml
if spec.Destination.Server == "" {
spec.Destination.Server = envSpec.Destination.Server
}
if spec.Destination.Namespace == "" {
spec.Destination.Namespace = envSpec.Destination.Namespace
}
}
}
return fmt.Errorf("Unable to load app.yaml: %v", err)
}
return conditions
// Verify the specified environment is defined in the app spec
dest, err := ksonnet.Destination(getRes.Data, spec.Source.Environment)
if err != nil {
return err
}
// If server and namespace are not supplied, pull it from the app.yaml
if spec.Destination.Server == "" {
spec.Destination.Server = dest.Server
}
if spec.Destination.Namespace == "" {
spec.Destination.Namespace = dest.Namespace
}
return nil
}
// verifyHelmChart verifies a helm chart is functional
@@ -459,7 +451,7 @@ func SetAppOperation(ctx context.Context, appIf v1alpha1.ApplicationInterface, a
a.Operation = op
a.Status.OperationState = nil
a, err = appIf.Update(a)
if op.Sync == nil && op.Rollback == nil {
if op.Sync == nil {
return nil, status.Errorf(codes.InvalidArgument, "Operation unspecified")
}
if err == nil {
@@ -471,3 +463,18 @@ func SetAppOperation(ctx context.Context, appIf v1alpha1.ApplicationInterface, a
log.Warnf("Failed to set operation for app '%s' due to update conflict. Retrying again...", appName)
}
}
// ContainsSyncResource determines if the given resource exists in the provided slice of sync operation resources.
// ContainsSyncResource returns false if either argument is nil.
func ContainsSyncResource(u *unstructured.Unstructured, rr []argoappv1.SyncOperationResource) bool {
if u == nil || rr == nil {
return false
}
for _, r := range rr {
if r.HasIdentity(u) {
return true
}
}
return false
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/watch"
"github.com/argoproj/argo-cd/common"
@@ -94,3 +95,28 @@ func TestWaitForRefresh(t *testing.T) {
assert.Nil(t, err)
assert.NotNil(t, app)
}
func TestContainsSyncResource(t *testing.T) {
var (
blankUnstructured unstructured.Unstructured
blankResource argoappv1.SyncOperationResource
helloResource argoappv1.SyncOperationResource = argoappv1.SyncOperationResource{Name: "hello"}
)
tables := []struct {
u *unstructured.Unstructured
rr []argoappv1.SyncOperationResource
expected bool
}{
{nil, nil, false},
{nil, []argoappv1.SyncOperationResource{}, false},
{&blankUnstructured, []argoappv1.SyncOperationResource{}, false},
{&blankUnstructured, []argoappv1.SyncOperationResource{blankResource}, true},
{&blankUnstructured, []argoappv1.SyncOperationResource{helloResource}, false},
}
for _, table := range tables {
if out := ContainsSyncResource(table.u, table.rr); out != table.expected {
t.Errorf("Expected %t for slice %+v conains resource %+v; instead got %t", table.expected, table.rr, table.u, out)
}
}
}

View File

@@ -20,7 +20,6 @@ import (
"github.com/coreos/dex/api"
oidc "github.com/coreos/go-oidc"
jwt "github.com/dgrijalva/jwt-go"
log "github.com/sirupsen/logrus"
"golang.org/x/oauth2"
"google.golang.org/grpc"
@@ -325,15 +324,9 @@ func (a *ClientApp) HandleCallback(w http.ResponseWriter, r *http.Request) {
return
}
idToken, err := a.verify(rawIDToken)
claims, err := a.sessionMgr.VerifyToken(rawIDToken)
if err != nil {
http.Error(w, fmt.Sprintf("Failed to verify ID token: %v", err), http.StatusInternalServerError)
return
}
var claims jwt.MapClaims
err = idToken.Claims(&claims)
if err != nil {
http.Error(w, fmt.Sprintf("Failed to unmarshal claims: %v", err), http.StatusInternalServerError)
http.Error(w, fmt.Sprintf("invalid session token: %v", err), http.StatusInternalServerError)
return
}
flags := []string{"path=/"}

View File

@@ -2,11 +2,8 @@ package git
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path"
"strings"
log "github.com/sirupsen/logrus"
@@ -29,7 +26,6 @@ type Client interface {
LsRemote(revision string) (string, error)
LsFiles(path string) ([]string, error)
CommitSHA() (string, error)
Reset() error
}
// ClientFactory is a factory of Git Clients
@@ -40,12 +36,9 @@ type ClientFactory interface {
// nativeGitClient implements Client interface using git CLI
type nativeGitClient struct {
repoURL string
root string
username string
password string
sshPrivateKey string
auth transport.AuthMethod
repoURL string
root string
auth transport.AuthMethod
}
type factory struct{}
@@ -56,11 +49,8 @@ func NewFactory() ClientFactory {
func (f *factory) NewClient(repoURL, path, username, password, sshPrivateKey string) (Client, error) {
clnt := nativeGitClient{
repoURL: repoURL,
root: path,
username: username,
password: password,
sshPrivateKey: sshPrivateKey,
repoURL: repoURL,
root: path,
}
if sshPrivateKey != "" {
signer, err := ssh.ParsePrivateKey([]byte(sshPrivateKey))
@@ -83,91 +73,61 @@ func (m *nativeGitClient) Root() string {
// Init initializes a local git repository and sets the remote origin
func (m *nativeGitClient) Init() error {
var needInit bool
if _, err := os.Stat(m.root); os.IsNotExist(err) {
needInit = true
} else {
_, err = m.runCmd("git", "status")
needInit = err != nil
_, err := git.PlainOpen(m.root)
if err == nil {
return nil
}
if needInit {
log.Infof("Initializing %s to %s", m.repoURL, m.root)
_, err := exec.Command("rm", "-rf", m.root).Output()
if err != nil {
return fmt.Errorf("unable to clean repo at %s: %v", m.root, err)
}
err = os.MkdirAll(m.root, 0755)
if err != nil {
return err
}
if _, err := m.runCmd("git", "init"); err != nil {
return err
}
if _, err := m.runCmd("git", "remote", "add", "origin", m.repoURL); err != nil {
return err
}
if err != git.ErrRepositoryNotExists {
return err
}
// always set credentials since it can change
err := m.setCredentials()
log.Infof("Initializing %s to %s", m.repoURL, m.root)
_, err = exec.Command("rm", "-rf", m.root).Output()
if err != nil {
return fmt.Errorf("unable to clean repo at %s: %v", m.root, err)
}
err = os.MkdirAll(m.root, 0755)
if err != nil {
return err
}
return nil
}
// setCredentials sets a local credentials file to connect to a remote git repository
func (m *nativeGitClient) setCredentials() error {
if m.password != "" {
log.Debug("Setting password credentials")
gitCredentialsFile := path.Join(m.root, ".git", "credentials")
urlObj, err := url.ParseRequestURI(m.repoURL)
if err != nil {
return err
}
urlObj.User = url.UserPassword(m.username, m.password)
cmdURL := urlObj.String()
err = ioutil.WriteFile(gitCredentialsFile, []byte(cmdURL), 0600)
if err != nil {
return fmt.Errorf("failed to set git credentials: %v", err)
}
_, err = m.runCmd("git", "config", "--local", "credential.helper", fmt.Sprintf("store --file=%s", gitCredentialsFile))
if err != nil {
return err
}
repo, err := git.PlainInit(m.root, false)
if err != nil {
return err
}
if IsSSHURL(m.repoURL) {
sshCmd := gitSSHCommand
if m.sshPrivateKey != "" {
log.Debug("Setting SSH credentials")
sshPrivateKeyFile := path.Join(m.root, ".git", "ssh-private-key")
err := ioutil.WriteFile(sshPrivateKeyFile, []byte(m.sshPrivateKey), 0600)
if err != nil {
return fmt.Errorf("failed to set git credentials: %v", err)
}
sshCmd += " -i " + sshPrivateKeyFile
}
_, err := m.runCmd("git", "config", "--local", "core.sshCommand", sshCmd)
if err != nil {
return err
}
}
return nil
_, err = repo.CreateRemote(&config.RemoteConfig{
Name: git.DefaultRemoteName,
URLs: []string{m.repoURL},
})
return err
}
// Fetch fetches latest updates from origin
func (m *nativeGitClient) Fetch() error {
var err error
log.Debugf("Fetching repo %s at %s", m.repoURL, m.root)
if _, err = m.runCmd("git", "fetch", "origin", "--tags", "--force"); err != nil {
repo, err := git.PlainOpen(m.root)
if err != nil {
return err
}
log.Debug("git fetch origin --tags --force")
err = repo.Fetch(&git.FetchOptions{
RemoteName: git.DefaultRemoteName,
Auth: m.auth,
Tags: git.AllTags,
Force: true,
})
if err == git.NoErrAlreadyUpToDate {
return nil
}
return err
// git fetch does not update the HEAD reference. The following command will update the local
// knowledge of what remote considers the “default branch”
// See: https://stackoverflow.com/questions/8839958/how-does-origin-head-get-set
if _, err := m.runCmd("git", "remote", "set-head", "origin", "-a"); err != nil {
return err
}
return nil
// NOTE(jessesuen): disabling the following code because:
// 1. we no longer perform a `git checkout HEAD`, instead relying on `ls-remote` and checking
// out a specific SHA1.
// 2. This command is the only other command that we use (excluding fetch/ls-remote) which
// requires remote access, and there appears to be no go-git equivalent to this command.
// _, err = m.runCmd("git", "remote", "set-head", "origin", "-a")
// return err
}
// LsFiles lists the local working tree, including only files that are under source control
@@ -181,34 +141,6 @@ func (m *nativeGitClient) LsFiles(path string) ([]string, error) {
return ss[:len(ss)-1], nil
}
// Reset resets local changes in a repository
func (m *nativeGitClient) Reset() error {
if _, err := m.runCmd("git", "reset", "--hard", "origin/HEAD"); err != nil {
return err
}
// Delete all local branches (we must first detach so we are not checked out a branch we are about to delete)
if _, err := m.runCmd("git", "checkout", "--detach", "origin/HEAD"); err != nil {
return err
}
branchesOut, err := m.runCmd("git", "for-each-ref", "--format=%(refname:short)", "refs/heads/")
if err != nil {
return err
}
branchesOut = strings.TrimSpace(branchesOut)
if branchesOut != "" {
branches := strings.Split(branchesOut, "\n")
args := []string{"branch", "-D"}
args = append(args, branches...)
if _, err = m.runCmd("git", args...); err != nil {
return err
}
}
if _, err := m.runCmd("git", "clean", "-fd"); err != nil {
return err
}
return nil
}
// Checkout checkout specified git sha
func (m *nativeGitClient) Checkout(revision string) error {
if revision == "" || revision == "HEAD" {
@@ -217,7 +149,7 @@ func (m *nativeGitClient) Checkout(revision string) error {
if _, err := m.runCmd("git", "checkout", "--force", revision); err != nil {
return err
}
if _, err := m.runCmd("git", "clean", "-fd"); err != nil {
if _, err := m.runCmd("git", "clean", "-fdx"); err != nil {
return err
}
return nil
@@ -243,7 +175,7 @@ func (m *nativeGitClient) LsRemote(revision string) (string, error) {
if err != nil {
return "", err
}
refs, err := remote.List(&git.ListOptions{})
refs, err := remote.List(&git.ListOptions{Auth: m.auth})
if err != nil {
return "", err
}
@@ -310,6 +242,8 @@ func (m *nativeGitClient) runCmd(command string, args ...string) (string, error)
log.Debug(strings.Join(cmd.Args, " "))
cmd.Dir = m.root
env := os.Environ()
env = append(env, "HOME=/dev/null")
env = append(env, "GIT_CONFIG_NOSYSTEM=true")
env = append(env, "GIT_ASKPASS=")
cmd.Env = env
out, err := cmd.Output()

View File

@@ -1,12 +1,7 @@
package git
import (
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"regexp"
"strings"
)
@@ -43,93 +38,45 @@ func IsTruncatedCommitSHA(sha string) bool {
// NormalizeGitURL normalizes a git URL for lookup and storage
func NormalizeGitURL(repo string) string {
// preprocess
repo = strings.TrimSpace(repo)
repo = ensureSuffix(repo, ".git")
if IsSSHURL(repo) {
repo = ensurePrefix(repo, "ssh://")
}
// process
if !isAzureGitURL(repo) {
// NOTE: not all git services support `.git` appended to their URLs (e.g. azure) so this
// normalization is not entirely safe.
repo = ensureSuffix(repo, ".git")
}
repoURL, err := url.Parse(repo)
if err != nil {
return ""
}
// postprocess
repoURL.Host = strings.ToLower(repoURL.Host)
normalized := repoURL.String()
return strings.TrimPrefix(normalized, "ssh://")
}
// isAzureGitURL returns true if supplied URL is from an Azure domain
func isAzureGitURL(repo string) bool {
repoURL, err := url.Parse(repo)
if err != nil {
return false
}
hostname := repoURL.Hostname()
return strings.HasSuffix(hostname, "dev.azure.com") || strings.HasSuffix(hostname, "visualstudio.com")
}
// IsSSHURL returns true if supplied URL is SSH URL
func IsSSHURL(url string) bool {
return strings.HasPrefix(url, "git@") || strings.HasPrefix(url, "ssh://")
}
const gitSSHCommand = "ssh -q -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=20"
//TODO: Make sure every public method works with '*' repo
// GetGitCommandEnvAndURL returns URL and env options for git operation
func GetGitCommandEnvAndURL(repo, username, password string, sshPrivateKey string) (string, []string, error) {
cmdURL := repo
env := os.Environ()
if IsSSHURL(repo) {
sshCmd := gitSSHCommand
if sshPrivateKey != "" {
sshFile, err := ioutil.TempFile("", "")
if err != nil {
return "", nil, err
}
_, err = sshFile.WriteString(sshPrivateKey)
if err != nil {
return "", nil, err
}
err = sshFile.Close()
if err != nil {
return "", nil, err
}
sshCmd += " -i " + sshFile.Name()
}
env = append(env, fmt.Sprintf("GIT_SSH_COMMAND=%s", sshCmd))
} else {
env = append(env, "GIT_ASKPASS=")
repoURL, err := url.ParseRequestURI(repo)
if err != nil {
return "", nil, err
}
repoURL.User = url.UserPassword(username, password)
cmdURL = repoURL.String()
}
return cmdURL, env, nil
}
// TestRepo tests if a repo exists and is accessible with the given credentials
func TestRepo(repo, username, password string, sshPrivateKey string) error {
repo, env, err := GetGitCommandEnvAndURL(repo, username, password, sshPrivateKey)
clnt, err := NewFactory().NewClient(repo, "", username, password, sshPrivateKey)
if err != nil {
return err
}
cmd := exec.Command("git", "ls-remote", repo, "HEAD")
cmd.Env = env
_, err = cmd.Output()
if err != nil {
if exErr, ok := err.(*exec.ExitError); ok {
errOutput := strings.Split(string(exErr.Stderr), "\n")[0]
errOutput = fmt.Sprintf("%s: %s", repo, errOutput)
return errors.New(redactPassword(errOutput, password))
}
return err
}
return nil
}
func redactPassword(msg string, password string) string {
if password != "" {
passwordRegexp := regexp.MustCompile("\\b" + regexp.QuoteMeta(password) + "\\b")
msg = passwordRegexp.ReplaceAllString(msg, "*****")
}
return msg
_, err = clnt.LsRemote("HEAD")
return err
}

View File

@@ -72,20 +72,22 @@ func TestIsSSHURL(t *testing.T) {
func TestNormalizeUrl(t *testing.T) {
data := map[string]string{
"git@GITHUB.com:argoproj/test": "git@github.com:argoproj/test.git",
"git@GITHUB.com:argoproj/test.git": "git@github.com:argoproj/test.git",
"git@GITHUB.com:test": "git@github.com:test.git",
"git@GITHUB.com:test.git": "git@github.com:test.git",
"https://GITHUB.com/argoproj/test": "https://github.com/argoproj/test.git",
"https://GITHUB.com/argoproj/test.git": "https://github.com/argoproj/test.git",
"https://github.com/TEST": "https://github.com/TEST.git",
"https://github.com/TEST.git": "https://github.com/TEST.git",
"ssh://git@GITHUB.com:argoproj/test": "git@github.com:argoproj/test.git",
"ssh://git@GITHUB.com:argoproj/test.git": "git@github.com:argoproj/test.git",
"ssh://git@GITHUB.com:test.git": "git@github.com:test.git",
"ssh://git@github.com:test": "git@github.com:test.git",
" https://github.com/argoproj/test ": "https://github.com/argoproj/test.git",
"\thttps://github.com/argoproj/test\n": "https://github.com/argoproj/test.git",
"git@GITHUB.com:argoproj/test": "git@github.com:argoproj/test.git",
"git@GITHUB.com:argoproj/test.git": "git@github.com:argoproj/test.git",
"git@GITHUB.com:test": "git@github.com:test.git",
"git@GITHUB.com:test.git": "git@github.com:test.git",
"https://GITHUB.com/argoproj/test": "https://github.com/argoproj/test.git",
"https://GITHUB.com/argoproj/test.git": "https://github.com/argoproj/test.git",
"https://github.com/TEST": "https://github.com/TEST.git",
"https://github.com/TEST.git": "https://github.com/TEST.git",
"ssh://git@GITHUB.com:argoproj/test": "git@github.com:argoproj/test.git",
"ssh://git@GITHUB.com:argoproj/test.git": "git@github.com:argoproj/test.git",
"ssh://git@GITHUB.com:test.git": "git@github.com:test.git",
"ssh://git@github.com:test": "git@github.com:test.git",
" https://github.com/argoproj/test ": "https://github.com/argoproj/test.git",
"\thttps://github.com/argoproj/test\n": "https://github.com/argoproj/test.git",
"https://1234.visualstudio.com/myproj/_git/myrepo": "https://1234.visualstudio.com/myproj/_git/myrepo",
"https://dev.azure.com/1234/myproj/_git/myrepo": "https://dev.azure.com/1234/myproj/_git/myrepo",
}
for k, v := range data {
assert.Equal(t, v, NormalizeGitURL(k))

View File

@@ -9,40 +9,47 @@ import (
extv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/apis/apps"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
"k8s.io/kubernetes/pkg/apis/apps"
)
// GetAppHealth returns the health of a k8s resource
func GetAppHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
var err error
var health *appv1.HealthStatus
switch obj.GetKind() {
case kube.DeploymentKind:
health, err = getDeploymentHealth(kubectl, obj)
case kube.ServiceKind:
health, err = getServiceHealth(kubectl, obj)
case kube.IngressKind:
health, err = getIngressHealth(kubectl, obj)
case kube.StatefulSetKind:
health, err = getStatefulSetHealth(kubectl, obj)
case kube.ReplicaSetKind:
health, err = getReplicaSetHealth(kubectl, obj)
case kube.DaemonSetKind:
health, err = getDaemonSetHealth(kubectl, obj)
case kube.PersistentVolumeClaimKind:
health, err = getPvcHealth(kubectl, obj)
default:
health = &appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
gvk := obj.GroupVersionKind()
switch gvk.Group {
case "apps", "extensions":
switch gvk.Kind {
case kube.DeploymentKind:
health, err = getDeploymentHealth(kubectl, obj)
case kube.IngressKind:
health, err = getIngressHealth(kubectl, obj)
case kube.StatefulSetKind:
health, err = getStatefulSetHealth(kubectl, obj)
case kube.ReplicaSetKind:
health, err = getReplicaSetHealth(kubectl, obj)
case kube.DaemonSetKind:
health, err = getDaemonSetHealth(kubectl, obj)
}
case "":
switch gvk.Kind {
case kube.ServiceKind:
health, err = getServiceHealth(kubectl, obj)
case kube.PersistentVolumeClaimKind:
health, err = getPVCHealth(kubectl, obj)
}
}
if err != nil {
health.Status = appv1.HealthStatusUnknown
health.StatusDetails = err.Error()
health = &appv1.HealthStatus{
Status: appv1.HealthStatusUnknown,
StatusDetails: err.Error(),
}
} else if health == nil {
health = &appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
}
return health, err
}
@@ -71,7 +78,7 @@ func IsWorse(current, new appv1.HealthStatusCode) bool {
return newIndex > currentIndex
}
func getPvcHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
func getPVCHealth(kubectl kube.Kubectl, obj *unstructured.Unstructured) (*appv1.HealthStatus, error) {
obj, err := kubectl.ConvertToVersion(obj, "", "v1")
if err != nil {
return nil, err

View File

@@ -12,74 +12,45 @@ import (
"github.com/argoproj/argo-cd/util/kube"
)
// assertAppHealth loads the manifest at yamlPath and checks that GetAppHealth
// reports expectedStatus for it.
func assertAppHealth(t *testing.T, yamlPath string, expectedStatus appv1.HealthStatusCode) {
	data, err := ioutil.ReadFile(yamlPath)
	assert.Nil(t, err)
	obj := unstructured.Unstructured{}
	err = yaml.Unmarshal(data, &obj)
	assert.Nil(t, err)
	health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
	assert.Nil(t, err)
	assert.NotNil(t, health)
	assert.Equal(t, expectedStatus, health.Status)
}
func TestDeploymentHealth(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("../kube/testdata/nginx.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
}
func TestDeploymentProgressing(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("./testdata/progressing.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusProgressing, health.Status)
}
func TestDeploymentDegraded(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("./testdata/degraded.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusDegraded, health.Status)
assertAppHealth(t, "../kube/testdata/nginx.yaml", appv1.HealthStatusHealthy)
assertAppHealth(t, "./testdata/deployment-progressing.yaml", appv1.HealthStatusProgressing)
assertAppHealth(t, "./testdata/deployment-degraded.yaml", appv1.HealthStatusDegraded)
}
func TestStatefulSetHealth(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("./testdata/statefulset.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
assertAppHealth(t, "./testdata/statefulset.yaml", appv1.HealthStatusHealthy)
}
func TestPvcHealthy(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("./testdata/pvc-bound.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusHealthy, health.Status)
func TestPVCHealth(t *testing.T) {
assertAppHealth(t, "./testdata/pvc-bound.yaml", appv1.HealthStatusHealthy)
assertAppHealth(t, "./testdata/pvc-pending.yaml", appv1.HealthStatusProgressing)
}
func TestPvcPending(t *testing.T) {
yamlBytes, err := ioutil.ReadFile("./testdata/pvc-pending.yaml")
assert.Nil(t, err)
var obj unstructured.Unstructured
err = yaml.Unmarshal(yamlBytes, &obj)
assert.Nil(t, err)
health, err := GetAppHealth(kube.KubectlCmd{}, &obj)
assert.Nil(t, err)
assert.NotNil(t, health)
assert.Equal(t, appv1.HealthStatusProgressing, health.Status)
func TestServiceHealth(t *testing.T) {
assertAppHealth(t, "./testdata/svc-clusterip.yaml", appv1.HealthStatusHealthy)
assertAppHealth(t, "./testdata/svc-loadbalancer.yaml", appv1.HealthStatusHealthy)
assertAppHealth(t, "./testdata/svc-loadbalancer-unassigned.yaml", appv1.HealthStatusProgressing)
}
func TestIngressHealth(t *testing.T) {
assertAppHealth(t, "./testdata/ingress.yaml", appv1.HealthStatusHealthy)
assertAppHealth(t, "./testdata/ingress-unassigned.yaml", appv1.HealthStatusProgressing)
}
// TestCRD verifies that a custom resource whose kind collides with a built-in
// kind is not assessed with the built-in health check.
func TestCRD(t *testing.T) {
	// This ensures we do not try to compare only based on "Kind": a
	// serving.knative.dev/v1alpha1 Service must not be treated as a core
	// v1 Service, and should default to healthy.
	assertAppHealth(t, "./testdata/knative-service.yaml", appv1.HealthStatusHealthy)
}

View File

@@ -0,0 +1,24 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
creationTimestamp: 2018-09-20T06:47:27Z
generation: 9
name: argocd-server-ingress
namespace: argocd
resourceVersion: "23207680"
selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
spec:
rules:
- host: example.argoproj.io
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
status:
loadBalancer: {}

26
util/health/testdata/ingress.yaml vendored Normal file
View File

@@ -0,0 +1,26 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
creationTimestamp: 2018-09-20T06:47:27Z
generation: 9
name: argocd-server-ingress
namespace: argocd
resourceVersion: "23207680"
selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
spec:
rules:
- host: example.argoproj.io
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
status:
loadBalancer:
ingress:
- ip: 1.2.3.4

View File

@@ -0,0 +1,14 @@
apiVersion: serving.knative.dev/v1alpha1
kind: Service
metadata:
name: helloworld
spec:
runLatest:
configuration:
revisionTemplate:
spec:
container:
env:
- name: TARGET
value: world
image: helloworld:latest

25
util/health/testdata/svc-clusterip.yaml vendored Normal file
View File

@@ -0,0 +1,25 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"argocd-metrics","namespace":"argocd"},"spec":{"ports":[{"name":"http","port":8082,"protocol":"TCP","targetPort":8082}],"selector":{"app":"argocd-server"}}}
creationTimestamp: 2018-10-27T06:36:27Z
name: argocd-metrics
namespace: argocd
resourceVersion: "1131"
selfLink: /api/v1/namespaces/argocd/services/argocd-metrics
uid: a1f65069-d9b2-11e8-b3c1-9ae2f452bd03
spec:
clusterIP: 10.96.199.2
ports:
- name: http
port: 8082
protocol: TCP
targetPort: 8082
selector:
app: argocd-server
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}

View File

@@ -0,0 +1,25 @@
apiVersion: v1
kind: Service
metadata:
creationTimestamp: 2018-11-06T01:07:35Z
name: argo-artifacts
namespace: argo
resourceVersion: "346792"
selfLink: /api/v1/namespaces/argo/services/argo-artifacts
uid: 586f5e57-e160-11e8-b3c1-9ae2f452bd03
spec:
clusterIP: 10.105.70.181
externalTrafficPolicy: Cluster
ports:
- name: service
nodePort: 32667
port: 9000
protocol: TCP
targetPort: 9000
selector:
app: minio
release: argo-artifacts
sessionAffinity: None
type: LoadBalancer
status:
loadBalancer: {}

View File

@@ -0,0 +1,35 @@
apiVersion: v1
kind: Service
metadata:
annotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "600"
creationTimestamp: 2018-06-05T23:34:58Z
labels:
applications.argoproj.io/app-name: argocd-cdp
name: argocd-server
namespace: argocd
resourceVersion: "32559487"
selfLink: /api/v1/namespaces/argocd/services/argocd-server
uid: 0f5885a9-6919-11e8-ad29-020124679688
spec:
clusterIP: 100.69.46.185
externalTrafficPolicy: Cluster
ports:
- name: http
nodePort: 30354
port: 80
protocol: TCP
targetPort: 8080
- name: https
nodePort: 31866
port: 443
protocol: TCP
targetPort: 8080
selector:
app: argocd-server
sessionAffinity: None
type: LoadBalancer
status:
loadBalancer:
ingress:
- hostname: abc123.us-west-2.elb.amazonaws.com

View File

@@ -1,38 +1,54 @@
package ksonnet
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"github.com/ksonnet/ksonnet/pkg/app"
"github.com/ksonnet/ksonnet/pkg/component"
"github.com/ghodss/yaml"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/config"
"github.com/argoproj/argo-cd/util/kube"
)
// Destination returns the deployment destination for an environment in app spec data
func Destination(data []byte, environment string) (*v1alpha1.ApplicationDestination, error) {
	// Decode only the piece of app.yaml we care about: the per-environment
	// destination.
	var spec struct {
		Environments map[string]struct {
			Destination v1alpha1.ApplicationDestination
		}
	}
	if err := yaml.Unmarshal(data, &spec); err != nil {
		return nil, fmt.Errorf("could not unmarshal ksonnet spec app.yaml: %v", err)
	}
	env, ok := spec.Environments[environment]
	if !ok {
		return nil, fmt.Errorf("environment '%s' does not exist in ksonnet app", environment)
	}
	return &env.Destination, nil
}
// KsonnetApp represents a ksonnet application directory and provides wrapper functionality around
// the `ks` command.
type KsonnetApp interface {
// Root is the root path ksonnet application directory
Root() string
// App is the Ksonnet application
App() app.App
// Spec is the Ksonnet application spec
Spec() *app.Spec
// Show returns a list of unstructured objects that would be applied to an environment
Show(environment string) ([]*unstructured.Unstructured, error)
// Destination returns the deployment destination for an environment
Destination(environment string) (*v1alpha1.ApplicationDestination, error)
// ListEnvParams returns list of environment parameters
ListEnvParams(environment string) ([]*v1alpha1.ComponentParameter, error)
@@ -42,12 +58,12 @@ type KsonnetApp interface {
// KsonnetVersion returns the version of ksonnet used when running ksonnet commands
func KsonnetVersion() (string, error) {
cmd := exec.Command("ks", "version")
out, err := cmd.Output()
ksApp := ksonnetApp{}
out, err := ksApp.ksCmd("", "version")
if err != nil {
return "", fmt.Errorf("unable to determine ksonnet version: %v", err)
}
ksonnetVersionStr := strings.Split(string(out), "\n")[0]
ksonnetVersionStr := strings.Split(out, "\n")[0]
parts := strings.SplitN(ksonnetVersionStr, ":", 2)
if len(parts) != 2 {
return "", fmt.Errorf("unexpected version string format: %s", ksonnetVersionStr)
@@ -56,28 +72,28 @@ func KsonnetVersion() (string, error) {
}
type ksonnetApp struct {
app app.App
spec app.Spec
rootDir string
}
// NewKsonnetApp tries to create a new wrapper to run commands on the `ks` command-line tool.
func NewKsonnetApp(path string) (KsonnetApp, error) {
ksApp := ksonnetApp{}
a, err := app.Load(afero.NewOsFs(), path, false)
if err != nil {
ksApp := ksonnetApp{rootDir: path}
// ensure that the file exists
if _, err := ksApp.appYamlPath(); err != nil {
return nil, err
}
ksApp.app = a
var spec app.Spec
err = config.UnmarshalLocalFile(filepath.Join(a.Root(), "app.yaml"), &spec)
if err != nil {
return nil, err
}
ksApp.spec = spec
return &ksApp, nil
}
// appYamlPath returns the path to the application's app.yaml, or an error if
// the file does not exist.
func (k *ksonnetApp) appYamlPath() (string, error) {
	const appYamlName = "app.yaml"
	yamlPath := filepath.Join(k.Root(), appYamlName)
	// ensure that the file exists
	if _, err := os.Stat(yamlPath); err != nil {
		return "", err
	}
	return yamlPath, nil
}
func (k *ksonnetApp) ksCmd(args ...string) (string, error) {
cmd := exec.Command("ks", args...)
cmd.Dir = k.Root()
@@ -100,17 +116,7 @@ func (k *ksonnetApp) ksCmd(args ...string) (string, error) {
}
// Root returns the root path of the ksonnet application directory.
func (k *ksonnetApp) Root() string {
	return k.app.Root()
}
// App is the Ksonnet application
func (k *ksonnetApp) App() app.App {
return k.app
}
// Spec is the Ksonnet application spec
func (k *ksonnetApp) Spec() *app.Spec {
	// The stray second return of a string (diff residue) was unreachable and
	// did not type-check; a pointer to the cached spec is the sole result.
	return &k.spec
}
// Show generates a concatenated list of Kubernetes manifests in the given environment.
@@ -129,23 +135,42 @@ func (k *ksonnetApp) Show(environment string) ([]*unstructured.Unstructured, err
return data, err
}
// Destination returns the deployment destination for an environment
func (k *ksonnetApp) Destination(environment string) (*v1alpha1.ApplicationDestination, error) {
	// Resolve and read app.yaml, then let the package-level parser extract
	// the environment's destination.
	appYaml, err := k.appYamlPath()
	if err != nil {
		return nil, err
	}
	contents, err := ioutil.ReadFile(appYaml)
	if err != nil {
		return nil, err
	}
	return Destination(contents, environment)
}
// ListEnvParams returns list of environment parameters
func (k *ksonnetApp) ListEnvParams(environment string) ([]*v1alpha1.ComponentParameter, error) {
log.Infof("listing environment '%s' parameters", environment)
mod, err := component.DefaultManager.Module(k.app, "")
out, err := k.ksCmd("param", "list", "--output", "json", "--env", environment)
if err != nil {
return nil, err
}
ksParams, err := mod.Params(environment)
if err != nil {
// Auxiliary data to hold unmarshaled JSON output, which may use different field names
var ksParams struct {
Data []struct {
Component string `json:"component"`
Key string `json:"param"`
Value string `json:"value"`
} `json:"data"`
}
if err := json.Unmarshal([]byte(out), &ksParams); err != nil {
return nil, err
}
var params []*v1alpha1.ComponentParameter
for _, ksParam := range ksParams {
value, err := strconv.Unquote(ksParam.Value)
if err != nil {
value = ksParam.Value
}
for _, ksParam := range ksParams.Data {
value := strings.Trim(ksParam.Value, `'"`)
componentParam := v1alpha1.ComponentParameter{
Component: ksParam.Component,
Name: ksParam.Key,

View File

@@ -2,7 +2,7 @@ package ksonnet
import (
"encoding/json"
"path"
"path/filepath"
"reflect"
"runtime"
"testing"
@@ -23,20 +23,19 @@ const (
func init() {
_, filename, _, _ := runtime.Caller(0)
testDataDir = path.Join(path.Dir(filename), "testdata")
testDataDir = filepath.Join(filepath.Dir(filename), "testdata")
}
// TestKsonnet verifies that loading the test app succeeds and that the
// environment's destination server is parsed from app.yaml.
func TestKsonnet(t *testing.T) {
	// The merged diff residue declared ksApp/err twice (a compile error) and
	// mixed the old app.Environment lookup with the new Destination API.
	ksApp, err := NewKsonnetApp(filepath.Join(testDataDir, testAppName))
	assert.Nil(t, err)
	defaultDest, err := ksApp.Destination(testEnvName)
	assert.Nil(t, err)
	assert.Equal(t, "https://1.2.3.4", defaultDest.Server)
}
func TestShow(t *testing.T) {
ksApp, err := NewKsonnetApp(path.Join(testDataDir, testAppName))
ksApp, err := NewKsonnetApp(filepath.Join(testDataDir, testAppName))
assert.Nil(t, err)
objs, err := ksApp.Show(testEnvName)
assert.Nil(t, err)
@@ -49,7 +48,7 @@ func TestShow(t *testing.T) {
}
func TestListEnvParams(t *testing.T) {
ksApp, err := NewKsonnetApp(path.Join(testDataDir, testAppName))
ksApp, err := NewKsonnetApp(filepath.Join(testDataDir, testAppName))
assert.Nil(t, err)
paramPointers, err := ksApp.ListEnvParams(testEnvName)
assert.Nil(t, err)

View File

@@ -2,16 +2,20 @@ package kube
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os/exec"
"strings"
"sync"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
@@ -21,19 +25,53 @@ type Kubectl interface {
ApplyResource(config *rest.Config, obj *unstructured.Unstructured, namespace string, dryRun, force bool) (string, error)
ConvertToVersion(obj *unstructured.Unstructured, group, version string) (*unstructured.Unstructured, error)
DeleteResource(config *rest.Config, obj *unstructured.Unstructured, namespace string) error
WatchResources(ctx context.Context, config *rest.Config, namespace string, selector func(kind schema.GroupVersionKind) metav1.ListOptions) (chan watch.Event, error)
}
type KubectlCmd struct{}
// WatchResources watches all existing watchable resources in the provided
// namespace of the cluster targeted by config. Events from every API resource
// are fanned into a single channel, which is closed once every underlying
// watch has terminated.
func (k KubectlCmd) WatchResources(
	ctx context.Context,
	config *rest.Config,
	namespace string,
	selector func(kind schema.GroupVersionKind) metav1.ListOptions,
) (chan watch.Event, error) {
	log.Infof("Start watching for resources changes in cluster %s", config.Host)
	apiResIfs, err := filterAPIResources(config, watchSupported, namespace)
	if err != nil {
		return nil, err
	}
	ch := make(chan watch.Event)
	go func() {
		var wg sync.WaitGroup
		wg.Add(len(apiResIfs))
		for _, a := range apiResIfs {
			go func(apiResIf apiResourceInterface) {
				defer wg.Done()
				gvk := schema.FromAPIVersionAndKind(apiResIf.groupVersion, apiResIf.apiResource.Kind)
				// Watch errors are intentionally best-effort: a resource we
				// cannot watch is simply skipped.
				w, err := apiResIf.resourceIf.Watch(selector(gvk))
				if err == nil {
					defer w.Stop()
					copyEventsChannel(ctx, w.ResultChan(), ch)
				}
			}(a)
		}
		wg.Wait()
		close(ch)
		// Log config.Host (not config.ServerName, which is usually empty) so
		// the start/stop lines identify the same cluster.
		log.Infof("Stop watching for resources changes in cluster %s", config.Host)
	}()
	return ch, nil
}
// DeleteResource deletes resource
func (k KubectlCmd) DeleteResource(config *rest.Config, obj *unstructured.Unstructured, namespace string) error {
dynClientPool := dynamic.NewDynamicClientPool(config)
disco, err := discovery.NewDiscoveryClientForConfig(config)
dynamicIf, err := dynamic.NewForConfig(config)
if err != nil {
return err
}
gvk := obj.GroupVersionKind()
dclient, err := dynClientPool.ClientForGroupVersionKind(gvk)
disco, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
return err
}
@@ -41,9 +79,10 @@ func (k KubectlCmd) DeleteResource(config *rest.Config, obj *unstructured.Unstru
if err != nil {
return err
}
reIf := dclient.Resource(apiResource, namespace)
resource := gvk.GroupVersion().WithResource(apiResource.Name)
resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace)
propagationPolicy := metav1.DeletePropagationForeground
return reIf.Delete(obj.GetName(), &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
return resourceIf.Delete(obj.GetName(), &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
}
// ApplyResource performs an apply of a unstructured resource

View File

@@ -108,7 +108,11 @@ func UnsetLabel(target *unstructured.Unstructured, key string) {
if labels := target.GetLabels(); labels != nil {
if _, ok := labels[key]; ok {
delete(labels, key)
target.SetLabels(labels)
if len(labels) == 0 {
unstructured.RemoveNestedField(target.Object, "metadata", "labels")
} else {
target.SetLabels(labels)
}
}
}
}
@@ -200,17 +204,32 @@ func FlushServerResourcesCache() {
apiResourceCache.Flush()
}
// ToGroupVersionResource builds the GroupVersionResource that identifies
// apiResource within the given groupVersion string.
func ToGroupVersionResource(groupVersion string, apiResource *metav1.APIResource) schema.GroupVersionResource {
	gvk := schema.FromAPIVersionAndKind(groupVersion, apiResource.Kind)
	return gvk.GroupVersion().WithResource(apiResource.Name)
}
// ToResourceInterface returns a dynamic client interface for the resource,
// scoped to the namespace when the resource is namespaced.
func ToResourceInterface(dynamicIf dynamic.Interface, apiResource *metav1.APIResource, resource schema.GroupVersionResource, namespace string) dynamic.ResourceInterface {
	if !apiResource.Namespaced {
		return dynamicIf.Resource(resource)
	}
	return dynamicIf.Resource(resource).Namespace(namespace)
}
// GetLiveResource returns the corresponding live resource from a unstructured object
func GetLiveResource(dclient dynamic.Interface, obj *unstructured.Unstructured, apiResource *metav1.APIResource, namespace string) (*unstructured.Unstructured, error) {
func GetLiveResource(dynamicIf dynamic.Interface, obj *unstructured.Unstructured, apiResource *metav1.APIResource, namespace string) (*unstructured.Unstructured, error) {
resourceName := obj.GetName()
if resourceName == "" {
return nil, fmt.Errorf("resource was supplied without a name")
}
reIf := dclient.Resource(apiResource, namespace)
gvk := obj.GroupVersionKind()
resource := ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
reIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace)
liveObj, err := reIf.Get(resourceName, metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
log.Infof("No live counterpart to %s/%s/%s/%s in namespace: '%s'", apiResource.Group, apiResource.Version, apiResource.Name, resourceName, namespace)
log.Infof("No live counterpart to %s, %s/%s", gvk.String(), namespace, resourceName)
return nil, nil
}
return nil, errors.WithStack(err)
@@ -226,9 +245,24 @@ func IsCRD(obj *unstructured.Unstructured) bool {
return IsCRDGroupVersionKind(obj.GroupVersionKind())
}
func WatchResourcesWithLabel(ctx context.Context, config *rest.Config, namespace string, labelName string) (chan watch.Event, error) {
log.Infof("Start watching for resources changes with label %s in cluster %s", labelName, config.Host)
dynClientPool := dynamic.NewDynamicClientPool(config)
// isExcludedResourceGroup reports whether an API group is excluded from bulk
// resource operations. Temporary workaround for
// https://github.com/argoproj/argo-cd/issues/650.
func isExcludedResourceGroup(resource metav1.APIResource) bool {
	switch resource.Group {
	case "servicecatalog.k8s.io":
		return true
	default:
		return false
	}
}
// apiResourceInterface pairs a discovered API resource with the dynamic client
// interface used to operate on it.
type apiResourceInterface struct {
	groupVersion string                    // group/version string the resource was discovered under
	apiResource  metav1.APIResource        // discovery metadata for the resource
	resourceIf   dynamic.ResourceInterface // client scoped to the resource (and namespace, if namespaced)
}

// filterFunc decides whether an API resource (within the given groupVersion)
// should be included when enumerating server resources.
type filterFunc func(groupVersion string, apiResource *metav1.APIResource) bool
func filterAPIResources(config *rest.Config, filter filterFunc, namespace string) ([]apiResourceInterface, error) {
dynamicIf, err := dynamic.NewForConfig(config)
if err != nil {
return nil, err
}
disco, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
return nil, err
@@ -237,41 +271,58 @@ func WatchResourcesWithLabel(ctx context.Context, config *rest.Config, namespace
if err != nil {
return nil, err
}
resources := make([]dynamic.ResourceInterface, 0)
apiResIfs := make([]apiResourceInterface, 0)
for _, apiResourcesList := range serverResources {
for i := range apiResourcesList.APIResources {
apiResource := apiResourcesList.APIResources[i]
watchSupported := false
for _, verb := range apiResource.Verbs {
if verb == watchVerb {
watchSupported = true
break
for _, apiResource := range apiResourcesList.APIResources {
if filter(apiResourcesList.GroupVersion, &apiResource) {
resource := ToGroupVersionResource(apiResourcesList.GroupVersion, &apiResource)
resourceIf := ToResourceInterface(dynamicIf, &apiResource, resource, namespace)
apiResIf := apiResourceInterface{
groupVersion: apiResourcesList.GroupVersion,
apiResource: apiResource,
resourceIf: resourceIf,
}
}
if watchSupported {
dclient, err := dynClientPool.ClientForGroupVersionKind(schema.FromAPIVersionAndKind(apiResourcesList.GroupVersion, apiResource.Kind))
if err != nil {
return nil, err
}
resources = append(resources, dclient.Resource(&apiResource, namespace))
apiResIfs = append(apiResIfs, apiResIf)
}
}
}
return apiResIfs, nil
}
// isSupportedVerb returns whether or not a APIResource supports a specific verb
func isSupportedVerb(apiResource *metav1.APIResource, verb string) bool {
	for i := range apiResource.Verbs {
		if apiResource.Verbs[i] == verb {
			return true
		}
	}
	return false
}
// watchSupported reports whether apiResource supports the watch verb and does
// not belong to an excluded API group.
func watchSupported(groupVersion string, apiResource *metav1.APIResource) bool {
	if isExcludedResourceGroup(*apiResource) {
		return false
	}
	return isSupportedVerb(apiResource, watchVerb)
}
func WatchResourcesWithLabel(ctx context.Context, config *rest.Config, namespace string, labelName string) (chan watch.Event, error) {
log.Infof("Start watching for resources changes with label %s in cluster %s", labelName, config.Host)
apiResIfs, err := filterAPIResources(config, watchSupported, namespace)
if err != nil {
return nil, err
}
ch := make(chan watch.Event)
go func() {
var wg sync.WaitGroup
wg.Add(len(resources))
for i := 0; i < len(resources); i++ {
resource := resources[i]
go func() {
wg.Add(len(apiResIfs))
for _, apiResIf := range apiResIfs {
go func(resourceIf dynamic.ResourceInterface) {
defer wg.Done()
w, err := resource.Watch(metav1.ListOptions{LabelSelector: labelName})
w, err := resourceIf.Watch(metav1.ListOptions{LabelSelector: labelName})
if err == nil {
defer w.Stop()
copyEventsChannel(ctx, w.ResultChan(), ch)
}
}()
}(apiResIf.resourceIf)
}
wg.Wait()
close(ch)
@@ -301,47 +352,23 @@ func copyEventsChannel(ctx context.Context, src <-chan watch.Event, dst chan wat
// GetResourcesWithLabel returns all kubernetes resources with specified label
func GetResourcesWithLabel(config *rest.Config, namespace string, labelName string, labelValue string) ([]*unstructured.Unstructured, error) {
dynClientPool := dynamic.NewDynamicClientPool(config)
disco, err := discovery.NewDiscoveryClientForConfig(config)
listSupported := func(groupVersion string, apiResource *metav1.APIResource) bool {
return isSupportedVerb(apiResource, listVerb) && !isExcludedResourceGroup(*apiResource)
}
apiResIfs, err := filterAPIResources(config, listSupported, namespace)
if err != nil {
return nil, err
}
resources, err := GetCachedServerResources(config.Host, disco)
if err != nil {
return nil, err
}
var resourceInterfaces []dynamic.ResourceInterface
for _, apiResourcesList := range resources {
for i := range apiResourcesList.APIResources {
apiResource := apiResourcesList.APIResources[i]
listSupported := false
for _, verb := range apiResource.Verbs {
if verb == listVerb {
listSupported = true
break
}
}
if listSupported {
dclient, err := dynClientPool.ClientForGroupVersionKind(schema.FromAPIVersionAndKind(apiResourcesList.GroupVersion, apiResource.Kind))
if err != nil {
return nil, err
}
resourceInterfaces = append(resourceInterfaces, dclient.Resource(&apiResource, namespace))
}
}
}
var asyncErr error
var result []*unstructured.Unstructured
var wg sync.WaitGroup
var lock sync.Mutex
wg.Add(len(resourceInterfaces))
for _, c := range resourceInterfaces {
go func(client dynamic.ResourceInterface) {
wg.Add(len(apiResIfs))
for _, apiResIf := range apiResIfs {
go func(resourceIf dynamic.ResourceInterface) {
defer wg.Done()
list, err := client.List(metav1.ListOptions{
list, err := resourceIf.List(metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", labelName, labelValue),
})
if err != nil {
@@ -351,8 +378,8 @@ func GetResourcesWithLabel(config *rest.Config, namespace string, labelName stri
return
}
// apply client side filtering since not every kubernetes API supports label filtering
for i := range list.(*unstructured.UnstructuredList).Items {
item := list.(*unstructured.UnstructuredList).Items[i]
for i := range list.Items {
item := list.Items[i]
labels := item.GetLabels()
if labels != nil {
if value, ok := labels[labelName]; ok && value == labelValue {
@@ -362,84 +389,62 @@ func GetResourcesWithLabel(config *rest.Config, namespace string, labelName stri
}
}
}
}(c)
}(apiResIf.resourceIf)
}
wg.Wait()
return result, asyncErr
}
// DeleteResourceWithLabel delete all resources which match to specified label selector
func DeleteResourceWithLabel(config *rest.Config, namespace string, labelName string, labelValue string) error {
dynClientPool := dynamic.NewDynamicClientPool(config)
disco, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
return err
}
resources, err := GetCachedServerResources(config.Host, disco)
if err != nil {
return err
}
type resClient struct {
dynamic.ResourceInterface
deleteCollectionSupported bool
}
var resourceInterfaces []resClient
for _, apiResourcesList := range resources {
for i := range apiResourcesList.APIResources {
apiResource := apiResourcesList.APIResources[i]
deleteCollectionSupported := false
deleteSupported := false
for _, verb := range apiResource.Verbs {
if verb == deleteCollectionVerb {
deleteCollectionSupported = true
} else if verb == deleteVerb {
deleteSupported = true
}
}
dclient, err := dynClientPool.ClientForGroupVersionKind(schema.FromAPIVersionAndKind(apiResourcesList.GroupVersion, apiResource.Kind))
if err != nil {
return err
}
if deleteCollectionSupported || deleteSupported && !IsCRDGroupVersionKind(schema.FromAPIVersionAndKind(apiResourcesList.GroupVersion, apiResource.Kind)) {
resourceInterfaces = append(resourceInterfaces, resClient{
dclient.Resource(&apiResource, namespace),
deleteCollectionSupported,
})
// DeleteResourcesWithLabel delete all resources which match to specified label selector
func DeleteResourcesWithLabel(config *rest.Config, namespace string, labelName string, labelValue string) error {
deleteSupported := func(groupVersion string, apiResource *metav1.APIResource) bool {
if !isSupportedVerb(apiResource, deleteCollectionVerb) {
// if we can't delete by collection, we better be able to list and delete
if !isSupportedVerb(apiResource, listVerb) || !isSupportedVerb(apiResource, deleteVerb) {
return false
}
}
if isExcludedResourceGroup(*apiResource) {
return false
}
if IsCRDGroupVersionKind(schema.FromAPIVersionAndKind(groupVersion, apiResource.Kind)) {
return false
}
return true
}
apiResIfs, err := filterAPIResources(config, deleteSupported, namespace)
if err != nil {
return err
}
var asyncErr error
propagationPolicy := metav1.DeletePropagationForeground
var wg sync.WaitGroup
wg.Add(len(resourceInterfaces))
for _, c := range resourceInterfaces {
go func(client resClient) {
wg.Add(len(apiResIfs))
for _, a := range apiResIfs {
go func(apiResIf apiResourceInterface) {
defer wg.Done()
if client.deleteCollectionSupported {
err = client.DeleteCollection(&metav1.DeleteOptions{
deleteCollectionSupported := isSupportedVerb(&apiResIf.apiResource, deleteCollectionVerb)
resourceIf := apiResIf.resourceIf
if deleteCollectionSupported {
err = resourceIf.DeleteCollection(&metav1.DeleteOptions{
PropagationPolicy: &propagationPolicy,
}, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", labelName, labelValue)})
if err != nil && !apierr.IsNotFound(err) {
asyncErr = err
}
} else {
items, err := client.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", labelName, labelValue)})
items, err := resourceIf.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", labelName, labelValue)})
if err != nil {
asyncErr = err
return
}
for _, item := range items.(*unstructured.UnstructuredList).Items {
for _, item := range items.Items {
// apply client side filtering since not every kubernetes API supports label filtering
labels := item.GetLabels()
if labels != nil {
if value, ok := labels[labelName]; ok && value == labelValue {
err = client.Delete(item.GetName(), &metav1.DeleteOptions{
err = resourceIf.Delete(item.GetName(), &metav1.DeleteOptions{
PropagationPolicy: &propagationPolicy,
})
if err != nil && !apierr.IsNotFound(err) {
@@ -450,7 +455,7 @@ func DeleteResourceWithLabel(config *rest.Config, namespace string, labelName st
}
}
}
}(c)
}(a)
}
wg.Wait()
return asyncErr
@@ -476,8 +481,10 @@ type listResult struct {
}
// ListResources returns a list of resources of a particular API type using the dynamic client
func ListResources(dclient dynamic.Interface, apiResource metav1.APIResource, namespace string, listOpts metav1.ListOptions) ([]*unstructured.Unstructured, error) {
reIf := dclient.Resource(&apiResource, namespace)
func ListResources(dynamicIf dynamic.Interface, apiResource metav1.APIResource, namespace string, listOpts metav1.ListOptions) ([]*unstructured.Unstructured, error) {
gvk := schema.FromAPIVersionAndKind(apiResource.Version, apiResource.Kind)
resource := ToGroupVersionResource(gvk.GroupVersion().String(), &apiResource)
reIf := ToResourceInterface(dynamicIf, &apiResource, resource, namespace)
liveObjs, err := reIf.List(listOpts)
if err != nil {
return nil, errors.WithStack(err)
@@ -515,7 +522,13 @@ func cleanKubectlOutput(s string) string {
// WriteKubeConfig takes a rest.Config and writes it as a kubeconfig at the specified path
func WriteKubeConfig(restConfig *rest.Config, namespace, filename string) error {
var kubeConfig = clientcmdapi.Config{
kubeConfig := NewKubeConfig(restConfig, namespace)
return clientcmd.WriteToFile(*kubeConfig, filename)
}
// NewKubeConfig converts a clientcmdapi.Config (kubeconfig) from a rest.Config
func NewKubeConfig(restConfig *rest.Config, namespace string) *clientcmdapi.Config {
return &clientcmdapi.Config{
CurrentContext: restConfig.Host,
Contexts: map[string]*clientcmdapi.Context{
restConfig.Host: {
@@ -526,49 +539,62 @@ func WriteKubeConfig(restConfig *rest.Config, namespace, filename string) error
},
Clusters: map[string]*clientcmdapi.Cluster{
restConfig.Host: {
Server: restConfig.Host,
Server: restConfig.Host,
InsecureSkipTLSVerify: restConfig.TLSClientConfig.Insecure,
CertificateAuthority: restConfig.TLSClientConfig.CAFile,
CertificateAuthorityData: restConfig.TLSClientConfig.CAData,
},
},
AuthInfos: map[string]*clientcmdapi.AuthInfo{
restConfig.Host: {},
restConfig.Host: newAuthInfo(restConfig),
},
}
// Set Cluster info
if restConfig.TLSClientConfig.Insecure {
kubeConfig.Clusters[restConfig.Host].InsecureSkipTLSVerify = true
}
if restConfig.TLSClientConfig.CAFile != "" {
kubeConfig.Clusters[restConfig.Host].CertificateAuthority = restConfig.TLSClientConfig.CAFile
}
// Set AuthInfo
if len(restConfig.TLSClientConfig.CAData) > 0 {
kubeConfig.Clusters[restConfig.Host].CertificateAuthorityData = restConfig.TLSClientConfig.CAData
}
}
// newAuthInfo returns an AuthInfo from a rest config, detecting if the rest.Config is an
// in-cluster config and automatically setting the token path appropriately.
func newAuthInfo(restConfig *rest.Config) *clientcmdapi.AuthInfo {
authInfo := clientcmdapi.AuthInfo{}
haveCredentials := false
if restConfig.TLSClientConfig.CertFile != "" {
kubeConfig.AuthInfos[restConfig.Host].ClientCertificate = restConfig.TLSClientConfig.CertFile
authInfo.ClientCertificate = restConfig.TLSClientConfig.CertFile
haveCredentials = true
}
if len(restConfig.TLSClientConfig.CertData) > 0 {
kubeConfig.AuthInfos[restConfig.Host].ClientCertificateData = restConfig.TLSClientConfig.CertData
authInfo.ClientCertificateData = restConfig.TLSClientConfig.CertData
haveCredentials = true
}
if restConfig.TLSClientConfig.KeyFile != "" {
kubeConfig.AuthInfos[restConfig.Host].ClientKey = restConfig.TLSClientConfig.KeyFile
authInfo.ClientKey = restConfig.TLSClientConfig.KeyFile
haveCredentials = true
}
if len(restConfig.TLSClientConfig.KeyData) > 0 {
kubeConfig.AuthInfos[restConfig.Host].ClientKeyData = restConfig.TLSClientConfig.KeyData
authInfo.ClientKeyData = restConfig.TLSClientConfig.KeyData
haveCredentials = true
}
if restConfig.Username != "" {
kubeConfig.AuthInfos[restConfig.Host].Username = restConfig.Username
authInfo.Username = restConfig.Username
haveCredentials = true
}
if restConfig.Password != "" {
kubeConfig.AuthInfos[restConfig.Host].Password = restConfig.Password
authInfo.Password = restConfig.Password
haveCredentials = true
}
if restConfig.BearerToken != "" {
kubeConfig.AuthInfos[restConfig.Host].Token = restConfig.BearerToken
authInfo.Token = restConfig.BearerToken
haveCredentials = true
}
if restConfig.ExecProvider != nil {
kubeConfig.AuthInfos[restConfig.Host].Exec = restConfig.ExecProvider
authInfo.Exec = restConfig.ExecProvider
haveCredentials = true
}
return clientcmd.WriteToFile(kubeConfig, filename)
if restConfig.ExecProvider == nil && !haveCredentials {
// If no credentials were set (or there was no exec provider), we assume in-cluster config.
// In-cluster configs from the go-client will no longer set bearer tokens, so we set the
// well known token path. See issue #774
authInfo.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
}
return &authInfo
}
var diffSeparator = regexp.MustCompile(`\n---`)

View File

@@ -18,7 +18,9 @@ import (
fakediscovery "k8s.io/client-go/discovery/fake"
fakedynamic "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
kubetesting "k8s.io/client-go/testing"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/argoproj/argo-cd/common"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
@@ -100,8 +102,8 @@ func TestGetLiveResource(t *testing.T) {
assert.True(t, ok)
fakeDiscovery.Fake.Resources = resourceList()
fakeDynClient := fakedynamic.FakeClient{
Fake: &kubetesting.Fake{},
fakeDynClient := fakedynamic.FakeDynamicClient{
Fake: kubetesting.Fake{},
}
fakeDynClient.Fake.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
svc, err := kubeclientset.CoreV1().Services(test.TestNamespace).Get(demoSvc.Name, metav1.GetOptions{})
@@ -119,8 +121,8 @@ func TestGetLiveResource(t *testing.T) {
func TestListResources(t *testing.T) {
kubeclientset := fake.NewSimpleClientset(test.DemoService(), test.DemoDeployment())
fakeDynClient := fakedynamic.FakeClient{
Fake: &kubetesting.Fake{},
fakeDynClient := fakedynamic.FakeDynamicClient{
Fake: kubetesting.Fake{},
}
fakeDynClient.Fake.AddReactor("list", "services", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
svcList, err := kubeclientset.CoreV1().Services(test.TestNamespace).List(metav1.ListOptions{})
@@ -304,3 +306,24 @@ spec:
assert.Equal(t, float64(0.2), requestsBefore["cpu"])
assert.Equal(t, "200m", requestsAfter["cpu"])
}
func TestInClusterKubeConfig(t *testing.T) {
restConfig := &rest.Config{}
kubeConfig := NewKubeConfig(restConfig, "")
assert.NotEmpty(t, kubeConfig.AuthInfos[kubeConfig.CurrentContext].TokenFile)
restConfig = &rest.Config{
Password: "foo",
}
kubeConfig = NewKubeConfig(restConfig, "")
assert.Empty(t, kubeConfig.AuthInfos[kubeConfig.CurrentContext].TokenFile)
restConfig = &rest.Config{
ExecProvider: &clientcmdapi.ExecConfig{
APIVersion: "client.authentication.k8s.io/v1alpha1",
Command: "aws-iam-authenticator",
},
}
kubeConfig = NewKubeConfig(restConfig, "")
assert.Empty(t, kubeConfig.AuthInfos[kubeConfig.CurrentContext].TokenFile)
}

View File

@@ -160,7 +160,29 @@ func (mgr *SessionManager) VerifyToken(tokenString string) (jwt.Claims, error) {
verifier := provider.Verifier(&oidc.Config{ClientID: claims.Audience})
idToken, err := verifier.Verify(context.Background(), tokenString)
if err != nil {
return nil, err
// HACK: if we failed token verification, it's possible the reason was because dex
// restarted and has new JWKS signing keys (we do not back dex with persistent storage
// so keys might be regenerated). Detect this by:
// 1. looking for the specific error message
// 2. re-initializing the OIDC provider
// 3. re-attempting token verification
// NOTE: the error message is sensitive to implementation of verifier.Verify()
if !strings.Contains(err.Error(), "failed to verify signature") {
return nil, err
}
provider, retryErr := mgr.initializeOIDCProvider()
if retryErr != nil {
// return original error if we fail to re-initialize OIDC
return nil, err
}
verifier = provider.Verifier(&oidc.Config{ClientID: claims.Audience})
idToken, err = verifier.Verify(context.Background(), tokenString)
if err != nil {
return nil, err
}
// If we get here, we successfully re-initialized OIDC and after re-initialization,
// the token is now valid.
log.Info("New OIDC settings detected")
}
var claims jwt.MapClaims
err = idToken.Claims(&claims)
@@ -168,6 +190,7 @@ func (mgr *SessionManager) VerifyToken(tokenString string) (jwt.Claims, error) {
}
}
// Username is a helper to extract a human readable username from a context
func Username(ctx context.Context) string {
claims, ok := ctx.Value("claims").(jwt.Claims)
if !ok {
@@ -194,8 +217,7 @@ func MakeCookieMetadata(key, value string, flags ...string) string {
return strings.Join(components, "; ")
}
// OIDCProvider lazily initializes and returns the OIDC provider, querying the well known oidc
// configuration path (http://example-argocd.com/api/dex/.well-known/openid-configuration).
// OIDCProvider lazily initializes, memoizes, and returns the OIDC provider.
// We have to initialize the provider lazily since ArgoCD is an OIDC client to itself, which
// presents a chicken-and-egg problem of (1) serving dex over HTTP, and (2) querying the OIDC
// provider (ourselves) to initialize the app.
@@ -203,6 +225,12 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
if mgr.provider != nil {
return mgr.provider, nil
}
return mgr.initializeOIDCProvider()
}
// initializeOIDCProvider re-initializes the OIDC provider, querying the well known oidc
// configuration path (http://example-argocd.com/api/dex/.well-known/openid-configuration)
func (mgr *SessionManager) initializeOIDCProvider() (*oidc.Provider, error) {
if !mgr.settings.IsSSOConfigured() {
return nil, fmt.Errorf("SSO is not configured")
}
@@ -213,7 +241,6 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
if err != nil {
return nil, fmt.Errorf("Failed to query provider %q: %v", issuerURL, err)
}
// Returns the scopes the provider supports
// See: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
var s struct {
@@ -223,24 +250,27 @@ func (mgr *SessionManager) OIDCProvider() (*oidc.Provider, error) {
return nil, fmt.Errorf("Failed to parse provider scopes_supported: %v", err)
}
log.Infof("OpenID supported scopes: %v", s.ScopesSupported)
offlineAsScope := false
if len(s.ScopesSupported) == 0 {
// scopes_supported is a "RECOMMENDED" discovery claim, not a required
// one. If missing, assume that the provider follows the spec and has
// an "offline_access" scope.
mgr.offlineAsScope = true
offlineAsScope = true
} else {
// See if scopes_supported has the "offline_access" scope.
for _, scope := range s.ScopesSupported {
if scope == oidc.ScopeOfflineAccess {
mgr.offlineAsScope = true
offlineAsScope = true
break
}
}
}
mgr.provider = provider
mgr.offlineAsScope = offlineAsScope
return mgr.provider, nil
}
// OfflineAsScope returns whether or not the OIDC provider supports offline as a scope
func (mgr *SessionManager) OfflineAsScope() bool {
_, _ = mgr.OIDCProvider() // forces offlineAsScope to be determined
return mgr.offlineAsScope

View File

@@ -45,18 +45,18 @@ func RegisterHeapDumper(filePath string) {
if _, err := os.Stat(filePath); err == nil {
err = os.Remove(filePath)
if err != nil {
log.Warnf("could not delete heap profile file: ", err)
log.Warnf("could not delete heap profile file: %v", err)
return
}
}
f, err := os.Create(filePath)
if err != nil {
log.Warnf("could not create heap profile file: ", err)
log.Warnf("could not create heap profile file: %v", err)
return
}
if err := pprof.WriteHeapProfile(f); err != nil {
log.Warnf("could not write heap profile: ", err)
log.Warnf("could not write heap profile: %v", err)
return
} else {
log.Infof("dumped heap profile to %s", filePath)