Compare commits

...

56 Commits

Author SHA1 Message Date
Alexander Matyushentsev
1b0e3e7b80 Update manifests to v0.9.2 2018-09-28 09:36:21 -07:00
Jesse Suen
2faf45af94 Add version check during release to ensure compiled version is accurate (#646) 2018-09-27 18:57:44 -07:00
Jesse Suen
b8c4436870 Update generated files 2018-09-27 18:09:50 -07:00
Jesse Suen
f8b2576f19 Fix issue where argocd-server logged credentials in plain text during repo add (issue #653) 2018-09-27 18:04:10 -07:00
Jesse Suen
61f4513e52 Switch to go-git for all remote git interactions including auth (issue #651) 2018-09-27 18:03:56 -07:00
Jesse Suen
4d210c887b Do not append .git extension during normalization for Azure hosted git (issue #643) (#645) 2018-09-27 18:03:33 -07:00
Alexander Matyushentsev
ab6b2969ac Issue #650 - Temporary ignore service catalog resources (#661) 2018-09-27 18:00:24 -07:00
Andrew Merenbach
447dd30768 Update generated files (#660) 2018-09-27 13:53:56 -07:00
dthomson25
ab98589e1a Normalize policies by always adding space after comma (#659) 2018-09-27 13:30:49 -07:00
Stephen Haynes
2fd6c11338 update to kustomize 1.0.8 (#644) 2018-09-26 21:28:08 -07:00
Alexander Matyushentsev
b90102bc68 Update argocd VERSION file 2018-09-24 15:28:17 -07:00
Alexander Matyushentsev
1aa7146d3e Update manifests to v0.9.1 2018-09-24 14:26:20 -07:00
Alexander Matyushentsev
b366977265 Issue #639 - Repo server unable to execute ls-remote for private repos (#640) 2018-09-24 14:22:23 -07:00
Alexander Matyushentsev
b6439dc545 Update manifests to v0.9.0 2018-09-24 13:16:09 -07:00
Alexander Matyushentsev
bed82d68df Update changelog and fix release command dependency (#638) 2018-09-24 12:58:17 -07:00
Jesse Suen
359271dfa8 Update manifests to support in-cluster installations (#634) 2018-09-24 10:14:31 -07:00
Jesse Suen
0af77a0706 Add more event sources and provide better detail in event messages (issue #635) (#637)
* Expand SyncOperation to also store parameter overrides
Fix auto-sync when used with parameter overrides

* Add more event sources and provide better detail in event messages (issue #635)
2018-09-24 08:52:43 -07:00
Jesse Suen
e6efd79ad8 Support ability to use a helm values files from a URL (issue #624) 2018-09-21 16:05:42 -07:00
Jesse Suen
c953934d2e Simplify the RBAC resources to remove unnecessary sub-resources (issue #629) 2018-09-21 15:25:08 -07:00
Alexander Matyushentsev
5b4742d42b Issue #613 - Don't delete CRD (#630) 2018-09-21 10:29:32 -07:00
Jesse Suen
269f70df51 Trim git url during normalization (issue #614) (#623) 2018-09-20 16:26:17 -07:00
Jesse Suen
67177f933b Fix false OutOfSync condition when an explicit namespace is set in the config (#622) 2018-09-20 14:52:16 -07:00
Jesse Suen
606fdcded7 Rename server.crt/server.key to tls.crt/tls.key to integrate with Ingress (issue #617) 2018-09-20 12:49:23 -07:00
Alexander Matyushentsev
70b9db68b4 Issue #599 - Lazy enforcement of unknown cluster/namespace restricted resources (#612) 2018-09-20 09:48:54 -07:00
Jesse Suen
dc8a2f5d62 Support for exporting prometheus metrics about ArgoCD applications (#608) 2018-09-17 14:05:11 -07:00
Alexander Matyushentsev
8830cf9556 609 - Support restricting TLS version (#610) 2018-09-17 13:14:00 -07:00
Jesse Suen
bfb558eb92 Fix issue where helm hooks were being deployed as part of sync (issue #605) 2018-09-17 11:29:44 -07:00
Jesse Suen
505866a4c6 Support helm charts with dependencies and namespace sensitivity (issue #582) 2018-09-17 11:29:44 -07:00
Yuki Kodama
acd2de80fb Update getting started to point to v0.8.2 (#607) 2018-09-15 23:45:06 -07:00
Alexander Matyushentsev
0b08bf4537 Issue #523 - Use 'kubectl auth reconcile' for RBAC resources (#600) 2018-09-14 20:38:35 -07:00
Jesse Suen
223091482c Improve three-way diff to provide more accurate Sync status and diff result (issue #597) (#604) 2018-09-14 19:10:11 -07:00
Andrew Merenbach
4699946e1b Derive dedicated Dex deployment (#564)
Put Dex into its own deployment and service to decouple API server stability from auth token processing
2018-09-14 17:08:12 -07:00
Jesse Suen
097f87fd52 Improve remarshalling to use reflection/schema builders to handle all k8s core types (#603) 2018-09-14 16:17:20 -07:00
Alexander Matyushentsev
66b4f3a685 Issue #515 - handle concurrent settings initialization by api servers (#602) 2018-09-14 15:09:12 -07:00
Jesse Suen
02116d4bfc Fix comparison failure when app contains unregistered custom resource (issue #583) (#596) 2018-09-13 14:02:04 -07:00
Jesse Suen
fb17589af6 Fix race conditions in kube.GetResourcesWithLabel and DeleteResourceWithLabel (issue #587) (#593) 2018-09-13 13:58:47 -07:00
Alexander Matyushentsev
15ce7ea880 Issue #584 - ArgoCD fails to deploy resources list (#598) 2018-09-13 13:52:30 -07:00
Alexander Matyushentsev
57a3123a55 Issue #482 - Support IAM Authentication for managing external K8s clusters (#588) 2018-09-13 00:09:23 -07:00
Jesse Suen
32e96e4bb2 Fix app sync / wait panic in CLI 2018-09-12 23:41:42 -07:00
Jesse Suen
47ee26a77a Downgrade ksonnet from v0.12.0 to v0.11.0 due to quote unescape regression 2018-09-12 23:41:42 -07:00
dthomson25
9cd5d52fbc Add iat as path param for delete token http call (#586) 2018-09-12 19:49:20 -07:00
Alexander Matyushentsev
aa2afcd47b Issue #330 - Projects need controls on cluster-scoped resources (#558)
* Issue #330 - Projects need controls on cluster-scoped resources

* Issue #330 - Introduce namespace resources black-list
2018-09-11 15:10:47 -07:00
Jesse Suen
fd510e7933 Support an automated sync policy upon detection of OutOfSync status from git (#571) 2018-09-11 14:28:53 -07:00
Jesse Suen
e29d5b9634 In-memory implementation of ls-remote using go-git to reduce repo lock contention (#574) 2018-09-11 13:53:51 -07:00
Conor Fennell
2f9891b15b Issue #577 - Add rbac non resource url policy for argocd-manager-role (#578)
* Add rbac non resource url policy for argocd-manager-role
* allow all non resource urls to be added through rbac
2018-09-11 13:23:10 -07:00
Jesse Suen
c3ecd615ff Update getting started and docs to point to v0.8.1 (#575) 2018-09-10 19:05:47 -07:00
Jesse Suen
4e22a3cb21 Add link to SigApps video and update CHANGELOG for v0.8.1 (#572) 2018-09-10 16:08:08 -07:00
Jesse Suen
bc98b65190 Fix controller hot loop when app source contains bad manifests (issue #568) (#570) 2018-09-10 10:58:13 -07:00
Jesse Suen
02b756ef40 Fix issue where branch checkout did not have accurate git tree state (issue #567) (#569) 2018-09-10 10:55:12 -07:00
dthomson25
954706570c Reorder K8s resources to correct creation order (#551) 2018-09-10 10:14:14 -07:00
Alexander Matyushentsev
e2faf6970f Issue #527 - Support --in-cluster authentication without providing a kubeconfig (#559)
* Issue #527 - Support --in-cluster authentication without providing a kubeconfig

* Issue #527 - make sure resources are watched for 'local' cluster
2018-09-10 08:20:17 -07:00
Alexander Matyushentsev
a528ae9c12 Issue #553 - Turn on TLS for repo server (#563) 2018-09-08 00:17:29 +03:00
Alexander Matyushentsev
0a5871eba4 Issue #470 - K8s secrets need to be redacted in API server (#560) 2018-09-07 23:51:32 +03:00
Alexander Matyushentsev
27471d5249 Issue #540 - Support raw jsonnet as an application source (#561) 2018-09-07 21:15:19 +03:00
Alexander Matyushentsev
ed484c00db Issue 499 - fileFiles path should be relative to app directory (#552) 2018-09-05 23:37:26 +03:00
Jesse Suen
b868f26ca4 Update documentation for v0.8.0 (#550) 2018-09-04 22:31:21 -07:00
157 changed files with 12212 additions and 2618 deletions

View File

@@ -1,5 +1,71 @@
# Changelog
## v0.9.0
### Notes about upgrading from v0.8
* The `server.crt` and `server.key` fields of `argocd-secret` had been renamed to `tls.crt` and `tls.key` for
better integration with cert manager (issue #617). Existing `argocd-secret` should be updated accordingly to
preserve existing TLS certificate.
* Cluster wide resources should be allowed in default project (due to issue #330):
```
argocd project allow-cluster-resource default '*' '*'
```
### Changes since v0.8:
+ Auto-sync option in application CRD instance (issue #79)
+ Support raw jsonnet as an application source (issue #540)
+ Reorder K8s resources to correct creation order (issue #102)
+ Redact K8s secrets from API server payloads (issue #470)
+ Support --in-cluster authentication without providing a kubeconfig (issue #527)
+ Special handling of CustomResourceDefinitions (issue #613)
+ ArgoCD should download helm chart dependencies (issue #582)
+ Export ArgoCD stats as prometheus style metrics (issue #513)
+ Support restricting TLS version (issue #609)
+ Use 'kubectl auth reconcile' before 'kubectl apply' (issue #523)
+ Projects need controls on cluster-scoped resources (issue #330)
+ Support IAM Authentication for managing external K8s clusters (issue #482)
+ Compatibility with cert manager (issue #617)
* Enable TLS for repo server (issue #553)
* Split out dex into its own deployment (instead of sidecar) (issue #555)
+ [UI] Support selection of helm values files in App creation wizard (issue #499)
+ [UI] Support specifying source revision in App creation wizard allow (issue #503)
+ [UI] Improve resource diff rendering (issue #457)
+ [UI] Indicate number of ready containers in pod (issue #539)
+ [UI] Indicate when app is overriding parameters (issue #503)
+ [UI] Provide a YAML view of resources (issue #396)
+ [UI] Project Role/Token management from UI (issue #548)
+ [UI] App creation wizard should allow specifying source revision (issue #562)
+ [UI] Ability to modify application from UI (issue #615)
+ [UI] indicate when operation is in progress or has failed (issue #566)
- Fix issue where changes were not pulled when tracking a branch (issue #567)
- Lazy enforcement of unknown cluster/namespace restricted resources (issue #599)
- Fix controller hot loop when app source contains bad manifests (issue #568)
- [UI] Fix issue where projects filter does not work when application got changed
- [UI] Creating apps from directories is not obvious (issue #565)
- Helm hooks are being deployed as resources (issue #605)
- Disagreement in three way diff calculation (issue #597)
- SIGSEGV in kube.GetResourcesWithLabel (issue #587)
- ArgoCD fails to deploy resources list (issue #584)
- Branch tracking not working properly (issue #567)
- Controller hot loop when application source has bad manifests (issue #568)
## v0.8.2 (2018-09-12)
- Downgrade ksonnet from v0.12.0 to v0.11.0 due to quote unescape regression
- Fix CLI panic when performing an initial `argocd sync/wait`
## v0.8.1 (2018-09-10)
+ [UI] Support selection of helm values files in App creation wizard (issue #499)
+ [UI] Support specifying source revision in App creation wizard allow (issue #503)
+ [UI] Improve resource diff rendering (issue #457)
+ [UI] Indicate number of ready containers in pod (issue #539)
+ [UI] Indicate when app is overriding parameters (issue #503)
+ [UI] Provide a YAML view of resources (issue #396)
- Fix issue where changes were not pulled when tracking a branch (issue #567)
- Fix controller hot loop when app source contains bad manifests (issue #568)
- [UI] Fix issue where projects filter does not work when application got changed
## v0.8.0 (2018-09-04)
### Notes about upgrading from v0.7

View File

@@ -49,7 +49,7 @@ RUN curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kuberne
# Option 1: build ksonnet ourselves
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /usr/local/bin/ks
# Option 2: use official tagged ksonnet release
ENV KSONNET_VERSION=0.12.0
ENV KSONNET_VERSION=0.11.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks
@@ -61,10 +61,14 @@ RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-li
mv /tmp/linux-amd64/helm /usr/local/bin/helm
# Install kustomize
ENV KUSTOMIZE_VERSION=1.0.7
ENV KUSTOMIZE_VERSION=1.0.8
RUN curl -L -o /usr/local/bin/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/kustomize
ENV AWS_IAM_AUTHENTICATOR_VERSION=0.3.0
RUN curl -L -o /usr/local/bin/aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.3.0/heptio-authenticator-aws_${AWS_IAM_AUTHENTICATOR_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/aws-iam-authenticator
####################################################################################################
# ArgoCD Build stage which performs the actual build of ArgoCD binaries
@@ -109,6 +113,7 @@ COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/kubectl
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
COPY --from=builder /usr/local/bin/aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=argocd
@@ -123,6 +128,9 @@ RUN ln -s /usr/local/bin/argocd /argocd && \
ln -s /usr/local/bin/argocd-repo-server /argocd-repo-server
USER argocd
RUN helm init --client-only
WORKDIR /home/argocd
ARG BINARY
CMD ${BINARY}

283
Gopkg.lock generated
View File

@@ -43,15 +43,15 @@
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
[[projects]]
digest = "1:26a8fd03a1fb25aa92c58080d8ca76363d56694c148f6175266e0393c0d2e729"
digest = "1:0c024ed5f8ee58bb5bcafcc1d55678cbaec13884a9798eaadfe6ca0d16ef9392"
name = "github.com/argoproj/argo"
packages = [
"pkg/apis/workflow",
"pkg/apis/workflow/v1alpha1",
]
pruneopts = ""
revision = "ac241c95c13f08e868cd6f5ee32c9ce273e239ff"
version = "v2.1.1"
revision = "af636ddd8455660f307d835814d3112b90815dfd"
version = "v2.2.0"
[[projects]]
branch = "master"
@@ -72,6 +72,14 @@
revision = "ccb8e960c48f04d6935e72476ae4a51028f9e22f"
version = "v9"
[[projects]]
branch = "master"
digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = ""
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:79421244ba5848aae4b0a5c41e633a04e4894cb0b164a219dc8c15ec7facb7f1"
name = "github.com/blang/semver"
@@ -157,6 +165,21 @@
revision = "3658237ded108b4134956c1b3050349d93e7b895"
version = "v2.7.1"
[[projects]]
digest = "1:ba7c75e38d81b9cf3e8601c081567be3b71bccca8c11aee5de98871360aa4d7b"
name = "github.com/emirpasic/gods"
packages = [
"containers",
"lists",
"lists/arraylist",
"trees",
"trees/binaryheap",
"utils",
]
pruneopts = ""
revision = "f6c17b524822278a87e3b3bd809fec33b51f5b46"
version = "v1.9.0"
[[projects]]
digest = "1:b13707423743d41665fd23f0c36b2f37bb49c30e94adb813319c44188a51ba22"
name = "github.com/ghodss/yaml"
@@ -290,7 +313,7 @@
version = "v1.11.0"
[[projects]]
digest = "1:0a3f6a0c68ab8f3d455f8892295503b179e571b7fefe47cc6c556405d1f83411"
digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
@@ -314,6 +337,7 @@
"protoc-gen-gofast",
"protoc-gen-gogo/descriptor",
"protoc-gen-gogo/generator",
"protoc-gen-gogo/generator/internal/remap",
"protoc-gen-gogo/grpc",
"protoc-gen-gogo/plugin",
"protoc-gen-gogofast",
@@ -322,8 +346,8 @@
"vanity/command",
]
pruneopts = ""
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
[[projects]]
branch = "master"
@@ -334,8 +358,7 @@
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
branch = "master"
digest = "1:27828cf74799ad14fcafece9f78f350cdbcd4fbe92c14ad4cba256fbbfa328ef"
digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18"
name = "github.com/golang/protobuf"
packages = [
"jsonpb",
@@ -343,6 +366,7 @@
"protoc-gen-go",
"protoc-gen-go/descriptor",
"protoc-gen-go/generator",
"protoc-gen-go/generator/internal/remap",
"protoc-gen-go/grpc",
"protoc-gen-go/plugin",
"ptypes",
@@ -353,10 +377,11 @@
"ptypes/timestamp",
]
pruneopts = ""
revision = "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]]
digest = "1:b3679b7aa6d7243a170d5f43810e0f4a7c3a2120340069f7a077625c51c83fd9"
digest = "1:14d826ee25139b4674e9768ac287a135f4e7c14e1134a5b15e4e152edfd49f41"
name = "github.com/google/go-jsonnet"
packages = [
".",
@@ -364,7 +389,7 @@
"parser",
]
pruneopts = ""
revision = "v0.11.2"
revision = "dfddf2b4e3aec377b0dcdf247ff92e7d078b8179"
[[projects]]
branch = "master"
@@ -458,6 +483,14 @@
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
digest = "1:95abc4eba158a39873bd4fabdee576d0ae13826b550f8b710881d80ae4093a0f"
name = "github.com/jbenet/go-context"
packages = ["io"]
pruneopts = ""
revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"
[[projects]]
digest = "1:dd5cdbd84daf24b2a009364f3c24859b1e4de1eab87c451fb3bce09935d909fc"
name = "github.com/json-iterator/go"
@@ -475,7 +508,15 @@
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
digest = "1:f4975a8aa19d1a4e512cfebf9a2c3751faedd8939be80d193d157463b47eb334"
digest = "1:41e0bed5df4f9fd04c418bf9b6b7179b3671e416ad6175332601ca1c8dc74606"
name = "github.com/kevinburke/ssh_config"
packages = ["."]
pruneopts = ""
revision = "81db2a75821ed34e682567d48be488a1c3121088"
version = "0.5"
[[projects]]
digest = "1:2fe45da14d25bce0a58c5a991967149cc5d07f94be327b928a9fd306466815a3"
name = "github.com/ksonnet/ksonnet"
packages = [
"metadata/params",
@@ -491,11 +532,10 @@
"pkg/util/jsonnet",
"pkg/util/kslib",
"pkg/util/strings",
"pkg/util/version",
]
pruneopts = ""
revision = "5b4917a292a76a62e3d97852279575293ba73b50"
version = "v0.12.0"
revision = "e943ae55d4fe256c8330a047ce8426ad9dac110c"
version = "v0.11.0"
[[projects]]
digest = "1:a345c560e5609bd71b1f54993f3b087ca45eb0e6226886c642ce519de81896cb"
@@ -525,6 +565,22 @@
pruneopts = ""
revision = "32fa128f234d041f196a9f3e0fea5ac9772c08e1"
[[projects]]
digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = ""
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = ""
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:eb9117392ee8e7aa44f78e0db603f70b1050ee0ebda4bd40040befb5b218c546"
@@ -541,6 +597,14 @@
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
digest = "1:049b5bee78dfdc9628ee0e557219c41f683e5b06c5a5f20eaba0105ccc586689"
name = "github.com/pelletier/go-buffruneio"
packages = ["."]
pruneopts = ""
revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
version = "v0.2.0"
[[projects]]
digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
name = "github.com/pkg/errors"
@@ -568,6 +632,50 @@
pruneopts = ""
revision = "525d0eb5f91d30e3b1548de401b7ef9ea6898520"
[[projects]]
digest = "1:9d34d575593e3dd27bbd119138ba009ef1535a0df2aad7259e1dd5aed7405eea"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/internal",
"prometheus/promhttp",
]
pruneopts = ""
revision = "7858729281ec582767b20e0d696b6041d995d5e0"
[[projects]]
branch = "master"
digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = ""
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
[[projects]]
branch = "master"
digest = "1:f477ef7b65d94fb17574fc6548cef0c99a69c1634ea3b6da248b63a61ebe0498"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
]
pruneopts = ""
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
[[projects]]
branch = "master"
digest = "1:e04aaa0e8f8da0ed3d6c0700bd77eda52a47f38510063209d72d62f0ef807d5e"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs",
]
pruneopts = ""
revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
[[projects]]
branch = "master"
digest = "1:1ee3e3e12ffdb5ba70b918148685cab6340bbc0d03ba723bcb46062d1bea69c6"
@@ -632,6 +740,19 @@
pruneopts = ""
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
[[projects]]
digest = "1:b1861b9a1aa0801b0b62945ed7477c1ab61a4bd03b55dfbc27f6d4f378110c8c"
name = "github.com/src-d/gcfg"
packages = [
".",
"scanner",
"token",
"types",
]
pruneopts = ""
revision = "f187355171c936ac84a82793659ebb4936bc1c23"
version = "v1.3.0"
[[projects]]
digest = "1:306417ea2f31ea733df356a2b895de63776b6a5107085b33458e5cd6eb1d584d"
name = "github.com/stretchr/objx"
@@ -662,6 +783,14 @@
revision = "a053f3dac71df214bfe8b367f34220f0029c9c02"
version = "v3.3.1"
[[projects]]
digest = "1:afc0b8068986a01e2d8f449917829753a54f6bd4d1265c2b4ad9cba75560020f"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
pruneopts = ""
revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c"
version = "v0.2.0"
[[projects]]
digest = "1:529ed3f98838f69e13761788d0cc71b44e130058fab13bae2ce09f7a176bced4"
name = "github.com/yudai/gojsondiff"
@@ -688,8 +817,21 @@
packages = [
"bcrypt",
"blowfish",
"cast5",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"openpgp",
"openpgp/armor",
"openpgp/elgamal",
"openpgp/errors",
"openpgp/packet",
"openpgp/s2k",
"poly1305",
"ssh",
"ssh/agent",
"ssh/knownhosts",
"ssh/terminal",
]
pruneopts = ""
@@ -727,22 +869,14 @@
[[projects]]
branch = "master"
digest = "1:8aad4e360d6645abe564e925bd6d8d3b94975e52ce68af0c28f91b5aedb0637f"
name = "golang.org/x/sync"
packages = ["errgroup"]
pruneopts = ""
revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5"
[[projects]]
branch = "master"
digest = "1:407b5f905024dd94ee08c1777fabb380fb3d380f92a7f7df2592be005337eeb3"
digest = "1:ed900376500543ca05f2a2383e1f541b4606f19cd22f34acb81b17a0b90c7f3e"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = ""
revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd"
revision = "d0be0721c37eeb5299f245a996a483160fc36940"
[[projects]]
branch = "master"
@@ -781,8 +915,6 @@
digest = "1:77e1d6ed91936b206979806b0aacbf817ec54b840803d8f8cd7a1de5bfbf92a4"
name = "golang.org/x/tools"
packages = [
"cmd/cover",
"cover",
"go/ast/astutil",
"imports",
]
@@ -821,7 +953,7 @@
revision = "2b5a72b8730b0b16380010cfe5286c42108d88e7"
[[projects]]
digest = "1:d2dc833c73202298c92b63a7e180e2b007b5a3c3c763e3b9fe1da249b5c7f5b9"
digest = "1:15656947b87a6a240e61dcfae9e71a55a8d5677f240d12ab48f02cdbabf1e309"
name = "google.golang.org/grpc"
packages = [
".",
@@ -833,9 +965,13 @@
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/transport",
"keepalive",
"metadata",
"naming",
@@ -848,11 +984,10 @@
"stats",
"status",
"tap",
"transport",
]
pruneopts = ""
revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
version = "v1.10.0"
revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1"
version = "v1.15.0"
[[projects]]
digest = "1:bf7444e1e6a36e633f4f1624a67b9e4734cfb879c27ac0a2082ac16aff8462ac"
@@ -898,6 +1033,77 @@
revision = "76dd09796242edb5b897103a75df2645c028c960"
version = "v2.1.6"
[[projects]]
digest = "1:c8f3ff1edaf7208bf7633e5952ffb8d697552343f8010aee12427400b434ae63"
name = "gopkg.in/src-d/go-billy.v4"
packages = [
".",
"helper/chroot",
"helper/polyfill",
"osfs",
"util",
]
pruneopts = ""
revision = "59952543636f55de3f860b477b615093d5c2c3e4"
version = "v4.2.1"
[[projects]]
digest = "1:a77c60b21f224f0ddc9c7e9407008c6e7dfbca88e5a6e827aa27ecf80497ebb6"
name = "gopkg.in/src-d/go-git.v4"
packages = [
".",
"config",
"internal/revision",
"plumbing",
"plumbing/cache",
"plumbing/filemode",
"plumbing/format/config",
"plumbing/format/diff",
"plumbing/format/gitignore",
"plumbing/format/idxfile",
"plumbing/format/index",
"plumbing/format/objfile",
"plumbing/format/packfile",
"plumbing/format/pktline",
"plumbing/object",
"plumbing/protocol/packp",
"plumbing/protocol/packp/capability",
"plumbing/protocol/packp/sideband",
"plumbing/revlist",
"plumbing/storer",
"plumbing/transport",
"plumbing/transport/client",
"plumbing/transport/file",
"plumbing/transport/git",
"plumbing/transport/http",
"plumbing/transport/internal/common",
"plumbing/transport/server",
"plumbing/transport/ssh",
"storage",
"storage/filesystem",
"storage/filesystem/dotgit",
"storage/memory",
"utils/binary",
"utils/diff",
"utils/ioutil",
"utils/merkletrie",
"utils/merkletrie/filesystem",
"utils/merkletrie/index",
"utils/merkletrie/internal/frame",
"utils/merkletrie/noder",
]
pruneopts = ""
revision = "d3cec13ac0b195bfb897ed038a08b5130ab9969e"
version = "v4.7.0"
[[projects]]
digest = "1:ceec7e96590fb8168f36df4795fefe17051d4b0c2acc7ec4e260d8138c4dafac"
name = "gopkg.in/warnings.v0"
packages = ["."]
pruneopts = ""
revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
version = "v0.1.2"
[[projects]]
digest = "1:81314a486195626940617e43740b4fa073f265b0715c9f54ce2027fee1cb5f61"
name = "gopkg.in/yaml.v2"
@@ -1257,6 +1463,7 @@
"github.com/golang/protobuf/proto",
"github.com/golang/protobuf/protoc-gen-go",
"github.com/golang/protobuf/ptypes/empty",
"github.com/google/go-jsonnet",
"github.com/grpc-ecosystem/go-grpc-middleware",
"github.com/grpc-ecosystem/go-grpc-middleware/auth",
"github.com/grpc-ecosystem/go-grpc-middleware/logging",
@@ -1270,6 +1477,8 @@
"github.com/ksonnet/ksonnet/pkg/component",
"github.com/patrickmn/go-cache",
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/qiangmzsx/string-adapter",
"github.com/sirupsen/logrus",
"github.com/skratchdot/open-golang/open",
@@ -1283,11 +1492,10 @@
"github.com/yudai/gojsondiff",
"github.com/yudai/gojsondiff/formatter",
"golang.org/x/crypto/bcrypt",
"golang.org/x/crypto/ssh",
"golang.org/x/crypto/ssh/terminal",
"golang.org/x/net/context",
"golang.org/x/oauth2",
"golang.org/x/sync/errgroup",
"golang.org/x/tools/cmd/cover",
"google.golang.org/genproto/googleapis/api/annotations",
"google.golang.org/grpc",
"google.golang.org/grpc/codes",
@@ -1300,6 +1508,13 @@
"gopkg.in/go-playground/webhooks.v3/bitbucket",
"gopkg.in/go-playground/webhooks.v3/github",
"gopkg.in/go-playground/webhooks.v3/gitlab",
"gopkg.in/src-d/go-git.v4",
"gopkg.in/src-d/go-git.v4/config",
"gopkg.in/src-d/go-git.v4/plumbing",
"gopkg.in/src-d/go-git.v4/plumbing/transport",
"gopkg.in/src-d/go-git.v4/plumbing/transport/http",
"gopkg.in/src-d/go-git.v4/plumbing/transport/ssh",
"gopkg.in/src-d/go-git.v4/storage/memory",
"k8s.io/api/apps/v1",
"k8s.io/api/apps/v1beta1",
"k8s.io/api/apps/v1beta2",

View File

@@ -1,29 +1,43 @@
# Packages should only be added to the following list when we use them *outside* of our go code.
# (e.g. we want to build the binary to invoke as part of the build process, such as in
# generate-proto.sh). Normal use of golang packages should be added via `dep ensure`, and pinned
# with a [[constraint]] or [[override]] when version is important.
required = [
"github.com/golang/protobuf/protoc-gen-go",
"github.com/gogo/protobuf/protoc-gen-gofast",
"github.com/gogo/protobuf/protoc-gen-gogofast",
"golang.org/x/sync/errgroup",
"k8s.io/code-generator/cmd/go-to-protobuf",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger",
"github.com/golang/protobuf/protoc-gen-go",
"golang.org/x/tools/cmd/cover",
"github.com/argoproj/pkg/time",
"github.com/dustin/go-humanize",
]
[[constraint]]
name = "google.golang.org/grpc"
version = "1.9.2"
version = "1.15.0"
[[constraint]]
name = "github.com/gogo/protobuf"
version = "1.1.1"
# override github.com/grpc-ecosystem/go-grpc-middleware's constraint on master
[[override]]
name = "github.com/golang/protobuf"
version = "1.2.0"
[[constraint]]
name = "github.com/grpc-ecosystem/grpc-gateway"
version = "v1.3.1"
# override argo outdated dependency
[[override]]
# prometheus does not believe in semversioning yet
[[constraint]]
name = "github.com/prometheus/client_golang"
revision = "7858729281ec582767b20e0d696b6041d995d5e0"
[[constraint]]
branch = "release-1.10"
name = "k8s.io/api"
# override ksonnet dependency
[[override]]
branch = "release-1.10"
name = "k8s.io/apimachinery"
@@ -36,7 +50,7 @@ required = [
branch = "release-1.10"
name = "k8s.io/code-generator"
[[override]]
[[constraint]]
branch = "release-7.0"
name = "k8s.io/client-go"
@@ -46,7 +60,7 @@ required = [
[[constraint]]
name = "github.com/ksonnet/ksonnet"
version = "v0.12.0"
version = "v0.11.0"
[[constraint]]
name = "github.com/gobuffalo/packr"

View File

@@ -78,9 +78,8 @@ cli-darwin: clean-debug
argocd-util: clean-debug
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
.PHONY: install-manifest
install-manifest:
if [ "${IMAGE_NAMESPACE}" = "" ] ; then echo "IMAGE_NAMESPACE must be set to build install manifest" ; exit 1 ; fi
.PHONY: manifests
manifests:
./hack/update-manifests.sh
.PHONY: server
@@ -149,9 +148,10 @@ clean: clean-debug
precheckin: test lint
.PHONY: release-precheck
release-precheck: install-manifest
release-precheck: manifests
@if [ "$(GIT_TREE_STATE)" != "clean" ]; then echo 'git tree state is $(GIT_TREE_STATE)' ; exit 1; fi
@if [ -z "$(GIT_TAG)" ]; then echo 'commit must be tagged to perform release' ; exit 1; fi
@if [ "$(GIT_TAG)" != "v`cat VERSION`" ]; then echo 'VERSION does not match git tag'; exit 1; fi
.PHONY: release
release: release-precheck precheckin cli-darwin cli-linux server-image controller-image repo-server-image cli-image

View File

@@ -1,4 +1,4 @@
controller: go run ./cmd/argocd-application-controller/main.go
api-server: go run ./cmd/argocd-server/main.go --insecure
api-server: go run ./cmd/argocd-server/main.go --insecure --disable-auth
repo-server: go run ./cmd/argocd-repo-server/main.go --loglevel debug
dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p 5556:5556 -p 5557:5557 -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/coreos/dex:v2.10.0 serve /dex.yaml"

View File

@@ -23,14 +23,21 @@ is provided for additional features.
Argo CD follows the **GitOps** pattern of using git repositories as the source of truth for defining
the desired application state. Kubernetes manifests can be specified in several ways:
* [ksonnet](https://ksonnet.io) applications
* [kustomize](https://kustomize.io) applications
* [helm](https://helm.sh) charts
* Simple directory of YAML/json manifests
* Plain directory of YAML/json manifests
Argo CD automates the deployment of the desired application states in the specified target environments.
Application deployments can track updates to branches, tags, or pinned to a specific version of
manifests at a git commit. See [tracking strategies](docs/tracking_strategies.md) for additional
details about the different tracking strategies available.
For a quick 10 minute overview of ArgoCD, check out the demo presented to the Sig Apps community
meeting:
[![Alt text](https://img.youtube.com/vi/aWDIQMbp1cc/0.jpg)](https://youtu.be/aWDIQMbp1cc?t=1m4s)
## Architecture
![Argo CD Architecture](docs/argocd_architecture.png)
@@ -48,6 +55,7 @@ For additional details, see [architecture overview](docs/architecture.md).
## Features
* Automated deployment of applications to specified target environments
* Flexibility in support for multiple config management tools (Ksonnet, Kustomize, Helm, plain-YAML)
* Continuous monitoring of deployed applications
* Automated or manual syncing of applications to its desired state
* Web and CLI based visualization of applications and differences between live vs. desired state
@@ -58,13 +66,12 @@ For additional details, see [architecture overview](docs/architecture.md).
* PreSync, Sync, PostSync hooks to support complex application rollouts (e.g. blue/green & canary upgrades)
* Audit trails for application events and API calls
* Parameter overrides for overriding ksonnet/helm parameters in git
* Service account/access key management for CI pipelines
## Development Status
* Argo CD is being used in production to deploy SaaS services at Intuit
## Roadmap
* Auto-sync toggle to directly apply git state changes to live state
* Service account/access key management for CI pipelines
* Support for additional config management tools (Kustomize?)
* Revamped UI, and feature parity with CLI
* Customizable application actions

View File

@@ -1 +1 @@
0.8.0
0.9.2

View File

@@ -12,6 +12,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// load the oidc plugin (required to authenticate with OpenID Connect).
@@ -23,7 +24,6 @@ import (
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/stats"
)
@@ -66,25 +66,14 @@ func newCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
// TODO (amatyushentsev): Use config map to store controller configuration
controllerConfig := controller.ApplicationControllerConfig{
Namespace: namespace,
InstanceID: "",
}
db := db.NewDB(namespace, kubeClient)
resyncDuration := time.Duration(appResyncPeriod) * time.Second
repoClientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
appStateManager := controller.NewAppStateManager(db, appClient, repoClientset, namespace)
appController := controller.NewApplicationController(
namespace,
kubeClient,
appClient,
repoClientset,
db,
appStateManager,
resyncDuration,
&controllerConfig)
resyncDuration)
secretController := controller.NewSecretController(kubeClient, repoClientset, resyncDuration, namespace)
ctx, cancel := context.WithCancel(context.Background())

View File

@@ -17,6 +17,7 @@ import (
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/ksonnet"
"github.com/argoproj/argo-cd/util/stats"
"github.com/argoproj/argo-cd/util/tls"
)
const (
@@ -27,7 +28,8 @@ const (
func newCommand() *cobra.Command {
var (
logLevel string
logLevel string
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
)
var command = cobra.Command{
Use: cliName,
@@ -37,7 +39,11 @@ func newCommand() *cobra.Command {
errors.CheckError(err)
log.SetLevel(level)
server := reposerver.NewServer(git.NewFactory(), newCache())
tlsConfigCustomizer, err := tlsConfigCustomizerSrc()
errors.CheckError(err)
server, err := reposerver.NewServer(git.NewFactory(), newCache(), tlsConfigCustomizer)
errors.CheckError(err)
grpc := server.CreateGRPC()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
errors.CheckError(err)
@@ -57,6 +63,7 @@ func newCommand() *cobra.Command {
}
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
return &command
}

View File

@@ -17,18 +17,20 @@ import (
"github.com/argoproj/argo-cd/server"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/stats"
"github.com/argoproj/argo-cd/util/tls"
)
// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
insecure bool
logLevel string
glogLevel int
clientConfig clientcmd.ClientConfig
staticAssetsDir string
repoServerAddress string
disableAuth bool
insecure bool
logLevel string
glogLevel int
clientConfig clientcmd.ClientConfig
staticAssetsDir string
repoServerAddress string
disableAuth bool
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
)
var command = &cobra.Command{
Use: cliName,
@@ -50,18 +52,22 @@ func NewCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
tlsConfigCustomizer, err := tlsConfigCustomizerSrc()
errors.CheckError(err)
kubeclientset := kubernetes.NewForConfigOrDie(config)
appclientset := appclientset.NewForConfigOrDie(config)
repoclientset := reposerver.NewRepositoryServerClientset(repoServerAddress)
argoCDOpts := server.ArgoCDServerOpts{
Insecure: insecure,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
DisableAuth: disableAuth,
Insecure: insecure,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
KubeClientset: kubeclientset,
AppClientset: appclientset,
RepoClientset: repoclientset,
DisableAuth: disableAuth,
TLSConfigCustomizer: tlsConfigCustomizer,
}
stats.RegisterStackDumper()
@@ -86,5 +92,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&repoServerAddress, "repo-server", "localhost:8081", "Repo server address.")
command.Flags().BoolVar(&disableAuth, "disable-auth", false, "Disable client authentication")
command.AddCommand(cli.NewVersionCmd(cliName))
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
return command
}

View File

@@ -366,7 +366,8 @@ func NewSettingsCommand() *cobra.Command {
errors.CheckError(err)
settingsMgr := settings.NewSettingsManager(kubeclientset, namespace)
_ = settings.UpdateSettings(superuserPassword, settingsMgr, updateSignature, updateSuperuser, namespace)
_, err = settings.UpdateSettings(superuserPassword, settingsMgr, updateSignature, updateSuperuser, namespace)
errors.CheckError(err)
},
}
command.Flags().BoolVar(&updateSuperuser, "update-superuser", false, "force updating the superuser password")

View File

@@ -50,7 +50,9 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co
fmt.Print("\n")
}
if newPassword == "" {
newPassword = settings.ReadAndConfirmPassword()
var err error
newPassword, err = settings.ReadAndConfirmPassword()
errors.CheckError(err)
}
updatePasswordRequest := account.UpdatePasswordRequest{

View File

@@ -119,6 +119,18 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
if len(appOpts.valuesFiles) > 0 {
app.Spec.Source.ValuesFiles = appOpts.valuesFiles
}
switch appOpts.syncPolicy {
case "automated":
app.Spec.SyncPolicy = &argoappv1.SyncPolicy{
Automated: &argoappv1.SyncPolicyAutomated{
Prune: appOpts.autoPrune,
},
}
case "none", "":
app.Spec.SyncPolicy = nil
default:
log.Fatalf("Invalid sync-policy: %s", appOpts.syncPolicy)
}
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
appCreateRequest := application.ApplicationCreateRequest{
@@ -182,6 +194,16 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
if len(app.Spec.Source.ValuesFiles) > 0 {
fmt.Printf(printOpFmtStr, "Helm Values:", strings.Join(app.Spec.Source.ValuesFiles, ","))
}
var syncPolicy string
if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.Automated != nil {
syncPolicy = "Automated"
if app.Spec.SyncPolicy.Automated.Prune {
syncPolicy += " (Prune)"
}
} else {
syncPolicy = "<none>"
}
fmt.Printf(printOpFmtStr, "Sync Policy:", syncPolicy)
if len(app.Status.Conditions) > 0 {
fmt.Println()
@@ -313,6 +335,17 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
app.Spec.Destination.Namespace = appOpts.destNamespace
case "project":
app.Spec.Project = appOpts.project
case "sync-policy":
switch appOpts.syncPolicy {
case "automated":
app.Spec.SyncPolicy = &argoappv1.SyncPolicy{
Automated: &argoappv1.SyncPolicyAutomated{},
}
case "none":
app.Spec.SyncPolicy = nil
default:
log.Fatalf("Invalid sync-policy: %s", appOpts.syncPolicy)
}
}
})
if visited == 0 {
@@ -320,6 +353,13 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
c.HelpFunc()(c, args)
os.Exit(1)
}
if c.Flags().Changed("auto-prune") {
if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil {
log.Fatal("Cannot set --auto-prune: application not configured with automatic sync")
}
app.Spec.SyncPolicy.Automated.Prune = appOpts.autoPrune
}
setParameterOverrides(app, appOpts.parameters)
oldOverrides := app.Spec.Source.ComponentParameterOverrides
updatedSpec, err := appIf.UpdateSpec(context.Background(), &application.ApplicationUpdateSpecRequest{
@@ -358,6 +398,8 @@ type appOptions struct {
parameters []string
valuesFiles []string
project string
syncPolicy string
autoPrune bool
}
func addAppFlags(command *cobra.Command, opts *appOptions) {
@@ -370,6 +412,8 @@ func addAppFlags(command *cobra.Command, opts *appOptions) {
command.Flags().StringArrayVarP(&opts.parameters, "parameter", "p", []string{}, "set a parameter override (e.g. -p guestbook=image=example/guestbook:latest)")
command.Flags().StringArrayVar(&opts.valuesFiles, "values", []string{}, "Helm values file(s) to use")
command.Flags().StringVar(&opts.project, "project", "", "Application project name")
command.Flags().StringVar(&opts.syncPolicy, "sync-policy", "", "Set the sync policy (one of: automated, none)")
command.Flags().BoolVar(&opts.autoPrune, "auto-prune", false, "Set automatic pruning when sync is automated")
}
// NewApplicationUnsetCommand returns a new instance of an `argocd app unset` command
@@ -911,10 +955,12 @@ func calculateResourceStates(app *argoappv1.Application) map[string]*resourceSta
}
var opResult *argoappv1.SyncOperationResult
if app.Status.OperationState.SyncResult != nil {
opResult = app.Status.OperationState.SyncResult
} else if app.Status.OperationState.RollbackResult != nil {
opResult = app.Status.OperationState.SyncResult
if app.Status.OperationState != nil {
if app.Status.OperationState.SyncResult != nil {
opResult = app.Status.OperationState.SyncResult
} else if app.Status.OperationState.RollbackResult != nil {
opResult = app.Status.OperationState.SyncResult
}
}
if opResult == nil {
return resStates

View File

@@ -44,8 +44,10 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
// NewClusterAddCommand returns a new instance of an `argocd cluster add` command
func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
var (
inCluster bool
upsert bool
inCluster bool
upsert bool
awsRoleArn string
awsClusterName string
)
var command = &cobra.Command{
Use: "add",
@@ -63,6 +65,7 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
if clstContext == nil {
log.Fatalf("Context %s does not exist in kubeconfig", args[0])
}
overrides := clientcmd.ConfigOverrides{
Context: *clstContext,
}
@@ -70,15 +73,23 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
conf, err := clientConfig.ClientConfig()
errors.CheckError(err)
// Install RBAC resources for managing the cluster
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
managerBearerToken, err := common.InstallClusterManagerRBAC(clientset)
errors.CheckError(err)
managerBearerToken := ""
var awsAuthConf *argoappv1.AWSAuthConfig
if awsClusterName != "" {
awsAuthConf = &argoappv1.AWSAuthConfig{
ClusterName: awsClusterName,
RoleARN: awsRoleArn,
}
} else {
// Install RBAC resources for managing the cluster
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
managerBearerToken, err = common.InstallClusterManagerRBAC(clientset)
errors.CheckError(err)
}
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
defer util.Close(conn)
clst := NewCluster(args[0], conf, managerBearerToken)
clst := NewCluster(args[0], conf, managerBearerToken, awsAuthConf)
if inCluster {
clst.Server = common.KubernetesInternalAPIServerAddr
}
@@ -94,6 +105,8 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file")
command.Flags().BoolVar(&inCluster, "in-cluster", false, "Indicates ArgoCD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing cluster with the same name even if the spec differs")
command.Flags().StringVar(&awsClusterName, "aws-cluster-name", "", "AWS Cluster name if set then aws-iam-authenticator will be used to access cluster")
command.Flags().StringVar(&awsRoleArn, "aws-role-arn", "", "Optional AWS role arn. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.")
return command
}
@@ -136,7 +149,7 @@ func printKubeContexts(ca clientcmd.ConfigAccess) {
}
}
func NewCluster(name string, conf *rest.Config, managerBearerToken string) *argoappv1.Cluster {
func NewCluster(name string, conf *rest.Config, managerBearerToken string, awsAuthConf *argoappv1.AWSAuthConfig) *argoappv1.Cluster {
tlsClientConfig := argoappv1.TLSClientConfig{
Insecure: conf.TLSClientConfig.Insecure,
ServerName: conf.TLSClientConfig.ServerName,
@@ -165,6 +178,7 @@ func NewCluster(name string, conf *rest.Config, managerBearerToken string) *argo
Config: argoappv1.ClusterConfig{
BearerToken: managerBearerToken,
TLSClientConfig: tlsClientConfig,
AWSAuthConfig: awsAuthConf,
},
}
return &clst

View File

@@ -76,6 +76,10 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
command.AddCommand(NewProjectRemoveDestinationCommand(clientOpts))
command.AddCommand(NewProjectAddSourceCommand(clientOpts))
command.AddCommand(NewProjectRemoveSourceCommand(clientOpts))
command.AddCommand(NewProjectAllowClusterResourceCommand(clientOpts))
command.AddCommand(NewProjectDenyClusterResourceCommand(clientOpts))
command.AddCommand(NewProjectAllowNamespaceResourceCommand(clientOpts))
command.AddCommand(NewProjectDenyNamespaceResourceCommand(clientOpts))
return command
}
@@ -603,6 +607,104 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
return command
}
func modifyProjectResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, action func(proj *v1alpha1.AppProject, group string, kind string) bool) *cobra.Command {
return &cobra.Command{
Use: cmdUse,
Short: cmdDesc,
Run: func(c *cobra.Command, args []string) {
if len(args) != 3 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName, group, kind := args[0], args[1], args[2]
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
defer util.Close(conn)
proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
errors.CheckError(err)
if action(proj, group, kind) {
_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
}
},
}
}
// NewProjectAllowNamespaceResourceCommand returns a new instance of an `argocd proj allow-namespace-resource` command
func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-namespace-resource PROJECT group kind"
desc := "Removes namespaced resource from black list"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
index := -1
for i, item := range proj.Spec.NamespaceResourceBlacklist {
if item.Group == group && item.Kind == kind {
index = i
break
}
}
if index == -1 {
log.Info("Specified cluster resource is not blacklisted")
return false
}
proj.Spec.NamespaceResourceBlacklist = append(proj.Spec.NamespaceResourceBlacklist[:index], proj.Spec.NamespaceResourceBlacklist[index+1:]...)
return true
})
}
// NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command
func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-namespace-resource PROJECT group kind"
desc := "Adds namespaced resource to black list"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
for _, item := range proj.Spec.NamespaceResourceBlacklist {
if item.Group == group && item.Kind == kind {
log.Infof("Group '%s' and kind '%s' are already blacklisted in project", item.Group, item.Kind)
return false
}
}
proj.Spec.NamespaceResourceBlacklist = append(proj.Spec.NamespaceResourceBlacklist, v1.GroupKind{Group: group, Kind: kind})
return true
})
}
// NewProjectDenyClusterResourceCommand returns a new instance of an `deny-cluster-resource` command
func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-cluster-resource PROJECT group kind"
desc := "Adds cluster wide resource to white list"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
index := -1
for i, item := range proj.Spec.ClusterResourceWhitelist {
if item.Group == group && item.Kind == kind {
index = i
break
}
}
if index == -1 {
log.Info("Specified cluster resource already denied in project")
return false
}
proj.Spec.ClusterResourceWhitelist = append(proj.Spec.ClusterResourceWhitelist[:index], proj.Spec.ClusterResourceWhitelist[index+1:]...)
return true
})
}
// NewProjectAllowClusterResourceCommand returns a new instance of an `argocd proj allow-cluster-resource` command
func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-cluster-resource PROJECT group kind"
desc := "Removed cluster wide resource from white list"
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
for _, item := range proj.Spec.ClusterResourceWhitelist {
if item.Group == group && item.Kind == kind {
log.Infof("Group '%s' and kind '%s' are already whitelisted in project", item.Group, item.Kind)
return false
}
}
proj.Spec.ClusterResourceWhitelist = append(proj.Spec.ClusterResourceWhitelist, v1.GroupKind{Group: group, Kind: kind})
return true
})
}
// NewProjectRemoveSourceCommand returns a new instance of an `argocd proj remove-src` command
func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
@@ -677,9 +779,9 @@ func NewProjectListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
projects, err := projIf.List(context.Background(), &project.ProjectQuery{})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\n")
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\n")
for _, p := range projects.Items {
fmt.Fprintf(w, "%s\t%s\t%v\n", p.Name, p.Spec.Description, p.Spec.Destinations)
fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, p.Spec.Destinations, p.Spec.SourceRepos, p.Spec.ClusterResourceWhitelist, p.Spec.NamespaceResourceBlacklist)
}
_ = w.Flush()
},

View File

@@ -87,7 +87,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
}
command.Flags().StringVar(&repo.Username, "username", "", "username to the repository")
command.Flags().StringVar(&repo.Password, "password", "", "password to the repository")
command.Flags().StringVar(&sshPrivateKeyPath, "sshPrivateKeyPath", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().StringVar(&sshPrivateKeyPath, "ssh-private-key-path", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
return command
}

View File

@@ -102,4 +102,8 @@ var ArgoCDManagerPolicyRules = []rbacv1.PolicyRule{
Resources: []string{"*"},
Verbs: []string{"*"},
},
{
NonResourceURLs: []string{"*"},
Verbs: []string{"*"},
},
}

View File

@@ -14,9 +14,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -46,6 +43,7 @@ const (
type ApplicationController struct {
namespace string
kubeClientset kubernetes.Interface
kubectl kube.Kubectl
applicationClientset appclientset.Interface
auditLogger *argo.AuditLogger
appRefreshQueue workqueue.RateLimitingInterface
@@ -70,28 +68,28 @@ func NewApplicationController(
kubeClientset kubernetes.Interface,
applicationClientset appclientset.Interface,
repoClientset reposerver.Clientset,
db db.ArgoDB,
appStateManager AppStateManager,
appResyncPeriod time.Duration,
config *ApplicationControllerConfig,
) *ApplicationController {
appRefreshQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
appOperationQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
return &ApplicationController{
db := db.NewDB(namespace, kubeClientset)
kubectlCmd := kube.KubectlCmd{}
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd)
ctrl := ApplicationController{
namespace: namespace,
kubeClientset: kubeClientset,
kubectl: kubectlCmd,
applicationClientset: applicationClientset,
repoClientset: repoClientset,
appRefreshQueue: appRefreshQueue,
appOperationQueue: appOperationQueue,
appRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
appOperationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
appStateManager: appStateManager,
appInformer: newApplicationInformer(applicationClientset, appRefreshQueue, appOperationQueue, appResyncPeriod, config),
db: db,
statusRefreshTimeout: appResyncPeriod,
forceRefreshApps: make(map[string]bool),
forceRefreshAppsMutex: &sync.Mutex{},
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "application-controller"),
}
ctrl.appInformer = ctrl.newApplicationInformer()
return &ctrl
}
// Run starts the Application CRD controller.
@@ -100,13 +98,14 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
defer ctrl.appRefreshQueue.ShutDown()
go ctrl.appInformer.Run(ctx.Done())
go ctrl.watchAppsResources()
if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced) {
log.Error("Timed out waiting for caches to sync")
return
}
go ctrl.watchAppsResources()
for i := 0; i < statusProcessors; i++ {
go wait.Until(func() {
for ctrl.processAppRefreshQueueItem() {
@@ -142,8 +141,13 @@ func (ctrl *ApplicationController) isRefreshForced(appName string) bool {
// watchClusterResources watches for resource changes annotated with application label on specified cluster and schedule corresponding app refresh.
func (ctrl *ApplicationController) watchClusterResources(ctx context.Context, item appv1.Cluster) {
config := item.RESTConfig()
retryUntilSucceed(func() error {
retryUntilSucceed(func() (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("Recovered from panic: %v\n", r)
}
}()
config := item.RESTConfig()
ch, err := kube.WatchResourcesWithLabel(ctx, config, "", common.LabelApplicationName)
if err != nil {
return err
@@ -160,26 +164,68 @@ func (ctrl *ApplicationController) watchClusterResources(ctx context.Context, it
}
}
return fmt.Errorf("resource updates channel has closed")
}, fmt.Sprintf("watch app resources on %s", config.Host), ctx, watchResourcesRetryTimeout)
}, fmt.Sprintf("watch app resources on %s", item.Server), ctx, watchResourcesRetryTimeout)
}
func isClusterHasApps(apps []interface{}, cluster *appv1.Cluster) bool {
for _, obj := range apps {
if app, ok := obj.(*appv1.Application); ok && app.Spec.Destination.Server == cluster.Server {
return true
}
}
return false
}
// WatchAppsResources watches for resource changes annotated with application label on all registered clusters and schedule corresponding app refresh.
func (ctrl *ApplicationController) watchAppsResources() {
watchingClusters := make(map[string]context.CancelFunc)
watchingClusters := make(map[string]struct {
cancel context.CancelFunc
cluster *appv1.Cluster
})
retryUntilSucceed(func() error {
return ctrl.db.WatchClusters(context.Background(), func(event *db.ClusterEvent) {
cancel, ok := watchingClusters[event.Cluster.Server]
if event.Type == watch.Deleted && ok {
cancel()
clusterEventCallback := func(event *db.ClusterEvent) {
info, ok := watchingClusters[event.Cluster.Server]
hasApps := isClusterHasApps(ctrl.appInformer.GetStore().List(), event.Cluster)
// cluster resources must be watched only if cluster has at least one app
if (event.Type == watch.Deleted || !hasApps) && ok {
info.cancel()
delete(watchingClusters, event.Cluster.Server)
} else if event.Type != watch.Deleted && !ok {
} else if event.Type != watch.Deleted && !ok && hasApps {
ctx, cancel := context.WithCancel(context.Background())
watchingClusters[event.Cluster.Server] = cancel
watchingClusters[event.Cluster.Server] = struct {
cancel context.CancelFunc
cluster *appv1.Cluster
}{
cancel: cancel,
cluster: event.Cluster,
}
go ctrl.watchClusterResources(ctx, *event.Cluster)
}
})
}
onAppModified := func(obj interface{}) {
if app, ok := obj.(*appv1.Application); ok {
var cluster *appv1.Cluster
info, infoOk := watchingClusters[app.Spec.Destination.Server]
if infoOk {
cluster = info.cluster
} else {
cluster, _ = ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
}
if cluster != nil {
// trigger a cluster event whenever an app is created/deleted, to either start or stop watching its cluster's resources
clusterEventCallback(&db.ClusterEvent{Cluster: cluster, Type: watch.Modified})
}
}
}
ctrl.appInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{AddFunc: onAppModified, DeleteFunc: onAppModified})
return ctrl.db.WatchClusters(context.Background(), clusterEventCallback)
}, "watch clusters", context.Background(), watchResourcesRetryTimeout)
<-context.Background().Done()
@@ -252,12 +298,13 @@ func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext b
}
func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application) {
log.Infof("Deleting resources for application %s", app.Name)
logCtx := log.WithField("application", app.Name)
logCtx.Infof("Deleting resources")
// Get refreshed application info, since informer app copy might be stale
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(app.Name, metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
log.Errorf("Unable to get refreshed application info prior deleting resources: %v", err)
logCtx.Errorf("Unable to get refreshed application info prior deleting resources: %v", err)
}
return
}
@@ -281,14 +328,14 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
}
}
if err != nil {
log.Errorf("Unable to delete application resources: %v", err)
ctrl.setAppCondition(app, appv1.ApplicationCondition{
Type: appv1.ApplicationConditionDeletionError,
Message: err.Error(),
})
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonStatusRefreshed, Action: "refresh_status"}, v1.EventTypeWarning)
message := fmt.Sprintf("Unable to delete application resources: %v", err)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonStatusRefreshed, Type: v1.EventTypeWarning}, message)
} else {
log.Infof("Successfully deleted resources for application %s", app.Name)
logCtx.Info("Successfully deleted resources")
}
}
@@ -320,11 +367,12 @@ func (ctrl *ApplicationController) setAppCondition(app *appv1.Application, condi
}
func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Application) {
logCtx := log.WithField("application", app.Name)
var state *appv1.OperationState
// Recover from any unexpected panics and automatically set the status to be failed
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
logCtx.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
state.Phase = appv1.OperationError
if rerr, ok := r.(error); ok {
state.Message = rerr.Error()
@@ -341,20 +389,20 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
// again. To detect this, always retrieve the latest version to ensure it is not stale.
freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
log.Errorf("Failed to retrieve latest application state: %v", err)
logCtx.Errorf("Failed to retrieve latest application state: %v", err)
return
}
if !isOperationInProgress(freshApp) {
log.Infof("Skipping operation on stale application state (%s)", app.ObjectMeta.Name)
logCtx.Infof("Skipping operation on stale application state")
return
}
app = freshApp
state = app.Status.OperationState.DeepCopy()
log.Infof("Resuming in-progress operation. app: %s, phase: %s, message: %s", app.ObjectMeta.Name, state.Phase, state.Message)
logCtx.Infof("Resuming in-progress operation. phase: %s, message: %s", state.Phase, state.Message)
} else {
state = &appv1.OperationState{Phase: appv1.OperationRunning, Operation: *app.Operation, StartedAt: metav1.Now()}
ctrl.setOperationState(app, state)
log.Infof("Initialized new operation. app: %s, operation: %v", app.ObjectMeta.Name, *app.Operation)
logCtx.Infof("Initialized new operation: %v", *app.Operation)
}
ctrl.appStateManager.SyncAppState(app, state)
@@ -400,7 +448,6 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
// If operation is completed, clear the operation field to indicate no operation is
// in progress.
patch["operation"] = nil
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Action: "refresh_status"}, v1.EventTypeNormal)
}
if reflect.DeepEqual(app.Status.OperationState, state) {
log.Infof("No operation updates necessary to '%s'. Skipping patch", app.Name)
@@ -416,6 +463,18 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
return err
}
log.Infof("updated '%s' operation (phase: %s)", app.Name, state.Phase)
if state.Phase.Completed() {
eventInfo := argo.EventInfo{Reason: argo.EventReasonOperationCompleted}
var message string
if state.Phase.Successful() {
eventInfo.Type = v1.EventTypeNormal
message = "Operation succeeded"
} else {
eventInfo.Type = v1.EventTypeWarning
message = fmt.Sprintf("Operation failed: %v", state.Message)
}
ctrl.auditLogger.LogAppEvent(app, eventInfo, message)
}
return nil
}, "Update application operation state", context.Background(), updateOperationStateTimeout)
}
@@ -475,10 +534,16 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
parameters = manifestInfo.Params
}
healthState, err := setApplicationHealth(comparisonResult)
healthState, err := setApplicationHealth(ctrl.kubectl, comparisonResult)
if err != nil {
conditions = append(conditions, appv1.ApplicationCondition{Type: appv1.ApplicationConditionComparisonError, Message: err.Error()})
}
syncErrCond := ctrl.autoSync(app, comparisonResult)
if syncErrCond != nil {
conditions = append(conditions, *syncErrCond)
}
ctrl.updateAppStatus(app, comparisonResult, healthState, parameters, conditions)
return
}
@@ -486,18 +551,20 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
// needRefreshAppStatus answers if application status needs to be refreshed.
// Returns true if application never been compared, has changed or comparison result has expired.
func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) bool {
logCtx := log.WithFields(log.Fields{"application": app.Name})
var reason string
expired := app.Status.ComparisonResult.ComparedAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
if ctrl.isRefreshForced(app.Name) {
reason = "force refresh"
} else if app.Status.ComparisonResult.Status == appv1.ComparisonStatusUnknown {
} else if app.Status.ComparisonResult.Status == appv1.ComparisonStatusUnknown && expired {
reason = "comparison status unknown"
} else if !app.Spec.Source.Equals(app.Status.ComparisonResult.ComparedTo) {
reason = "spec.source differs"
} else if app.Status.ComparisonResult.ComparedAt.Add(statusRefreshTimeout).Before(time.Now().UTC()) {
} else if expired {
reason = fmt.Sprintf("comparison expired. comparedAt: %v, expiry: %v", app.Status.ComparisonResult.ComparedAt, statusRefreshTimeout)
}
if reason != "" {
log.Infof("Refreshing application '%s' status (%s)", app.Name, reason)
logCtx.Infof("Refreshing app status (%s)", reason)
return true
}
return false
@@ -536,6 +603,7 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
appv1.ApplicationConditionUnknownError: true,
appv1.ApplicationConditionComparisonError: true,
appv1.ApplicationConditionSharedResourceWarning: true,
appv1.ApplicationConditionSyncError: true,
}
appConditions := make([]appv1.ApplicationCondition, 0)
for i := 0; i < len(app.Status.Conditions); i++ {
@@ -557,7 +625,7 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
}
// setApplicationHealth updates the health statuses of all resources performed in the comparison
func setApplicationHealth(comparisonResult *appv1.ComparisonResult) (*appv1.HealthStatus, error) {
func setApplicationHealth(kubectl kube.Kubectl, comparisonResult *appv1.ComparisonResult) (*appv1.HealthStatus, error) {
var savedErr error
appHealth := appv1.HealthStatus{Status: appv1.HealthStatusHealthy}
if comparisonResult.Status == appv1.ComparisonStatusUnknown {
@@ -572,7 +640,7 @@ func setApplicationHealth(comparisonResult *appv1.ComparisonResult) (*appv1.Heal
if err != nil {
return nil, err
}
healthState, err := health.GetAppHealth(&obj)
healthState, err := health.GetAppHealth(kubectl, &obj)
if err != nil && savedErr == nil {
savedErr = err
}
@@ -594,12 +662,21 @@ func (ctrl *ApplicationController) updateAppStatus(
parameters []*appv1.ComponentParameter,
conditions []appv1.ApplicationCondition,
) {
logCtx := log.WithFields(log.Fields{"application": app.Name})
modifiedApp := app.DeepCopy()
if comparisonResult != nil {
modifiedApp.Status.ComparisonResult = *comparisonResult
log.Infof("App %s comparison result: prev: %s. current: %s", app.Name, app.Status.ComparisonResult.Status, comparisonResult.Status)
if app.Status.ComparisonResult.Status != comparisonResult.Status {
message := fmt.Sprintf("Updated sync status: %s -> %s", app.Status.ComparisonResult.Status, comparisonResult.Status)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message)
}
logCtx.Infof("Comparison result: prev: %s. current: %s", app.Status.ComparisonResult.Status, comparisonResult.Status)
}
if healthState != nil {
if modifiedApp.Status.Health.Status != healthState.Status {
message := fmt.Sprintf("Updated health status: %s -> %s", modifiedApp.Status.Health.Status, healthState.Status)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message)
}
modifiedApp.Status.Health = *healthState
}
if parameters != nil {
@@ -613,59 +690,104 @@ func (ctrl *ApplicationController) updateAppStatus(
}
origBytes, err := json.Marshal(app)
if err != nil {
log.Errorf("Error updating application %s (marshal orig app): %v", app.Name, err)
logCtx.Errorf("Error updating (marshal orig app): %v", err)
return
}
modifiedBytes, err := json.Marshal(modifiedApp)
if err != nil {
log.Errorf("Error updating application %s (marshal modified app): %v", app.Name, err)
logCtx.Errorf("Error updating (marshal modified app): %v", err)
return
}
patch, err := strategicpatch.CreateTwoWayMergePatch(origBytes, modifiedBytes, appv1.Application{})
if err != nil {
log.Errorf("Error calculating patch for app %s update: %v", app.Name, err)
logCtx.Errorf("Error calculating patch for update: %v", err)
return
}
if string(patch) == "{}" {
log.Infof("No status changes to %s. Skipping patch", app.Name)
logCtx.Infof("No status changes. Skipping patch")
return
}
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
_, err = appClient.Patch(app.Name, types.MergePatchType, patch)
if err != nil {
log.Warnf("Error updating application %s: %v", app.Name, err)
logCtx.Warnf("Error updating application: %v", err)
} else {
log.Infof("Application %s update successful", app.Name)
logCtx.Infof("Update successful")
}
}
func newApplicationInformer(
appClientset appclientset.Interface,
appQueue workqueue.RateLimitingInterface,
appOperationQueue workqueue.RateLimitingInterface,
appResyncPeriod time.Duration,
config *ApplicationControllerConfig) cache.SharedIndexInformer {
// autoSync will initiate a sync operation for an application configured with automated sync
func (ctrl *ApplicationController) autoSync(app *appv1.Application, comparisonResult *appv1.ComparisonResult) *appv1.ApplicationCondition {
if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil {
return nil
}
logCtx := log.WithFields(log.Fields{"application": app.Name})
if app.Operation != nil {
logCtx.Infof("Skipping auto-sync: another operation is in progress")
return nil
}
// Only perform auto-sync if we detect OutOfSync status. This is to prevent us from attempting
// a sync when application is already in a Synced or Unknown state
if comparisonResult.Status != appv1.ComparisonStatusOutOfSync {
logCtx.Infof("Skipping auto-sync: application status is %s", comparisonResult.Status)
return nil
}
desiredCommitSHA := comparisonResult.Revision
appInformerFactory := appinformers.NewFilteredSharedInformerFactory(
appClientset,
appResyncPeriod,
config.Namespace,
func(options *metav1.ListOptions) {
var instanceIDReq *labels.Requirement
var err error
if config.InstanceID != "" {
instanceIDReq, err = labels.NewRequirement(common.LabelKeyApplicationControllerInstanceID, selection.Equals, []string{config.InstanceID})
} else {
instanceIDReq, err = labels.NewRequirement(common.LabelKeyApplicationControllerInstanceID, selection.DoesNotExist, nil)
}
if err != nil {
panic(err)
}
// It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g.
// auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an
// application in an infinite loop. To detect this, we only attempt the Sync if the revision
// and parameter overrides are different from our most recent sync operation.
if alreadyAttemptedSync(app, desiredCommitSHA) {
if app.Status.OperationState.Phase != appv1.OperationSucceeded {
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA)
message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
}
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
return nil
}
options.FieldSelector = fields.Everything().String()
labelSelector := labels.NewSelector().Add(*instanceIDReq)
options.LabelSelector = labelSelector.String()
op := appv1.Operation{
Sync: &appv1.SyncOperation{
Revision: desiredCommitSHA,
Prune: app.Spec.SyncPolicy.Automated.Prune,
ParameterOverrides: app.Spec.Source.ComponentParameterOverrides,
},
}
appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
_, err := argo.SetAppOperation(context.Background(), appIf, ctrl.auditLogger, app.Name, &op)
if err != nil {
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}
}
message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message)
logCtx.Info(message)
return nil
}
// alreadyAttemptedSync returns whether or not the most recent sync was performed against the
// commitSHA and with the same parameter overrides which are currently set in the app
func alreadyAttemptedSync(app *appv1.Application, commitSHA string) bool {
if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
return false
}
if app.Status.OperationState.SyncResult.Revision != commitSHA {
return false
}
if !reflect.DeepEqual(appv1.ParameterOverrides(app.Spec.Source.ComponentParameterOverrides), app.Status.OperationState.Operation.Sync.ParameterOverrides) {
return false
}
return true
}
func (ctrl *ApplicationController) newApplicationInformer() cache.SharedIndexInformer {
appInformerFactory := appinformers.NewFilteredSharedInformerFactory(
ctrl.applicationClientset,
ctrl.statusRefreshTimeout,
ctrl.namespace,
func(options *metav1.ListOptions) {},
)
informer := appInformerFactory.Argoproj().V1alpha1().Applications().Informer()
informer.AddEventHandler(
@@ -673,23 +795,32 @@ func newApplicationInformer(
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
appQueue.Add(key)
appOperationQueue.Add(key)
ctrl.appRefreshQueue.Add(key)
ctrl.appOperationQueue.Add(key)
}
},
UpdateFunc: func(old, new interface{}) {
key, err := cache.MetaNamespaceKeyFunc(new)
if err == nil {
appQueue.Add(key)
appOperationQueue.Add(key)
if err != nil {
return
}
oldApp, oldOK := old.(*appv1.Application)
newApp, newOK := new.(*appv1.Application)
if oldOK && newOK {
if toggledAutomatedSync(oldApp, newApp) {
log.WithField("application", newApp.Name).Info("Enabled automated sync")
ctrl.forceAppRefresh(newApp.Name)
}
}
ctrl.appRefreshQueue.Add(key)
ctrl.appOperationQueue.Add(key)
},
DeleteFunc: func(obj interface{}) {
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function.
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
appQueue.Add(key)
ctrl.appRefreshQueue.Add(key)
}
},
},
@@ -700,3 +831,17 @@ func newApplicationInformer(
func isOperationInProgress(app *appv1.Application) bool {
return app.Status.OperationState != nil && !app.Status.OperationState.Phase.Completed()
}
// toggledAutomatedSync tests if an app went from auto-sync disabled to enabled.
// if it was toggled to be enabled, the informer handler will force a refresh
func toggledAutomatedSync(old *appv1.Application, new *appv1.Application) bool {
if new.Spec.SyncPolicy == nil || new.Spec.SyncPolicy.Automated == nil {
return false
}
// auto-sync is enabled. check if it was previously disabled
if old.Spec.SyncPolicy == nil || old.Spec.SyncPolicy.Automated == nil {
return true
}
// nothing changed
return false
}

View File

@@ -0,0 +1,231 @@
package controller
import (
"testing"
"time"
"github.com/ghodss/yaml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
reposerver "github.com/argoproj/argo-cd/reposerver/mocks"
"github.com/stretchr/testify/assert"
)
func newFakeController(apps ...runtime.Object) *ApplicationController {
kubeClientset := fake.NewSimpleClientset()
appClientset := appclientset.NewSimpleClientset(apps...)
repoClientset := reposerver.Clientset{}
return NewApplicationController(
"argocd",
kubeClientset,
appClientset,
&repoClientset,
time.Minute,
)
}
var fakeApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: my-app
namespace: argocd
spec:
destination:
namespace: dummy-namespace
server: https://localhost:6443
project: default
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
syncPolicy:
automated: {}
status:
operationState:
finishedAt: 2018-09-21T23:50:29Z
message: successfully synced
operation:
sync:
revision: HEAD
phase: Succeeded
startedAt: 2018-09-21T23:50:25Z
syncResult:
resources:
- kind: RoleBinding
message: |-
rolebinding.rbac.authorization.k8s.io/always-outofsync reconciled
rolebinding.rbac.authorization.k8s.io/always-outofsync configured
name: always-outofsync
namespace: default
status: Synced
revision: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
`
func newFakeApp() *argoappv1.Application {
var app argoappv1.Application
err := yaml.Unmarshal([]byte(fakeApp), &app)
if err != nil {
panic(err)
}
return &app
}
func TestAutoSync(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(app)
compRes := argoappv1.ComparisonResult{
Status: argoappv1.ComparisonStatusOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond := ctrl.autoSync(app, &compRes)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications("argocd").Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.NotNil(t, app.Operation)
assert.NotNil(t, app.Operation.Sync)
assert.False(t, app.Operation.Sync.Prune)
}
func TestSkipAutoSync(t *testing.T) {
// Verify we skip when we previously synced to it in our most recent history
// Set current to 'aaaaa', desired to 'aaaa' and mark system OutOfSync
app := newFakeApp()
ctrl := newFakeController(app)
compRes := argoappv1.ComparisonResult{
Status: argoappv1.ComparisonStatusOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
cond := ctrl.autoSync(app, &compRes)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications("argocd").Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
// Verify we skip when we are already Synced (even if revision is different)
app = newFakeApp()
ctrl = newFakeController(app)
compRes = argoappv1.ComparisonResult{
Status: argoappv1.ComparisonStatusSynced,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond = ctrl.autoSync(app, &compRes)
assert.Nil(t, cond)
app, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications("argocd").Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
// Verify we skip when auto-sync is disabled
app = newFakeApp()
app.Spec.SyncPolicy = nil
ctrl = newFakeController(app)
compRes = argoappv1.ComparisonResult{
Status: argoappv1.ComparisonStatusOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond = ctrl.autoSync(app, &compRes)
assert.Nil(t, cond)
app, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications("argocd").Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
// Verify we skip when previous sync attempt failed and return error condition
// Set current to 'aaaaa', desired to 'bbbbb' and add 'bbbbb' to failure history
app = newFakeApp()
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
},
}
ctrl = newFakeController(app)
compRes = argoappv1.ComparisonResult{
Status: argoappv1.ComparisonStatusOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond = ctrl.autoSync(app, &compRes)
assert.NotNil(t, cond)
app, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications("argocd").Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// TestAutoSyncIndicateError verifies we skip auto-sync and return error condition if previous sync failed
func TestAutoSyncIndicateError(t *testing.T) {
app := newFakeApp()
app.Spec.Source.ComponentParameterOverrides = []argoappv1.ComponentParameter{
{
Name: "a",
Value: "1",
},
}
ctrl := newFakeController(app)
compRes := argoappv1.ComparisonResult{
Status: argoappv1.ComparisonStatusOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{
ParameterOverrides: argoappv1.ParameterOverrides{
{
Name: "a",
Value: "1",
},
},
},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
}
cond := ctrl.autoSync(app, &compRes)
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications("argocd").Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.Nil(t, app.Operation)
}
// TestAutoSyncParameterOverrides verifies we auto-sync if revision is same but parameter overrides are different
func TestAutoSyncParameterOverrides(t *testing.T) {
app := newFakeApp()
app.Spec.Source.ComponentParameterOverrides = []argoappv1.ComponentParameter{
{
Name: "a",
Value: "1",
},
}
ctrl := newFakeController(app)
compRes := argoappv1.ComparisonResult{
Status: argoappv1.ComparisonStatusOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
app.Status.OperationState = &argoappv1.OperationState{
Operation: argoappv1.Operation{
Sync: &argoappv1.SyncOperation{
ParameterOverrides: argoappv1.ParameterOverrides{
{
Name: "a",
Value: "2", // this value changed
},
},
},
},
Phase: argoappv1.OperationFailed,
SyncResult: &argoappv1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
}
cond := ctrl.autoSync(app, &compRes)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications("argocd").Get("my-app", metav1.GetOptions{})
assert.NoError(t, err)
assert.NotNil(t, app.Operation)
}

View File

@@ -3,16 +3,9 @@ package controller
import (
"context"
"encoding/json"
"runtime/debug"
"time"
"runtime/debug"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,6 +18,12 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/git"
)
type SecretController struct {
@@ -93,22 +92,13 @@ func (ctrl *SecretController) getRepoConnectionState(repo *v1alpha1.Repository)
ModifiedAt: repo.ConnectionState.ModifiedAt,
Status: v1alpha1.ConnectionStatusUnknown,
}
closer, client, err := ctrl.repoClientset.NewRepositoryClient()
if err != nil {
log.Errorf("Unable to create repository client: %v", err)
return state
}
defer util.Close(closer)
_, err = client.ListDir(context.Background(), &repository.ListDirRequest{Repo: repo, Path: ".gitignore"})
err := git.TestRepo(repo.Repo, repo.Username, repo.Password, repo.SSHPrivateKey)
if err == nil {
state.Status = v1alpha1.ConnectionStatusSuccessful
} else {
state.Status = v1alpha1.ConnectionStatusFailed
state.Message = err.Error()
}
return state
}

View File

@@ -7,6 +7,7 @@ import (
"time"
log "github.com/sirupsen/logrus"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
@@ -35,10 +36,11 @@ type AppStateManager interface {
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
}
// ksonnetAppStateManager allows to compare application using KSonnet CLI
type ksonnetAppStateManager struct {
// appStateManager allows to compare application using KSonnet CLI
type appStateManager struct {
db db.ArgoDB
appclientset appclientset.Interface
kubectl kubeutil.Kubectl
repoClientset reposerver.Clientset
namespace string
}
@@ -83,7 +85,7 @@ func groupLiveObjects(liveObjs []*unstructured.Unstructured, targetObjs []*unstr
return liveByFullName
}
func (s *ksonnetAppStateManager) getTargetObjs(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, *repository.ManifestResponse, error) {
func (s *appStateManager) getTargetObjs(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, *repository.ManifestResponse, error) {
repo := s.getRepo(app.Spec.Source.RepoURL)
conn, repoClient, err := s.repoClientset.NewRepositoryClient()
if err != nil {
@@ -121,6 +123,7 @@ func (s *ksonnetAppStateManager) getTargetObjs(app *v1alpha1.Application, revisi
ComponentParameterOverrides: mfReqOverrides,
AppLabel: app.Name,
ValueFiles: app.Spec.Source.ValuesFiles,
Namespace: app.Spec.Destination.Namespace,
})
if err != nil {
return nil, nil, err
@@ -140,7 +143,7 @@ func (s *ksonnetAppStateManager) getTargetObjs(app *v1alpha1.Application, revisi
return targetObjs, manifestInfo, nil
}
func (s *ksonnetAppStateManager) getLiveObjs(app *v1alpha1.Application, targetObjs []*unstructured.Unstructured) (
func (s *appStateManager) getLiveObjs(app *v1alpha1.Application, targetObjs []*unstructured.Unstructured) (
[]*unstructured.Unstructured, map[string]*unstructured.Unstructured, error) {
// Get the REST config for the cluster corresponding to the environment
@@ -188,24 +191,27 @@ func (s *ksonnetAppStateManager) getLiveObjs(app *v1alpha1.Application, targetOb
}
apiResource, err := kubeutil.ServerResourceForGroupVersionKind(disco, gvk)
if err != nil {
return nil, nil, err
}
liveObj, err = kubeutil.GetLiveResource(dclient, targetObj, apiResource, app.Spec.Destination.Namespace)
if err != nil {
return nil, nil, err
if !apierr.IsNotFound(err) {
return nil, nil, err
}
// If we get here, the app is comprised of a custom resource which has yet to be registered
} else {
liveObj, err = kubeutil.GetLiveResource(dclient, targetObj, apiResource, app.Spec.Destination.Namespace)
if err != nil {
return nil, nil, err
}
}
}
controlledLiveObj[i] = liveObj
delete(liveObjByFullName, fullName)
}
return controlledLiveObj, liveObjByFullName, nil
}
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied overrides. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
func (s *ksonnetAppStateManager) CompareAppState(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) (
func (s *appStateManager) CompareAppState(app *v1alpha1.Application, revision string, overrides []v1alpha1.ComponentParameter) (
*v1alpha1.ComparisonResult, *repository.ManifestResponse, []v1alpha1.ApplicationCondition, error) {
failedToLoadObjs := false
@@ -318,6 +324,10 @@ func (s *ksonnetAppStateManager) CompareAppState(app *v1alpha1.Application, revi
Resources: resources,
Status: comparisonStatus,
}
if manifestInfo != nil {
compResult.Revision = manifestInfo.Revision
}
return &compResult, manifestInfo, conditions, nil
}
@@ -360,7 +370,7 @@ func getResourceFullName(obj *unstructured.Unstructured) string {
return fmt.Sprintf("%s:%s", obj.GetKind(), obj.GetName())
}
func (s *ksonnetAppStateManager) getRepo(repoURL string) *v1alpha1.Repository {
func (s *appStateManager) getRepo(repoURL string) *v1alpha1.Repository {
repo, err := s.db.GetRepository(context.Background(), repoURL)
if err != nil {
// If we couldn't retrieve from the repo service, assume public repositories
@@ -369,7 +379,7 @@ func (s *ksonnetAppStateManager) getRepo(repoURL string) *v1alpha1.Repository {
return repo
}
func (s *ksonnetAppStateManager) persistDeploymentInfo(
func (s *appStateManager) persistDeploymentInfo(
app *v1alpha1.Application, revision string, envParams []*v1alpha1.ComponentParameter, overrides *[]v1alpha1.ComponentParameter) error {
params := make([]v1alpha1.ComponentParameter, len(envParams))
@@ -411,10 +421,12 @@ func NewAppStateManager(
appclientset appclientset.Interface,
repoClientset reposerver.Clientset,
namespace string,
kubectl kubeutil.Kubectl,
) AppStateManager {
return &ksonnetAppStateManager{
return &appStateManager{
db: db,
appclientset: appclientset,
kubectl: kubectl,
repoClientset: repoClientset,
namespace: namespace,
}

52
controller/state_test.go Normal file
View File

@@ -0,0 +1,52 @@
package controller
import (
"testing"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
var podManifest = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: my-pod
spec:
containers:
- image: nginx:1.7.9
name: nginx
resources:
requests:
cpu: 0.2
`)
func newPod() *unstructured.Unstructured {
var un unstructured.Unstructured
err := yaml.Unmarshal(podManifest, &un)
if err != nil {
panic(err)
}
return &un
}
func TestIsHook(t *testing.T) {
pod := newPod()
assert.False(t, isHook(pod))
pod.SetAnnotations(map[string]string{"helm.sh/hook": "post-install"})
assert.True(t, isHook(pod))
pod = newPod()
pod.SetAnnotations(map[string]string{"argocd.argoproj.io/hook": "PreSync"})
assert.True(t, isHook(pod))
pod = newPod()
pod.SetAnnotations(map[string]string{"argocd.argoproj.io/hook": "Skip"})
assert.False(t, isHook(pod))
pod = newPod()
pod.SetAnnotations(map[string]string{"argocd.argoproj.io/hook": "Unknown"})
assert.False(t, isHook(pod))
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"sync"
@@ -29,10 +30,12 @@ import (
type syncContext struct {
appName string
proj *appv1.AppProject
comparison *appv1.ComparisonResult
config *rest.Config
dynClientPool dynamic.ClientPool
disco *discovery.DiscoveryClient
disco discovery.DiscoveryInterface
kubectl kube.Kubectl
namespace string
syncOp *appv1.SyncOperation
syncRes *appv1.SyncOperationResult
@@ -43,8 +46,8 @@ type syncContext struct {
lock sync.Mutex
}
func (s *ksonnetAppStateManager) SyncAppState(app *appv1.Application, state *appv1.OperationState) {
// Sync requests are usually requested with ambiguous revisions (e.g. master, HEAD, v1.2.3).
func (s *appStateManager) SyncAppState(app *appv1.Application, state *appv1.OperationState) {
// Sync requests might be requested with ambiguous revisions (e.g. master, HEAD, v1.2.3).
// This can change meaning when resuming operations (e.g a hook sync). After calculating a
// concrete git commit SHA, the SHA is remembered in the status.operationState.syncResult and
// rollbackResult fields. This ensures that when resuming an operation, we sync to the same
@@ -56,6 +59,7 @@ func (s *ksonnetAppStateManager) SyncAppState(app *appv1.Application, state *app
if state.Operation.Sync != nil {
syncOp = *state.Operation.Sync
overrides = []appv1.ComponentParameter(state.Operation.Sync.ParameterOverrides)
if state.SyncResult != nil {
syncRes = state.SyncResult
revision = state.SyncResult.Revision
@@ -140,12 +144,21 @@ func (s *ksonnetAppStateManager) SyncAppState(app *appv1.Application, state *app
return
}
proj, err := argo.GetAppProject(&app.Spec, s.appclientset, s.namespace)
if err != nil {
state.Phase = appv1.OperationError
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
return
}
syncCtx := syncContext{
appName: app.Name,
proj: proj,
comparison: comparison,
config: restConfig,
dynClientPool: dynClientPool,
disco: disco,
kubectl: s.kubectl,
namespace: app.Spec.Destination.Namespace,
syncOp: &syncOp,
syncRes: syncRes,
@@ -310,12 +323,13 @@ func (sc *syncContext) applyObject(targetObj *unstructured.Unstructured, dryRun
Kind: targetObj.GetKind(),
Namespace: sc.namespace,
}
message, err := kube.ApplyResource(sc.config, targetObj, sc.namespace, dryRun, force)
message, err := sc.kubectl.ApplyResource(sc.config, targetObj, sc.namespace, dryRun, force)
if err != nil {
resDetails.Message = err.Error()
resDetails.Status = appv1.ResourceDetailsSyncFailed
return resDetails
}
resDetails.Message = message
resDetails.Status = appv1.ResourceDetailsSynced
return resDetails
@@ -333,7 +347,7 @@ func (sc *syncContext) pruneObject(liveObj *unstructured.Unstructured, prune, dr
resDetails.Message = "pruned (dry run)"
resDetails.Status = appv1.ResourceDetailsSyncedAndPruned
} else {
err := kube.DeleteResource(sc.config, liveObj, sc.namespace)
err := sc.kubectl.DeleteResource(sc.config, liveObj, sc.namespace)
if err != nil {
resDetails.Message = err.Error()
resDetails.Status = appv1.ResourceDetailsSyncFailed
@@ -349,26 +363,49 @@ func (sc *syncContext) pruneObject(liveObj *unstructured.Unstructured, prune, dr
return resDetails
}
func hasCRDOfGroupKind(tasks []syncTask, group, kind string) bool {
for _, task := range tasks {
if kube.IsCRD(task.targetObj) {
crdGroup, ok, err := unstructured.NestedString(task.targetObj.Object, "spec", "group")
if err != nil || !ok {
continue
}
crdKind, ok, err := unstructured.NestedString(task.targetObj.Object, "spec", "names", "kind")
if err != nil || !ok {
continue
}
if group == crdGroup && crdKind == kind {
return true
}
}
}
return false
}
// performs a apply based sync of the given sync tasks (possibly pruning the objects).
// If update is true, will updates the resource details with the result.
// Or if the prune/apply failed, will also update the result.
func (sc *syncContext) doApplySync(syncTasks []syncTask, dryRun, force, update bool) bool {
syncSuccessful := true
// apply all resources in parallel
var createTasks []syncTask
var pruneTasks []syncTask
for _, syncTask := range syncTasks {
if syncTask.targetObj == nil {
pruneTasks = append(pruneTasks, syncTask)
} else {
createTasks = append(createTasks, syncTask)
}
}
sort.Sort(newKindSorter(createTasks, resourceOrder))
var wg sync.WaitGroup
for _, task := range syncTasks {
for _, task := range pruneTasks {
wg.Add(1)
go func(t syncTask) {
defer wg.Done()
var resDetails appv1.ResourceDetails
if t.targetObj == nil {
resDetails = sc.pruneObject(t.liveObj, sc.syncOp.Prune, dryRun)
} else {
if isHook(t.targetObj) {
return
}
resDetails = sc.applyObject(t.targetObj, dryRun, force)
}
resDetails = sc.pruneObject(t.liveObj, sc.syncOp.Prune, dryRun)
if !resDetails.Status.Successful() {
syncSuccessful = false
}
@@ -378,6 +415,76 @@ func (sc *syncContext) doApplySync(syncTasks []syncTask, dryRun, force, update b
}(task)
}
wg.Wait()
processCreateTasks := func(tasks []syncTask, gvk schema.GroupVersionKind) {
serverRes, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
if err != nil {
// Special case for custom resources: if custom resource definition is not supported by the cluster by defined in application then
// skip verification using `kubectl apply --dry-run` and since CRD should be created during app synchronization.
if dryRun && apierr.IsNotFound(err) && hasCRDOfGroupKind(createTasks, gvk.Group, gvk.Kind) {
return
} else {
syncSuccessful = false
for _, task := range tasks {
sc.setResourceDetails(&appv1.ResourceDetails{
Name: task.targetObj.GetName(),
Kind: task.targetObj.GetKind(),
Namespace: sc.namespace,
Message: err.Error(),
Status: appv1.ResourceDetailsSyncFailed,
})
}
return
}
}
if !sc.proj.IsResourcePermitted(metav1.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, serverRes.Namespaced) {
syncSuccessful = false
for _, task := range tasks {
sc.setResourceDetails(&appv1.ResourceDetails{
Name: task.targetObj.GetName(),
Kind: task.targetObj.GetKind(),
Namespace: sc.namespace,
Message: fmt.Sprintf("Resource %s:%s is not permitted in project %s.", gvk.Group, gvk.Kind, sc.proj.Name),
Status: appv1.ResourceDetailsSyncFailed,
})
}
return
}
var createWg sync.WaitGroup
for i := range tasks {
createWg.Add(1)
go func(t syncTask) {
defer createWg.Done()
if isHook(t.targetObj) {
return
}
resDetails := sc.applyObject(t.targetObj, dryRun, force)
if !resDetails.Status.Successful() {
syncSuccessful = false
}
if update || !resDetails.Status.Successful() {
sc.setResourceDetails(&resDetails)
}
}(tasks[i])
}
createWg.Wait()
}
var tasksGroup []syncTask
for _, task := range createTasks {
//Only wait if the type of the next task is different than the previous type
if len(tasksGroup) > 0 && tasksGroup[0].targetObj.GetKind() != task.targetObj.GetKind() {
processCreateTasks(tasksGroup, tasksGroup[0].targetObj.GroupVersionKind())
tasksGroup = []syncTask{task}
} else {
tasksGroup = append(tasksGroup, task)
}
}
if len(tasksGroup) > 0 {
processCreateTasks(tasksGroup, tasksGroup[0].targetObj.GroupVersionKind())
}
return syncSuccessful
}
@@ -412,7 +519,7 @@ func (sc *syncContext) doHookSync(syncTasks []syncTask, hooks []*unstructured.Un
// already started the post-sync phase, then we do not need to perform the health check.
postSyncHooks, _ := sc.getHooks(appv1.HookTypePostSync)
if len(postSyncHooks) > 0 && !sc.startedPostSyncPhase() {
healthState, err := setApplicationHealth(sc.comparison)
healthState, err := setApplicationHealth(sc.kubectl, sc.comparison)
sc.log.Infof("PostSync application health check: %s", healthState.Status)
if err != nil {
sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("failed to check application health: %v", err))
@@ -555,7 +662,7 @@ func (sc *syncContext) runHook(hook *unstructured.Unstructured, hookType appv1.H
if err != nil {
sc.log.Warnf("Failed to set application label on hook %v: %v", hook, err)
}
_, err := kube.ApplyResource(sc.config, hook, sc.namespace, false, false)
_, err := sc.kubectl.ApplyResource(sc.config, hook, sc.namespace, false, false)
if err != nil {
return false, fmt.Errorf("Failed to create %s hook %s '%s': %v", hookType, gvk, hook.GetName(), err)
}
@@ -627,7 +734,7 @@ func isHelmHook(obj *unstructured.Unstructured) bool {
if annotations == nil {
return false
}
_, ok := annotations[common.AnnotationHook]
_, ok := annotations[common.AnnotationHelmHook]
return ok
}
@@ -856,3 +963,89 @@ func (sc *syncContext) deleteHook(name, kind, apiVersion string) error {
resIf := dclient.Resource(apiResource, sc.namespace)
return resIf.Delete(name, &metav1.DeleteOptions{})
}
// This code is mostly taken from https://github.com/helm/helm/blob/release-2.10/pkg/tiller/kind_sorter.go
// sortOrder is an ordering of Kinds.
type sortOrder []string
// resourceOrder represents the correct order of Kubernetes resources within a manifest:
// cluster-wide prerequisites (Namespace, quotas, policies) first, then configuration
// (Secret/ConfigMap), storage, RBAC, and finally workloads and networking. Kinds that
// do not appear in this list are sorted after all listed kinds by kindSorter.Less.
var resourceOrder sortOrder = []string{
"Namespace",
"ResourceQuota",
"LimitRange",
"PodSecurityPolicy",
"Secret",
"ConfigMap",
"StorageClass",
"PersistentVolume",
"PersistentVolumeClaim",
"ServiceAccount",
"CustomResourceDefinition",
"ClusterRole",
"ClusterRoleBinding",
"Role",
"RoleBinding",
"Service",
"DaemonSet",
"Pod",
"ReplicationController",
"ReplicaSet",
"Deployment",
"StatefulSet",
"Job",
"CronJob",
"Ingress",
"APIService",
}
// kindSorter orders sync tasks by the rank of their target object's kind,
// implementing sort.Interface for use with sort.Sort.
type kindSorter struct {
	ordering  map[string]int
	manifests []syncTask
}

// newKindSorter builds a kindSorter over m, assigning each kind a rank equal
// to its position in the sort order s.
func newKindSorter(m []syncTask, s sortOrder) *kindSorter {
	ranks := make(map[string]int, len(s))
	for rank, kind := range s {
		ranks[kind] = rank
	}
	return &kindSorter{
		manifests: m,
		ordering:  ranks,
	}
}
// Len implements sort.Interface.
func (k *kindSorter) Len() int { return len(k.manifests) }
// Swap implements sort.Interface.
func (k *kindSorter) Swap(i, j int) { k.manifests[i], k.manifests[j] = k.manifests[j], k.manifests[i] }
// Less implements sort.Interface. Tasks are ordered by the rank of their
// target object's kind; tasks with a nil target object sort last. Among tasks
// of equal rank, two unknown kinds compare alphabetically by kind, and
// otherwise tasks compare by resource name.
func (k *kindSorter) Less(i, j int) bool {
	a := k.manifests[i].targetObj
	b := k.manifests[j].targetObj
	switch {
	case a == nil:
		return false
	case b == nil:
		return true
	}
	aRank, aKnown := k.ordering[a.GetKind()]
	bRank, bKnown := k.ordering[b.GetKind()]
	if aRank != bRank {
		// Different ranks: a kind absent from the ordering goes last.
		if !aKnown {
			return false
		}
		if !bKnown {
			return true
		}
		return aRank < bRank
	}
	// Equal rank: two distinct unknown kinds sort alphabetically by kind;
	// everything else falls back to an alphanumeric sub-sort on name.
	if !aKnown && !bKnown && a.GetKind() != b.GetKind() {
		return a.GetKind() < b.GetKind()
	}
	return a.GetName() < b.GetName()
}

View File

@@ -1,26 +1,391 @@
package controller
import (
"fmt"
"sort"
"testing"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/kube"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
fakedisco "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/rest"
testcore "k8s.io/client-go/testing"
)
func newTestSyncCtx() *syncContext {
// kubectlOutput is a canned response (output string plus error) returned by
// mockKubectlCmd for a given resource name.
type kubectlOutput struct {
output string
err error
}
// mockKubectlCmd fakes the kubectl wrapper used by syncContext. Responses are
// keyed by the resource's metadata.name; a name with no configured entry
// succeeds silently.
type mockKubectlCmd struct {
commands map[string]kubectlOutput
}
// DeleteResource returns the canned error configured for obj's name, or nil
// when no response is configured.
func (k mockKubectlCmd) DeleteResource(config *rest.Config, obj *unstructured.Unstructured, namespace string) error {
command, ok := k.commands[obj.GetName()]
if !ok {
return nil
}
return command.err
}
// ApplyResource returns the canned output/error configured for obj's name, or
// an empty, successful result when no response is configured.
func (k mockKubectlCmd) ApplyResource(config *rest.Config, obj *unstructured.Unstructured, namespace string, dryRun, force bool) (string, error) {
command, ok := k.commands[obj.GetName()]
if !ok {
return "", nil
}
return command.output, command.err
}
// ConvertToVersion converts an unstructured object into the specified
// group/version. The mock is a no-op and returns the object unchanged.
func (k mockKubectlCmd) ConvertToVersion(obj *unstructured.Unstructured, group, version string) (*unstructured.Unstructured, error) {
return obj, nil
}
// newTestSyncCtx returns a syncContext wired to fakes suitable for unit tests:
// a fake discovery client seeded with pod/deployment/service resources (plus
// any extra API resource lists supplied by the caller), a permissive test
// project, and an apply-based sync operation with pruning enabled.
//
// Fix: the struct literal previously contained duplicate keys (syncOp,
// opState, and log each appeared twice — a merge artifact), which is a compile
// error in Go; each field is now initialized exactly once.
func newTestSyncCtx(resources ...*v1.APIResourceList) *syncContext {
	fakeDisco := &fakedisco.FakeDiscovery{Fake: &testcore.Fake{}}
	fakeDisco.Resources = append(resources, &v1.APIResourceList{
		APIResources: []v1.APIResource{
			{Kind: "pod", Namespaced: true},
			{Kind: "deployment", Namespaced: true},
			{Kind: "service", Namespaced: true},
		},
	})
	// Discovery results are cached package-wide; flush so each test sees its
	// own fake resource lists rather than a previous test's.
	kube.FlushServerResourcesCache()
	return &syncContext{
		comparison: &v1alpha1.ComparisonResult{},
		config:     &rest.Config{},
		namespace:  "test-namespace",
		syncRes:    &v1alpha1.SyncOperationResult{},
		syncOp: &v1alpha1.SyncOperation{
			Prune: true,
			SyncStrategy: &v1alpha1.SyncStrategy{
				Apply: &v1alpha1.SyncStrategyApply{},
			},
		},
		proj: &v1alpha1.AppProject{
			ObjectMeta: v1.ObjectMeta{
				Name: "test",
			},
			Spec: v1alpha1.AppProjectSpec{
				// Wildcard whitelist: all cluster-scoped resources permitted
				// unless a test narrows it.
				ClusterResourceWhitelist: []v1.GroupKind{
					{Group: "*", Kind: "*"},
				},
			},
		},
		opState: &v1alpha1.OperationState{},
		disco:   fakeDisco,
		log:     log.WithFields(log.Fields{"application": "fake-app"}),
	}
}
// TestSyncCreateInSortedOrder syncs two brand-new resources (empty live state)
// and verifies both the pod and the service are reported as Synced; a second
// sync is then expected to drive the operation to the Succeeded phase.
func TestSyncCreateInSortedOrder(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.kubectl = mockKubectlCmd{}
syncCtx.comparison = &v1alpha1.ComparisonResult{
Resources: []v1alpha1.ResourceState{{
LiveState: "",
TargetState: "{\"kind\":\"pod\"}",
}, {
LiveState: "",
TargetState: "{\"kind\":\"service\"}",
},
},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 2)
for i := range syncCtx.syncRes.Resources {
if syncCtx.syncRes.Resources[i].Kind == "pod" {
assert.Equal(t, v1alpha1.ResourceDetailsSynced, syncCtx.syncRes.Resources[i].Status)
} else if syncCtx.syncRes.Resources[i].Kind == "service" {
assert.Equal(t, v1alpha1.ResourceDetailsSynced, syncCtx.syncRes.Resources[i].Status)
} else {
t.Error("Resource isn't a pod or a service")
}
}
// Second pass completes the operation.
syncCtx.sync()
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
}
// TestSyncCreateNotWhitelistedClusterResources verifies that syncing a
// cluster-scoped resource (ClusterRole) fails with a "not permitted in
// project" error when the project's cluster-resource whitelist only admits
// the argoproj.io group.
func TestSyncCreateNotWhitelistedClusterResources(t *testing.T) {
syncCtx := newTestSyncCtx(&v1.APIResourceList{
GroupVersion: v1alpha1.SchemeGroupVersion.String(),
APIResources: []v1.APIResource{
{Name: "workflows", Namespaced: false, Kind: "Workflow", Group: "argoproj.io"},
{Name: "application", Namespaced: false, Kind: "Application", Group: "argoproj.io"},
},
}, &v1.APIResourceList{
GroupVersion: "rbac.authorization.k8s.io/v1",
APIResources: []v1.APIResource{
{Name: "clusterroles", Namespaced: false, Kind: "ClusterRole", Group: "rbac.authorization.k8s.io"},
},
})
// Narrow the default wildcard whitelist so the ClusterRole is rejected.
syncCtx.proj.Spec.ClusterResourceWhitelist = []v1.GroupKind{
{Group: "argoproj.io", Kind: "*"},
}
syncCtx.kubectl = mockKubectlCmd{}
syncCtx.comparison = &v1alpha1.ComparisonResult{
Resources: []v1alpha1.ResourceState{{
LiveState: "",
TargetState: `{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "ClusterRole", "metadata": {"name": "argo-ui-cluster-role" }}`,
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResourceDetailsSyncFailed, syncCtx.syncRes.Resources[0].Status)
assert.Contains(t, syncCtx.syncRes.Resources[0].Message, "not permitted in project")
}
// TestSyncBlacklistedNamespacedResources verifies that syncing a namespaced
// resource whose kind appears on the project's namespace-resource blacklist
// fails with a "not permitted in project" error.
func TestSyncBlacklistedNamespacedResources(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.proj.Spec.NamespaceResourceBlacklist = []v1.GroupKind{
{Group: "*", Kind: "deployment"},
}
syncCtx.kubectl = mockKubectlCmd{}
syncCtx.comparison = &v1alpha1.ComparisonResult{
Resources: []v1alpha1.ResourceState{{
LiveState: "",
TargetState: "{\"kind\":\"deployment\"}",
}},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResourceDetailsSyncFailed, syncCtx.syncRes.Resources[0].Status)
assert.Contains(t, syncCtx.syncRes.Resources[0].Message, "not permitted in project")
}
// TestSyncSuccessfully exercises a sync that both creates a resource (the
// service, which has no live state) and prunes one (the pod, which has no
// target state), then confirms a second sync completes the operation.
func TestSyncSuccessfully(t *testing.T) {
	ctx := newTestSyncCtx()
	ctx.kubectl = mockKubectlCmd{}
	ctx.comparison = &v1alpha1.ComparisonResult{
		Resources: []v1alpha1.ResourceState{
			{LiveState: "", TargetState: "{\"kind\":\"service\"}"},
			{LiveState: "{\"kind\":\"pod\"}", TargetState: ""},
		},
	}
	ctx.sync()
	assert.Len(t, ctx.syncRes.Resources, 2)
	for _, res := range ctx.syncRes.Resources {
		switch res.Kind {
		case "pod":
			assert.Equal(t, v1alpha1.ResourceDetailsSyncedAndPruned, res.Status)
		case "service":
			assert.Equal(t, v1alpha1.ResourceDetailsSynced, res.Status)
		default:
			t.Error("Resource isn't a pod or a service")
		}
	}
	// Second pass completes the operation.
	ctx.sync()
	assert.Equal(t, ctx.opState.Phase, v1alpha1.OperationSucceeded)
}
// TestSyncDeleteSuccessfully verifies that two live-only resources (empty
// target state) are both pruned and reported as SyncedAndPruned, and that a
// follow-up sync marks the operation as Succeeded.
func TestSyncDeleteSuccessfully(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.kubectl = mockKubectlCmd{}
syncCtx.comparison = &v1alpha1.ComparisonResult{
Resources: []v1alpha1.ResourceState{{
LiveState: "{\"kind\":\"service\"}",
TargetState: "",
}, {
LiveState: "{\"kind\":\"pod\"}",
TargetState: "",
},
},
}
syncCtx.sync()
for i := range syncCtx.syncRes.Resources {
if syncCtx.syncRes.Resources[i].Kind == "pod" {
assert.Equal(t, v1alpha1.ResourceDetailsSyncedAndPruned, syncCtx.syncRes.Resources[i].Status)
} else if syncCtx.syncRes.Resources[i].Kind == "service" {
assert.Equal(t, v1alpha1.ResourceDetailsSyncedAndPruned, syncCtx.syncRes.Resources[i].Status)
} else {
t.Error("Resource isn't a pod or a service")
}
}
// Second pass completes the operation.
syncCtx.sync()
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
}
// TestSyncCreateFailure configures the mock kubectl to fail the apply of
// "test-service" and verifies the resource is reported as SyncFailed.
func TestSyncCreateFailure(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.kubectl = mockKubectlCmd{
commands: map[string]kubectlOutput{
"test-service": {
output: "",
err: fmt.Errorf("error: error validating \"test.yaml\": error validating data: apiVersion not set; if you choose to ignore these errors, turn validation off with --validate=false"),
},
},
}
syncCtx.comparison = &v1alpha1.ComparisonResult{
Resources: []v1alpha1.ResourceState{{
LiveState: "",
TargetState: "{\"kind\":\"service\", \"metadata\":{\"name\":\"test-service\"}}",
},
},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResourceDetailsSyncFailed, syncCtx.syncRes.Resources[0].Status)
}
// TestSyncPruneFailure configures the mock kubectl to fail the deletion of
// live-only "test-service" and verifies the prune is reported as SyncFailed.
func TestSyncPruneFailure(t *testing.T) {
syncCtx := newTestSyncCtx()
syncCtx.kubectl = mockKubectlCmd{
commands: map[string]kubectlOutput{
"test-service": {
output: "",
err: fmt.Errorf(" error: timed out waiting for \"test-service\" to be synced"),
},
},
}
syncCtx.comparison = &v1alpha1.ComparisonResult{
Resources: []v1alpha1.ResourceState{{
LiveState: "{\"kind\":\"service\", \"metadata\":{\"name\":\"test-service\"}}",
TargetState: "",
},
},
}
syncCtx.sync()
assert.Len(t, syncCtx.syncRes.Resources, 1)
assert.Equal(t, v1alpha1.ResourceDetailsSyncFailed, syncCtx.syncRes.Resources[0].Status)
}
// TestRunWorkflows is an empty placeholder: the workflow/hook sync path is not
// yet covered here. TODO(review): implement the commented-out scenario or
// remove this stub.
func TestRunWorkflows(t *testing.T) {
// syncCtx := newTestSyncCtx()
// syncCtx.doWorkflowSync(nil, nil)
}
// unsortedManifest returns a fixture of sync tasks deliberately out of
// resourceOrder (Pod, Service, PersistentVolume, a kind-less object, ConfigMap)
// for exercising kindSorter. The kind-less entry checks unknown-kind handling.
// NOTE(review): "GroupVersion" is not a real manifest field (presumably
// apiVersion was meant); harmless here since sorting only reads kind — confirm.
func unsortedManifest() []syncTask {
return []syncTask{
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Pod",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "PersistentVolume",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "ConfigMap",
},
},
},
}
}
// sortedManifest returns the expected result of sorting unsortedManifest with
// kindSorter: ConfigMap, PersistentVolume, Service, Pod per resourceOrder,
// with the kind-less (unknown) object last.
func sortedManifest() []syncTask {
return []syncTask{
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "ConfigMap",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "PersistentVolume",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Pod",
},
},
},
{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
},
},
},
}
}
// TestSortKubernetesResourcesSuccessfully sorts the unsorted fixture with
// kindSorter and checks the result element-by-element against the pre-sorted
// fixture.
func TestSortKubernetesResourcesSuccessfully(t *testing.T) {
	tasks := unsortedManifest()
	sort.Sort(newKindSorter(tasks, resourceOrder))
	want := sortedManifest()
	assert.Equal(t, len(tasks), len(want))
	for i := range tasks {
		assert.Equal(t, want[i], tasks[i])
	}
}
// TestSortManifestHandleNil verifies that a task with a nil targetObj sorts
// after a task with a real object (nil objects go last in kindSorter.Less).
func TestSortManifestHandleNil(t *testing.T) {
task := syncTask{
targetObj: &unstructured.Unstructured{
Object: map[string]interface{}{
"GroupVersion": apiv1.SchemeGroupVersion.String(),
"kind": "Service",
},
},
}
manifest := []syncTask{
{},
task,
}
ks := newKindSorter(manifest, resourceOrder)
sort.Sort(ks)
assert.Equal(t, task, manifest[0])
assert.Nil(t, manifest[1].targetObj)
}

View File

@@ -3,6 +3,7 @@
ArgoCD supports several different ways in which kubernetes manifests can be defined:
* [ksonnet](https://ksonnet.io) applications
* [kustomize](https://kustomize.io) applications
* [helm](https://helm.sh) charts
* Simple directory of YAML/json manifests

View File

@@ -7,15 +7,15 @@ An example guestbook application is provided to demonstrate how ArgoCD works.
* Have a [kubeconfig](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) file (default location is `~/.kube/config`).
## 1. Install ArgoCD
```
```bash
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/v0.7.2/manifests/install.yaml
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/v0.8.2/manifests/install.yaml
```
This will create a new namespace, `argocd`, where ArgoCD services and application resources will live.
NOTE:
* On GKE with RBAC enabled, you may need to grant your account the ability to create new cluster roles
```
```bash
kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com
```
@@ -24,14 +24,14 @@ kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=c
Download the latest ArgoCD version:
On Mac:
```
```bash
brew install argoproj/tap/argocd
```
On Linux:
```
curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v0.7.2/argocd-linux-amd64
```bash
curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v0.8.2/argocd-linux-amd64
chmod +x /usr/local/bin/argocd
```
@@ -40,32 +40,40 @@ chmod +x /usr/local/bin/argocd
By default, the ArgoCD API server is not exposed with an external IP. To expose the API server,
change the service type to `LoadBalancer`:
```
```bash
kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
```
### Notes about Ingress and AWS Load Balancers
* If using Ingress objects without TLS from the ingress-controller to ArgoCD API server, you will
need to add the `--insecure` command line flag to the argocd-server deployment.
* AWS Classic ELB (in HTTP mode) and ALB do not have full support for HTTP2/gRPC which is the
protocol used by the `argocd` CLI. When using an AWS load balancer, either Classic ELB in
passthrough mode is needed, or NLBs.
## 4. Login to the server from the CLI
Login using the `admin` user. The initial password is autogenerated to be the pod name of the
ArgoCD API server. This can be retrieved with the command:
```
```bash
kubectl get pods -n argocd -l app=argocd-server -o name | cut -d'/' -f 2
```
Using the above password, login to ArgoCD's external IP:
On Minikube:
```
```bash
argocd login $(minikube service argocd-server -n argocd --url | cut -d'/' -f 3) --name minikube
```
Other clusters:
```
```bash
kubectl get svc -n argocd argocd-server
argocd login <EXTERNAL-IP>
```
After logging in, change the password using the command:
```
```bash
argocd account update-password
argocd relogin
```
@@ -75,13 +83,13 @@ argocd relogin
We will now register a cluster to deploy applications to. First list all clusters contexts in your
kubconfig:
```
```bash
argocd cluster add
```
Choose a context name from the list and supply it to `argocd cluster add CONTEXTNAME`. For example,
for minikube context, run:
```
```bash
argocd cluster add minikube --in-cluster
```
@@ -101,7 +109,7 @@ flag should be omitted.
Open a browser to the ArgoCD external UI, and login using the credentials set in step 4.
On Minikube:
```
```bash
minikube service argocd-server -n argocd
```
@@ -122,7 +130,7 @@ After connecting a git repository, select the guestbook application for creation
Applications can be also be created using the ArgoCD CLI:
```
```bash
argocd app create guestbook-default --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --env default
```
@@ -134,7 +142,7 @@ From UI:
![create app](assets/guestbook-app.png)
From CLI:
```
```bash
$ argocd app get guestbook-default
Name: guestbook-default
Server: https://kubernetes.default.svc
@@ -153,7 +161,7 @@ Deployment guestbook-ui OutOfSync
The application status is initially in an `OutOfSync` state, since the application has yet to be
deployed, and no Kubernetes resources have been created. To sync (deploy) the application, run:
```
```bash
$ argocd app sync guestbook-default
Application: guestbook-default
Operation: Sync

View File

@@ -0,0 +1,45 @@
# ArgoCD Release Instructions
1. Tag, build, and push argo-cd-ui
```bash
cd argo-cd-ui
git tag vX.Y.Z
git push upstream vX.Y.Z
IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z DOCKER_PUSH=true yarn docker
```
2. Create release-X.Y branch (if creating initial X.Y release)
```bash
git checkout -b release-X.Y
git push upstream release-X.Y
```
3. Update manifests with new version
```bash
vi manifests/base/kustomization.yaml # update with new image tags
make manifests
git commit -a -m "Update manifests to vX.Y.Z"
git push upstream master
```
4. Tag, build, and push release to docker hub
```bash
git tag vX.Y.Z
make release IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z DOCKER_PUSH=true
git push upstream vX.Y.Z
```
5. Update argocd brew formula
```bash
git clone https://github.com/argoproj/homebrew-tap
cd homebrew-tap
shasum -a 256 ~/go/src/github.com/argoproj/argo-cd/dist/argocd-darwin-amd64
# edit argocd.rb with version and checksum
git commit -a -m "Update argocd to vX.Y.Z"
git push
```
6. Update documentation:
* Edit CHANGELOG.md with release notes
* Update getting_started.md with new version
* Create GitHub release from new tag and upload binaries (e.g. dist/argocd-darwin-amd64)

View File

@@ -64,8 +64,8 @@ for i in ${PROTO_FILES}; do
# Path to the google API gateway annotations.proto will be different depending if we are
# building natively (e.g. from workspace) vs. part of a docker build.
if [ -f /.dockerenv ]; then
GOOGLE_PROTO_API_PATH=/root/go/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
GOGO_PROTOBUF_PATH=/root/go/src/github.com/gogo/protobuf
GOOGLE_PROTO_API_PATH=$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
GOGO_PROTOBUF_PATH=$GOPATH/src/github.com/gogo/protobuf
else
GOOGLE_PROTO_API_PATH=${PROJECT_ROOT}/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
GOGO_PROTOBUF_PATH=${PROJECT_ROOT}/vendor/github.com/gogo/protobuf

View File

@@ -1,11 +1,22 @@
#!/bin/sh
IMAGE_NAMESPACE=${IMAGE_NAMESPACE:='argoproj'}
IMAGE_TAG=${IMAGE_TAG:='latest'}
SRCROOT="$( cd "$(dirname "$0")/.." ; pwd -P )"
AUTOGENMSG="# This is an auto-generated file. DO NOT EDIT"
for i in "$(ls manifests/components/*.yaml)"; do
sed -i '' 's@\( image: \(.*\)/\(argocd-.*\):.*\)@ image: '"${IMAGE_NAMESPACE}"'/\3:'"${IMAGE_TAG}"'@g' $i
done
update_image () {
if [ ! -z "${IMAGE_NAMESPACE}" ]; then
sed -i '' 's| image: \(.*\)/\(argocd-.*\)| image: '"${IMAGE_NAMESPACE}"'/\2|g' ${1}
fi
if [ ! -z "${IMAGE_TAG}" ]; then
sed -i '' 's|\( image: .*/argocd-.*\)\:.*|\1:'"${IMAGE_TAG}"'|g' ${1}
fi
}
echo "${AUTOGENMSG}" > ${SRCROOT}/manifests/install.yaml
kustomize build ${SRCROOT}/manifests/cluster-install >> ${SRCROOT}/manifests/install.yaml
update_image ${SRCROOT}/manifests/install.yaml
echo "${AUTOGENMSG}" > ${SRCROOT}/manifests/namespace-install.yaml
kustomize build ${SRCROOT}/manifests/base >> ${SRCROOT}/manifests/namespace-install.yaml
update_image ${SRCROOT}/manifests/namespace-install.yaml
echo "# This is an auto-generated file. DO NOT EDIT" > manifests/install.yaml
cat manifests/components/*.yaml >> manifests/install.yaml

16
manifests/README.md Normal file
View File

@@ -0,0 +1,16 @@
# ArgoCD Installation Manifests
Two sets of installation manifests are provided:
* [install.yaml](install.yaml) - Standard ArgoCD installation with cluster-admin access. Use this
manifest set if you plan to use ArgoCD to deploy applications in the same cluster that ArgoCD runs
in (i.e. kubernetes.default.svc). Will still be able to deploy to external clusters with inputted
credentials.
* [namespace-install.yaml](namespace-install.yaml) - Installation of ArgoCD which requires only
namespace level privileges (does not need cluster roles). Use this manifest set if you do not
need ArgoCD to deploy applications in the same cluster that ArgoCD runs in, and will rely solely
on inputted cluster credentials. An example of using this set of manifests is if you run several
ArgoCD instances for different teams, where each instance will be deploying applications to
external clusters. Will still be possible to deploy to the same cluster (kubernetes.default.svc)
with inputted credentials (i.e. `argocd cluster add <CONTEXT> --in-cluster`).

View File

@@ -1,4 +1,3 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -14,6 +13,6 @@ spec:
spec:
containers:
- command: [/argocd-application-controller, --repo-server, 'argocd-repo-server:8081']
image: argoproj/argocd-application-controller:v0.8.0
image: argoproj/argocd-application-controller:latest
name: application-controller
serviceAccountName: application-controller

View File

@@ -1,4 +1,3 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:

View File

@@ -1,4 +1,3 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:

View File

@@ -1,4 +1,3 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@@ -1,4 +1,3 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:

View File

@@ -1,4 +1,3 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:

View File

@@ -0,0 +1,23 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
# data:
# # ArgoCD's externally facing base URL. Required for configuring SSO
# # url: https://argo-cd-demo.argoproj.io
#
# # A dex connector configuration. See documentation on how to configure SSO:
# # https://github.com/argoproj/argo-cd/blob/master/docs/sso.md#2-configure-argocd-for-sso
# dex.config: |
# connectors:
# # GitHub example
# - type: github
# id: github
# name: GitHub
# config:
# clientID: aabbccddeeff00112233
# clientSecret: $dex.github.clientSecret
# orgs:
# - name: your-github-org
# teams:
# - red-team

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: argocd-metrics
spec:
ports:
- name: http
protocol: TCP
port: 8082
targetPort: 8082
selector:
app: argocd-server

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
# data:
# # An RBAC policy .csv file containing additional policy and role definitions.
# # See https://github.com/argoproj/argo-cd/blob/master/docs/rbac.md on how to write RBAC policies.
# policy.csv: |
# # Give all members of "my-org:team-alpha" the ability to sync apps in "my-project"
# p, my-org:team-alpha, applications, sync, my-project/*, allow
# # Make all members of "my-org:team-beta" admins
# g, my-org:team-beta, role:admin
#
# # The default role ArgoCD will fall back to, when authorizing API requests
# policy.default: role:readonly

View File

@@ -1,4 +1,3 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -12,9 +11,15 @@ spec:
labels:
app: argocd-repo-server
spec:
automountServiceAccountToken: false
containers:
- name: argocd-repo-server
image: argoproj/argocd-repo-server:v0.8.0
image: argoproj/argocd-repo-server:latest
command: [/argocd-repo-server]
ports:
- containerPort: 8081
- containerPort: 8081
readinessProbe:
tcpSocket:
port: 8081
initialDelaySeconds: 5
periodSeconds: 10

View File

@@ -1,4 +1,3 @@
---
apiVersion: v1
kind: Service
metadata:

View File

@@ -0,0 +1,26 @@
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
type: Opaque
# data:
# # TLS certificate and private key for API server.
# # Autogenerated with a self-signed ceritificate if keys are missing.
# tls.crt:
# tls.key:
#
# # bcrypt hash of the admin password and it's last modified time. Autogenerated on initial
# # startup. To reset a forgotten password, delete both keys and restart argocd-server.
# admin.password:
# admin.passwordMtime:
#
# # random server signature key for session validation. Autogenerated on initial startup
# server.secretkey:
#
# # The following keys hold the shared secret for authenticating GitHub/GitLab/BitBucket webhook
# # events. To enable webhooks, configure one or more of the following keys with the shared git
# # provider webhook secret. The payload URL configured in the git provider should use the
# # /api/webhook endpoint of your ArgoCD instance (e.g. https://argocd.example.com/api/webhook)
# github.webhook.secret:
# gitlab.webhook.secret:
# bitbucket.webhook.uuid:

View File

@@ -1,4 +1,3 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -14,21 +13,15 @@ spec:
spec:
serviceAccountName: argocd-server
initContainers:
- name: copyutil
image: argoproj/argocd-server:v0.8.0
command: [cp, /argocd-util, /shared]
volumeMounts:
- mountPath: /shared
name: static-files
- name: ui
image: argoproj/argocd-ui:v0.8.0
image: argoproj/argocd-ui:latest
command: [cp, -r, /app, /shared]
volumeMounts:
- mountPath: /shared
name: static-files
containers:
- name: argocd-server
image: argoproj/argocd-server:v0.8.0
image: argoproj/argocd-server:latest
command: [/argocd-server, --staticassets, /shared/app, --repo-server, 'argocd-repo-server:8081']
volumeMounts:
- mountPath: /shared
@@ -39,12 +32,6 @@ spec:
port: 8080
initialDelaySeconds: 3
periodSeconds: 30
- name: dex
image: quay.io/coreos/dex:v2.10.0
command: [/shared/argocd-util, rundex]
volumeMounts:
- mountPath: /shared
name: static-files
volumes:
- emptyDir: {}
name: static-files

View File

@@ -1,4 +1,3 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:

View File

@@ -1,4 +1,3 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:

View File

@@ -1,4 +1,3 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@@ -1,4 +1,3 @@
---
apiVersion: v1
kind: Service
metadata:

View File

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: dex-server
spec:
selector:
matchLabels:
app: dex-server
template:
metadata:
labels:
app: dex-server
spec:
serviceAccountName: dex-server
initContainers:
- name: copyutil
image: argoproj/argocd-server:latest
command: [cp, /argocd-util, /shared]
volumeMounts:
- mountPath: /shared
name: static-files
containers:
- name: dex
image: quay.io/dexidp/dex:v2.11.0
command: [/shared/argocd-util, rundex]
ports:
- containerPort: 5556
- containerPort: 5557
volumeMounts:
- mountPath: /shared
name: static-files
volumes:
- emptyDir: {}
name: static-files

View File

@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: dex-server-role
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- get
- list
- watch

View File

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dex-server-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dex-server-role
subjects:
- kind: ServiceAccount
name: dex-server

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: dex-server

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: dex-server
spec:
ports:
- name: http
protocol: TCP
port: 5556
targetPort: 5556
- name: grpc
protocol: TCP
port: 5557
targetPort: 5557
selector:
app: dex-server

View File

@@ -0,0 +1,31 @@
resources:
- application-crd.yaml
- appproject-crd.yaml
- argocd-cm.yaml
- argocd-secret.yaml
- argocd-rbac-cm.yaml
- application-controller-sa.yaml
- application-controller-role.yaml
- application-controller-rolebinding.yaml
- application-controller-deployment.yaml
- argocd-server-sa.yaml
- argocd-server-role.yaml
- argocd-server-rolebinding.yaml
- argocd-server-deployment.yaml
- argocd-server-service.yaml
- argocd-metrics-service.yaml
- argocd-repo-server-deployment.yaml
- argocd-repo-server-service.yaml
- dex-server-sa.yaml
- dex-server-role.yaml
- dex-server-rolebinding.yaml
- dex-server-deployment.yaml
- dex-server-service.yaml
imageTags:
- name: argoproj/argocd-server
newTag: v0.9.2
- name: argoproj/argocd-repo-server
newTag: v0.9.2
- name: argoproj/application-controller
newTag: v0.9.2

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-controller-clusterrole
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- '*'
verbs:
- '*'

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: application-controller-clusterrolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-controller-clusterrole
subjects:
- kind: ServiceAccount
name: application-controller
namespace: argocd

View File

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argocd-server-clusterrole
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- delete

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argocd-server-clusterrolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argocd-server-clusterrole
subjects:
- kind: ServiceAccount
name: argocd-server
namespace: argocd

View File

@@ -0,0 +1,8 @@
bases:
- ../base
resources:
- application-controller-clusterrole.yaml
- application-controller-clusterrolebinding.yaml
- argocd-server-clusterrole.yaml
- argocd-server-clusterrolebinding.yaml

View File

@@ -1,25 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
#data:
# ArgoCD's externally facing URL
# url: https://argo-cd-demo.argoproj.io
# A dex connector configuration.
# Visit https://github.com/argoproj/argo-cd/blob/master/docs/sso.md#2-configure-argocd-for-sso
# for instructions on configuring SSO.
# dex.config: |
# connectors:
# # GitHub example
# - type: github
# id: github
# name: GitHub
# config:
# clientID: aabbccddeeff00112233
# clientSecret: $dex.github.clientSecret
# orgs:
# - name: your-github-org
# teams:
# - red-team

View File

@@ -1,26 +0,0 @@
---
# NOTE: some values in this secret will be populated by the initial startup of the API server
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
type: Opaque
#data:
# TLS certificate and private key for API server
# server.crt:
# server.key:
# The following keys hold the shared secret for authenticating GitHub/GitLab/BitBucket webhook
# events. To enable webhooks, configure one or more of the following keys with the shared git
# provider webhook secret. The payload URL configured in the git provider should use the
# /api/webhook endpoint of your ArgoCD instance (e.g. https://argocd.example.com/api/webhook)
# github.webhook.secret:
# gitlab.webhook.secret:
# bitbucket.webhook.uuid:
# bcrypt hash of the admin password (autogenerated on initial startup).
# To reset a forgotten password, delete this key and restart the argocd-server
# admin.password:
# random server signature key for session validation (autogenerated on initial startup)
# server.secretkey:

View File

@@ -1,16 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
#data:
# An RBAC policy .csv file containing additional policy and role definitions.
# See https://github.com/argoproj/argo-cd/blob/master/docs/rbac.md on how to write RBAC policies.
# policy.csv: |
# # Give all members of "my-org:team-alpha" the ability to sync apps in "my-project"
# p, my-org:team-alpha, applications, sync, my-project/*, allow
# # Make all members of "my-org:team-beta" admins
# g, my-org:team-beta, role:admin
# The default role ArgoCD will fall back to, when authorizing API requests
# policy.default: role:readonly

View File

@@ -1,5 +1,4 @@
# This is an auto-generated file. DO NOT EDIT
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
@@ -30,76 +29,19 @@ spec:
version: v1alpha1
---
apiVersion: v1
kind: ConfigMap
kind: ServiceAccount
metadata:
name: argocd-cm
#data:
# ArgoCD's externally facing URL
# url: https://argo-cd-demo.argoproj.io
# A dex connector configuration.
# Visit https://github.com/argoproj/argo-cd/blob/master/docs/sso.md#2-configure-argocd-for-sso
# for instructions on configuring SSO.
# dex.config: |
# connectors:
# # GitHub example
# - type: github
# id: github
# name: GitHub
# config:
# clientID: aabbccddeeff00112233
# clientSecret: $dex.github.clientSecret
# orgs:
# - name: your-github-org
# teams:
# - red-team
---
# NOTE: some values in this secret will be populated by the initial startup of the API server
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
type: Opaque
#data:
# TLS certificate and private key for API server
# server.crt:
# server.key:
# The following keys hold the shared secret for authenticating GitHub/GitLab/BitBucket webhook
# events. To enable webhooks, configure one or more of the following keys with the shared git
# provider webhook secret. The payload URL configured in the git provider should use the
# /api/webhook endpoint of your ArgoCD instance (e.g. https://argocd.example.com/api/webhook)
# github.webhook.secret:
# gitlab.webhook.secret:
# bitbucket.webhook.uuid:
# bcrypt hash of the admin password (autogenerated on initial startup).
# To reset a forgotten password, delete this key and restart the argocd-server
# admin.password:
# random server signature key for session validation (autogenerated on initial startup)
# server.secretkey:
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
#data:
# An RBAC policy .csv file containing additional policy and role definitions.
# See https://github.com/argoproj/argo-cd/blob/master/docs/rbac.md on how to write RBAC policies.
# policy.csv: |
# # Give all members of "my-org:team-alpha" the ability to sync apps in "my-project"
# p, my-org:team-alpha, applications, sync, my-project/*, allow
# # Make all members of "my-org:team-beta" admins
# g, my-org:team-beta, role:admin
# The default role ArgoCD will fall back to, when authorizing API requests
# policy.default: role:readonly
name: application-controller
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: application-controller
name: argocd-server
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: dex-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -136,43 +78,6 @@ rules:
verbs:
- create
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-controller-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: application-controller-role
subjects:
- kind: ServiceAccount
name: application-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: application-controller
spec:
selector:
matchLabels:
app: application-controller
template:
metadata:
labels:
app: application-controller
spec:
containers:
- command: [/argocd-application-controller, --repo-server, 'argocd-repo-server:8081']
image: argoproj/argocd-application-controller:v0.8.0
name: application-controller
serviceAccountName: application-controller
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argocd-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -214,6 +119,61 @@ rules:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: dex-server-role
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-controller-clusterrole
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argocd-server-clusterrole
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-controller-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: application-controller-role
subjects:
- kind: ServiceAccount
name: application-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argocd-server-role-binding
@@ -225,55 +185,83 @@ subjects:
- kind: ServiceAccount
name: argocd-server
---
apiVersion: apps/v1
kind: Deployment
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dex-server-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dex-server-role
subjects:
- kind: ServiceAccount
name: dex-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: application-controller-clusterrolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-controller-clusterrole
subjects:
- kind: ServiceAccount
name: application-controller
namespace: argocd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argocd-server-clusterrolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argocd-server-clusterrole
subjects:
- kind: ServiceAccount
name: argocd-server
namespace: argocd
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
---
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
name: argocd-metrics
spec:
ports:
- name: http
port: 8082
protocol: TCP
targetPort: 8082
selector:
matchLabels:
app: argocd-server
template:
metadata:
labels:
app: argocd-server
spec:
serviceAccountName: argocd-server
initContainers:
- name: copyutil
image: argoproj/argocd-server:v0.8.0
command: [cp, /argocd-util, /shared]
volumeMounts:
- mountPath: /shared
name: static-files
- name: ui
image: argoproj/argocd-ui:v0.8.0
command: [cp, -r, /app, /shared]
volumeMounts:
- mountPath: /shared
name: static-files
containers:
- name: argocd-server
image: argoproj/argocd-server:v0.8.0
command: [/argocd-server, --staticassets, /shared/app, --repo-server, 'argocd-repo-server:8081']
volumeMounts:
- mountPath: /shared
name: static-files
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 3
periodSeconds: 30
- name: dex
image: quay.io/coreos/dex:v2.10.0
command: [/shared/argocd-util, rundex]
volumeMounts:
- mountPath: /shared
name: static-files
volumes:
- emptyDir: {}
name: static-files
app: argocd-server
---
apiVersion: v1
kind: Service
metadata:
name: argocd-repo-server
spec:
ports:
- port: 8081
targetPort: 8081
selector:
app: argocd-repo-server
---
apiVersion: v1
kind: Service
@@ -282,16 +270,55 @@ metadata:
spec:
ports:
- name: http
protocol: TCP
port: 80
protocol: TCP
targetPort: 8080
- name: https
protocol: TCP
port: 443
protocol: TCP
targetPort: 8080
selector:
app: argocd-server
---
apiVersion: v1
kind: Service
metadata:
name: dex-server
spec:
ports:
- name: http
port: 5556
protocol: TCP
targetPort: 5556
- name: grpc
port: 5557
protocol: TCP
targetPort: 5557
selector:
app: dex-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: application-controller
spec:
selector:
matchLabels:
app: application-controller
template:
metadata:
labels:
app: application-controller
spec:
containers:
- command:
- /argocd-application-controller
- --repo-server
- argocd-repo-server:8081
image: argoproj/argocd-application-controller:v0.9.2
name: application-controller
serviceAccountName: application-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -305,20 +332,103 @@ spec:
labels:
app: argocd-repo-server
spec:
automountServiceAccountToken: false
containers:
- name: argocd-repo-server
image: argoproj/argocd-repo-server:v0.8.0
command: [/argocd-repo-server]
- command:
- /argocd-repo-server
image: argoproj/argocd-repo-server:v0.9.2
name: argocd-repo-server
ports:
- containerPort: 8081
- containerPort: 8081
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 10
tcpSocket:
port: 8081
---
apiVersion: v1
kind: Service
apiVersion: apps/v1
kind: Deployment
metadata:
name: argocd-repo-server
name: argocd-server
spec:
ports:
- port: 8081
targetPort: 8081
selector:
app: argocd-repo-server
matchLabels:
app: argocd-server
template:
metadata:
labels:
app: argocd-server
spec:
containers:
- command:
- /argocd-server
- --staticassets
- /shared/app
- --repo-server
- argocd-repo-server:8081
image: argoproj/argocd-server:v0.9.2
name: argocd-server
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 3
periodSeconds: 30
volumeMounts:
- mountPath: /shared
name: static-files
initContainers:
- command:
- cp
- -r
- /app
- /shared
image: argoproj/argocd-ui:v0.9.2
name: ui
volumeMounts:
- mountPath: /shared
name: static-files
serviceAccountName: argocd-server
volumes:
- emptyDir: {}
name: static-files
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dex-server
spec:
selector:
matchLabels:
app: dex-server
template:
metadata:
labels:
app: dex-server
spec:
containers:
- command:
- /shared/argocd-util
- rundex
image: quay.io/dexidp/dex:v2.11.0
name: dex
ports:
- containerPort: 5556
- containerPort: 5557
volumeMounts:
- mountPath: /shared
name: static-files
initContainers:
- command:
- cp
- /argocd-util
- /shared
image: argoproj/argocd-server:v0.9.2
name: copyutil
volumeMounts:
- mountPath: /shared
name: static-files
serviceAccountName: dex-server
volumes:
- emptyDir: {}
name: static-files

View File

@@ -0,0 +1,380 @@
# This is an auto-generated file. DO NOT EDIT
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: applications.argoproj.io
spec:
group: argoproj.io
names:
kind: Application
plural: applications
shortNames:
- app
scope: Namespaced
version: v1alpha1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: appprojects.argoproj.io
spec:
group: argoproj.io
names:
kind: AppProject
plural: appprojects
shortNames:
- appproj
- appprojs
scope: Namespaced
version: v1alpha1
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: application-controller
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argocd-server
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: dex-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: application-controller-role
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- watch
- list
- patch
- update
- apiGroups:
- argoproj.io
resources:
- applications
- appprojects
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argocd-server-role
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- argoproj.io
resources:
- applications
- appprojects
verbs:
- create
- get
- list
- watch
- update
- delete
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: dex-server-role
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-controller-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: application-controller-role
subjects:
- kind: ServiceAccount
name: application-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argocd-server-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argocd-server-role
subjects:
- kind: ServiceAccount
name: argocd-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dex-server-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dex-server-role
subjects:
- kind: ServiceAccount
name: dex-server
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
---
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
name: argocd-metrics
spec:
ports:
- name: http
port: 8082
protocol: TCP
targetPort: 8082
selector:
app: argocd-server
---
apiVersion: v1
kind: Service
metadata:
name: argocd-repo-server
spec:
ports:
- port: 8081
targetPort: 8081
selector:
app: argocd-repo-server
---
apiVersion: v1
kind: Service
metadata:
name: argocd-server
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
- name: https
port: 443
protocol: TCP
targetPort: 8080
selector:
app: argocd-server
---
apiVersion: v1
kind: Service
metadata:
name: dex-server
spec:
ports:
- name: http
port: 5556
protocol: TCP
targetPort: 5556
- name: grpc
port: 5557
protocol: TCP
targetPort: 5557
selector:
app: dex-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: application-controller
spec:
selector:
matchLabels:
app: application-controller
template:
metadata:
labels:
app: application-controller
spec:
containers:
- command:
- /argocd-application-controller
- --repo-server
- argocd-repo-server:8081
image: argoproj/argocd-application-controller:v0.9.2
name: application-controller
serviceAccountName: application-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: argocd-repo-server
spec:
selector:
matchLabels:
app: argocd-repo-server
template:
metadata:
labels:
app: argocd-repo-server
spec:
automountServiceAccountToken: false
containers:
- command:
- /argocd-repo-server
image: argoproj/argocd-repo-server:v0.9.2
name: argocd-repo-server
ports:
- containerPort: 8081
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 10
tcpSocket:
port: 8081
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: argocd-server
spec:
selector:
matchLabels:
app: argocd-server
template:
metadata:
labels:
app: argocd-server
spec:
containers:
- command:
- /argocd-server
- --staticassets
- /shared/app
- --repo-server
- argocd-repo-server:8081
image: argoproj/argocd-server:v0.9.2
name: argocd-server
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 3
periodSeconds: 30
volumeMounts:
- mountPath: /shared
name: static-files
initContainers:
- command:
- cp
- -r
- /app
- /shared
image: argoproj/argocd-ui:v0.9.2
name: ui
volumeMounts:
- mountPath: /shared
name: static-files
serviceAccountName: argocd-server
volumes:
- emptyDir: {}
name: static-files
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dex-server
spec:
selector:
matchLabels:
app: dex-server
template:
metadata:
labels:
app: dex-server
spec:
containers:
- command:
- /shared/argocd-util
- rundex
image: quay.io/dexidp/dex:v2.11.0
name: dex
ports:
- containerPort: 5556
- containerPort: 5557
volumeMounts:
- mountPath: /shared
name: static-files
initContainers:
- command:
- cp
- /argocd-util
- /shared
image: argoproj/argocd-server:v0.9.2
name: copyutil
volumeMounts:
- mountPath: /shared
name: static-files
serviceAccountName: dex-server
volumes:
- emptyDir: {}
name: static-files

File diff suppressed because it is too large Load Diff

View File

@@ -13,6 +13,15 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1alpha1";
// AWSAuthConfig is an AWS IAM authentication configuration
message AWSAuthConfig {
// ClusterName contains AWS cluster name
optional string clusterName = 1;
// RoleARN contains optional role ARN. If set then AWS IAM Authenticator assumes a role to perform cluster operations instead of the default AWS credential provider chain.
optional string roleARN = 2;
}
// AppProject is a definition of AppProject resource.
// +genclient
// +genclient:noStatus
@@ -43,6 +52,12 @@ message AppProjectSpec {
optional string description = 3;
repeated ProjectRole roles = 4;
// ClusterResourceWhitelist contains list of whitelisted cluster level resources
repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind clusterResourceWhitelist = 5;
// NamespaceResourceBlacklist contains list of blacklisted namespace level resources
repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind namespaceResourceBlacklist = 6;
}
// Application is a definition of Application resource.
@@ -117,6 +132,9 @@ message ApplicationSpec {
// Project is a application project name. Empty name means that application belongs to 'default' project.
optional string project = 3;
// SyncPolicy controls when a sync will be performed
optional SyncPolicy syncPolicy = 4;
}
// ApplicationStatus contains information about application status in target environment.
@@ -176,6 +194,9 @@ message ClusterConfig {
// TLSClientConfig contains settings to enable transport layer security
optional TLSClientConfig tlsClientConfig = 4;
// AWSAuthConfig contains IAM authentication configuration
optional AWSAuthConfig awsAuthConfig = 5;
}
// ClusterList is a collection of Clusters.
@@ -194,6 +215,8 @@ message ComparisonResult {
optional string status = 5;
repeated ResourceState resources = 6;
optional string revision = 7;
}
// ComponentParameter contains information about component parameter value
@@ -292,6 +315,15 @@ message OperationState {
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 7;
}
// ParameterOverrides masks the value so protobuf can generate
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
message ParameterOverrides {
// items, if empty, will result in an empty slice
repeated ComponentParameter items = 1;
}
// ProjectRole represents a role that has access to a project
message ProjectRole {
optional string name = 1;
@@ -366,7 +398,8 @@ message RollbackOperation {
// SyncOperation contains sync operation details.
message SyncOperation {
// Revision is the git revision in which to sync the application to
// Revision is the git revision in which to sync the application to.
// If omitted, will use the revision specified in app spec.
optional string revision = 1;
// Prune deletes resources that are no longer tracked in git
@@ -377,6 +410,11 @@ message SyncOperation {
// SyncStrategy describes how to perform the sync
optional SyncStrategy syncStrategy = 4;
// ParameterOverrides applies any parameter overrides as part of the sync
// If nil, uses the parameter override set in application.
// If empty, sets no parameter overrides
optional ParameterOverrides parameterOverrides = 5;
}
// SyncOperationResult represent result of sync operation
@@ -391,12 +429,24 @@ message SyncOperationResult {
repeated HookStatus hooks = 3;
}
// SyncStrategy indicates the
// SyncPolicy controls when a sync will be performed in response to updates in git
message SyncPolicy {
// Automated will keep an application synced to the target revision
optional SyncPolicyAutomated automated = 1;
}
// SyncPolicyAutomated controls the behavior of an automated sync
message SyncPolicyAutomated {
// Prune will prune resources automatically as part of automated sync (default: false)
optional bool prune = 1;
}
// SyncStrategy controls the manner in which a sync is performed
message SyncStrategy {
// Apply wil perform a `kubectl apply` to perform the sync. This is the default strategy
// Apply will perform a `kubectl apply` to perform the sync.
optional SyncStrategyApply apply = 1;
// Hook will submit any referenced resources to perform the sync
// Hook will submit any referenced resources to perform the sync. This is the default strategy
optional SyncStrategyHook hook = 2;
}

View File

@@ -2,6 +2,8 @@ package v1alpha1
import (
"encoding/json"
fmt "fmt"
"path/filepath"
"reflect"
"strings"
@@ -9,6 +11,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd/api"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/util/git"
@@ -16,7 +19,8 @@ import (
// SyncOperation contains sync operation details.
type SyncOperation struct {
// Revision is the git revision in which to sync the application to
// Revision is the git revision in which to sync the application to.
// If omitted, will use the revision specified in app spec.
Revision string `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
// Prune deletes resources that are no longer tracked in git
Prune bool `json:"prune,omitempty" protobuf:"bytes,2,opt,name=prune"`
@@ -24,6 +28,19 @@ type SyncOperation struct {
DryRun bool `json:"dryRun,omitempty" protobuf:"bytes,3,opt,name=dryRun"`
// SyncStrategy describes how to perform the sync
SyncStrategy *SyncStrategy `json:"syncStrategy,omitempty" protobuf:"bytes,4,opt,name=syncStrategy"`
// ParameterOverrides applies any parameter overrides as part of the sync
// If nil, uses the parameter override set in application.
// If empty, sets no parameter overrides
ParameterOverrides ParameterOverrides `json:"parameterOverrides" protobuf:"bytes,5,opt,name=parameterOverrides"`
}
// ParameterOverrides masks the value so protobuf can generate
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
type ParameterOverrides []ComponentParameter
// String implements fmt.Stringer by rendering the overrides as a plain
// []ComponentParameter slice (the gogoproto stringer is disabled above,
// so this provides the human-readable form).
func (po ParameterOverrides) String() string {
return fmt.Sprintf("%v", []ComponentParameter(po))
}
type RollbackOperation struct {
@@ -78,11 +95,23 @@ type OperationState struct {
FinishedAt *metav1.Time `json:"finishedAt" protobuf:"bytes,7,opt,name=finishedAt"`
}
// SyncStrategy indicates the
// SyncPolicy controls when a sync will be performed in response to updates in git
type SyncPolicy struct {
// Automated will keep an application synced to the target revision
Automated *SyncPolicyAutomated `json:"automated,omitempty" protobuf:"bytes,1,opt,name=automated"`
}
// SyncPolicyAutomated controls the behavior of an automated sync
type SyncPolicyAutomated struct {
// Prune will prune resources automatically as part of automated sync (default: false)
Prune bool `json:"prune,omitempty" protobuf:"bytes,1,opt,name=prune"`
}
// SyncStrategy controls the manner in which a sync is performed
type SyncStrategy struct {
// Apply wil perform a `kubectl apply` to perform the sync. This is the default strategy
// Apply will perform a `kubectl apply` to perform the sync.
Apply *SyncStrategyApply `json:"apply,omitempty" protobuf:"bytes,1,opt,name=apply"`
// Hook will submit any referenced resources to perform the sync
// Hook will submit any referenced resources to perform the sync. This is the default strategy
Hook *SyncStrategyHook `json:"hook,omitempty" protobuf:"bytes,2,opt,name=hook"`
}
@@ -218,6 +247,8 @@ type ApplicationSpec struct {
Destination ApplicationDestination `json:"destination" protobuf:"bytes,2,name=destination"`
// Project is a application project name. Empty name means that application belongs to 'default' project.
Project string `json:"project" protobuf:"bytes,3,name=project"`
// SyncPolicy controls when a sync will be performed
SyncPolicy *SyncPolicy `json:"syncPolicy,omitempty" protobuf:"bytes,4,name=syncPolicy"`
}
// ComponentParameter contains information about component parameter value
@@ -283,8 +314,10 @@ const (
ApplicationConditionDeletionError = "DeletionError"
// ApplicationConditionInvalidSpecError indicates that application source is invalid
ApplicationConditionInvalidSpecError = "InvalidSpecError"
// ApplicationComparisonError indicates controller failed to compare application state
// ApplicationConditionComparisonError indicates controller failed to compare application state
ApplicationConditionComparisonError = "ComparisonError"
// ApplicationConditionSyncError indicates controller failed to automatically sync the application
ApplicationConditionSyncError = "SyncError"
// ApplicationConditionUnknownError indicates an unknown controller error
ApplicationConditionUnknownError = "UnknownError"
// ApplicationConditionSharedResourceWarning indicates that controller detected resources which belongs to more than one application
@@ -305,6 +338,7 @@ type ComparisonResult struct {
ComparedTo ApplicationSource `json:"comparedTo" protobuf:"bytes,2,opt,name=comparedTo"`
Status ComparisonStatus `json:"status" protobuf:"bytes,5,opt,name=status,casttype=ComparisonStatus"`
Resources []ResourceState `json:"resources" protobuf:"bytes,6,opt,name=resources"`
Revision string `json:"revision" protobuf:"bytes,7,opt,name=revision"`
}
type HealthStatus struct {
@@ -374,6 +408,15 @@ type ClusterList struct {
Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// AWSAuthConfig is an AWS IAM authentication configuration
type AWSAuthConfig struct {
// ClusterName contains AWS cluster name
ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,1,opt,name=clusterName"`
// RoleARN contains optional role ARN. If set then AWS IAM Authenticator assumes a role to perform cluster operations instead of the default AWS credential provider chain.
RoleARN string `json:"roleARN,omitempty" protobuf:"bytes,2,opt,name=roleARN"`
}
// ClusterConfig is the configuration attributes. This structure is subset of the go-client
// rest.Config with annotations added for marshalling.
type ClusterConfig struct {
@@ -388,6 +431,9 @@ type ClusterConfig struct {
// TLSClientConfig contains settings to enable transport layer security
TLSClientConfig `json:"tlsClientConfig" protobuf:"bytes,4,opt,name=tlsClientConfig"`
// AWSAuthConfig contains IAM authentication configuration
AWSAuthConfig *AWSAuthConfig `json:"awsAuthConfig" protobuf:"bytes,5,opt,name=awsAuthConfig"`
}
// TLSClientConfig contains settings to enable transport layer security
@@ -464,6 +510,12 @@ type AppProjectSpec struct {
Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"`
Roles []ProjectRole `json:"roles,omitempty" protobuf:"bytes,4,rep,name=roles"`
// ClusterResourceWhitelist contains list of whitelisted cluster level resources
ClusterResourceWhitelist []metav1.GroupKind `json:"clusterResourceWhitelist,omitempty" protobuf:"bytes,5,opt,name=clusterResourceWhitelist"`
// NamespaceResourceBlacklist contains list of blacklisted namespace level resources
NamespaceResourceBlacklist []metav1.GroupKind `json:"namespaceResourceBlacklist,omitempty" protobuf:"bytes,6,opt,name=namespaceResourceBlacklist"`
}
// ProjectRole represents a role that has access to a project
@@ -541,7 +593,28 @@ func (spec ApplicationSpec) GetProject() string {
return spec.Project
}
// IsSourcePermitted validiates if the provided application's source is a one of the allowed sources for the project.
// isResourceInList reports whether res matches any entry in list.
// Each list entry's Kind and Group are treated as filepath.Match glob
// patterns, so an entry like {Group: "*", Kind: "*"} matches everything.
// NOTE(review): a malformed pattern makes filepath.Match return an error,
// which is silently treated as a non-match here — confirm that dropping
// the error (rather than surfacing it to the caller) is intended.
func isResourceInList(res metav1.GroupKind, list []metav1.GroupKind) bool {
for _, item := range list {
ok, err := filepath.Match(item.Kind, res.Kind)
if ok && err == nil {
// Kind matched; the entry only applies if the Group pattern matches too.
ok, err = filepath.Match(item.Group, res.Group)
if ok && err == nil {
return true
}
}
}
return false
}
// IsResourcePermitted reports whether the project allows managing the given
// resource group/kind. Namespaced resources are allowed by default and only
// denied when they match the project's NamespaceResourceBlacklist;
// cluster-scoped resources are denied by default and only allowed when they
// match the ClusterResourceWhitelist (blacklist vs. whitelist asymmetry is
// deliberate per the AppProjectSpec field docs above).
func (proj AppProject) IsResourcePermitted(res metav1.GroupKind, namespaced bool) bool {
if namespaced {
return !isResourceInList(res, proj.Spec.NamespaceResourceBlacklist)
} else {
return isResourceInList(res, proj.Spec.ClusterResourceWhitelist)
}
}
// IsSourcePermitted validates if the provided application's source is a one of the allowed sources for the project.
func (proj AppProject) IsSourcePermitted(src ApplicationSource) bool {
normalizedURL := git.NormalizeGitURL(src.RepoURL)
@@ -558,7 +631,6 @@ func (proj AppProject) IsSourcePermitted(src ApplicationSource) bool {
// IsDestinationPermitted validates if the provided application's destination is one of the allowed destinations for the project
func (proj AppProject) IsDestinationPermitted(dst ApplicationDestination) bool {
for _, item := range proj.Spec.Destinations {
if item.Server == dst.Server || item.Server == "*" {
if item.Namespace == dst.Namespace || item.Namespace == "*" {
@@ -571,18 +643,42 @@ func (proj AppProject) IsDestinationPermitted(dst ApplicationDestination) bool {
// RESTConfig returns a go-client REST config from cluster
func (c *Cluster) RESTConfig() *rest.Config {
if c.Server == common.KubernetesInternalAPIServerAddr && c.Config.Username == "" && c.Config.Password == "" && c.Config.BearerToken == "" {
config, err := rest.InClusterConfig()
if err != nil {
panic("Unable to create in-cluster config")
}
return config
}
tlsClientConfig := rest.TLSClientConfig{
Insecure: c.Config.TLSClientConfig.Insecure,
ServerName: c.Config.TLSClientConfig.ServerName,
CertData: c.Config.TLSClientConfig.CertData,
KeyData: c.Config.TLSClientConfig.KeyData,
CAData: c.Config.TLSClientConfig.CAData,
}
if c.Config.AWSAuthConfig != nil {
args := []string{"token", "-i", c.Config.AWSAuthConfig.ClusterName}
if c.Config.AWSAuthConfig.RoleARN != "" {
args = append(args, "-r", c.Config.AWSAuthConfig.RoleARN)
}
return &rest.Config{
Host: c.Server,
TLSClientConfig: tlsClientConfig,
ExecProvider: &api.ExecConfig{
APIVersion: "client.authentication.k8s.io/v1alpha1",
Command: "aws-iam-authenticator",
Args: args,
},
}
}
return &rest.Config{
Host: c.Server,
Username: c.Config.Username,
Password: c.Config.Password,
BearerToken: c.Config.BearerToken,
TLSClientConfig: rest.TLSClientConfig{
Insecure: c.Config.TLSClientConfig.Insecure,
ServerName: c.Config.TLSClientConfig.ServerName,
CertData: c.Config.TLSClientConfig.CertData,
KeyData: c.Config.TLSClientConfig.KeyData,
CAData: c.Config.TLSClientConfig.CAData,
},
Host: c.Server,
Username: c.Config.Username,
Password: c.Config.Password,
BearerToken: c.Config.BearerToken,
TLSClientConfig: tlsClientConfig,
}
}

View File

@@ -9,6 +9,22 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSAuthConfig) DeepCopyInto(out *AWSAuthConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSAuthConfig.
func (in *AWSAuthConfig) DeepCopy() *AWSAuthConfig {
if in == nil {
return nil
}
out := new(AWSAuthConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppProject) DeepCopyInto(out *AppProject) {
*out = *in
@@ -89,6 +105,16 @@ func (in *AppProjectSpec) DeepCopyInto(out *AppProjectSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ClusterResourceWhitelist != nil {
in, out := &in.ClusterResourceWhitelist, &out.ClusterResourceWhitelist
*out = make([]v1.GroupKind, len(*in))
copy(*out, *in)
}
if in.NamespaceResourceBlacklist != nil {
in, out := &in.NamespaceResourceBlacklist, &out.NamespaceResourceBlacklist
*out = make([]v1.GroupKind, len(*in))
copy(*out, *in)
}
return
}
@@ -235,6 +261,15 @@ func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) {
*out = *in
in.Source.DeepCopyInto(&out.Source)
out.Destination = in.Destination
if in.SyncPolicy != nil {
in, out := &in.SyncPolicy, &out.SyncPolicy
if *in == nil {
*out = nil
} else {
*out = new(SyncPolicy)
(*in).DeepCopyInto(*out)
}
}
return
}
@@ -331,6 +366,15 @@ func (in *Cluster) DeepCopy() *Cluster {
func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig) {
*out = *in
in.TLSClientConfig.DeepCopyInto(&out.TLSClientConfig)
if in.AWSAuthConfig != nil {
in, out := &in.AWSAuthConfig, &out.AWSAuthConfig
if *in == nil {
*out = nil
} else {
*out = new(AWSAuthConfig)
**out = **in
}
}
return
}
@@ -746,6 +790,11 @@ func (in *SyncOperation) DeepCopyInto(out *SyncOperation) {
(*in).DeepCopyInto(*out)
}
}
if in.ParameterOverrides != nil {
in, out := &in.ParameterOverrides, &out.ParameterOverrides
*out = make(ParameterOverrides, len(*in))
copy(*out, *in)
}
return
}
@@ -799,6 +848,47 @@ func (in *SyncOperationResult) DeepCopy() *SyncOperationResult {
return out
}
// DeepCopyInto writes a deep copy of the receiver into out. in must be non-nil.
func (in *SyncPolicy) DeepCopyInto(out *SyncPolicy) {
	*out = *in
	// The shallow copy above aliases the Automated pointer; replace it with a
	// fresh copy of the pointed-to struct. (The outer nil check makes the
	// generator's inner "*in == nil" branch unreachable, so it is omitted.)
	if in.Automated != nil {
		out.Automated = new(SyncPolicyAutomated)
		*out.Automated = *in.Automated
	}
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil when the receiver is nil.
func (in *SyncPolicy) DeepCopy() *SyncPolicy {
	if in == nil {
		return nil
	}
	clone := new(SyncPolicy)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto writes a deep copy of the receiver into out. in must be non-nil.
func (in *SyncPolicyAutomated) DeepCopyInto(out *SyncPolicyAutomated) {
	*out = *in
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil when the receiver is nil.
func (in *SyncPolicyAutomated) DeepCopy() *SyncPolicyAutomated {
	if in == nil {
		return nil
	}
	clone := new(SyncPolicyAutomated)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncStrategy) DeepCopyInto(out *SyncStrategy) {
*out = *in

View File

@@ -1,10 +1,13 @@
package reposerver
import (
"crypto/tls"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/util"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// Clientset represents repository server API clients
@@ -17,7 +20,7 @@ type clientSet struct {
}
func (c *clientSet) NewRepositoryClient() (util.Closer, repository.RepositoryServiceClient, error) {
conn, err := grpc.Dial(c.address, grpc.WithInsecure())
conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})))
if err != nil {
log.Errorf("Unable to connect to repository service with address %s", c.address)
return nil, nil, err

View File

@@ -11,11 +11,13 @@ import (
"strings"
"time"
"github.com/google/go-jsonnet"
"github.com/ksonnet/ksonnet/pkg/app"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
@@ -60,17 +62,7 @@ func NewService(gitFactory git.ClientFactory, cache cache.Cache) *Service {
// ListDir lists the contents of a git repository
func (s *Service) ListDir(ctx context.Context, q *ListDirRequest) (*FileList, error) {
appRepoPath := tempRepoPath(q.Repo.Repo)
s.repoLock.Lock(appRepoPath)
defer s.repoLock.Unlock(appRepoPath)
gitClient := s.gitFactory.NewClient(q.Repo.Repo, appRepoPath, q.Repo.Username, q.Repo.Password, q.Repo.SSHPrivateKey)
err := gitClient.Init()
if err != nil {
return nil, err
}
commitSHA, err := gitClient.LsRemote(q.Revision)
gitClient, commitSHA, err := s.newClientResolveRevision(q.Repo, q.Revision)
if err != nil {
return nil, err
}
@@ -82,7 +74,9 @@ func (s *Service) ListDir(ctx context.Context, q *ListDirRequest) (*FileList, er
return &res, nil
}
err = checkoutRevision(gitClient, q.Revision)
s.repoLock.Lock(gitClient.Root())
defer s.repoLock.Unlock(gitClient.Root())
commitSHA, err = checkoutRevision(gitClient, commitSHA)
if err != nil {
return nil, err
}
@@ -96,7 +90,7 @@ func (s *Service) ListDir(ctx context.Context, q *ListDirRequest) (*FileList, er
Items: lsFiles,
}
err = s.cache.Set(&cache.Item{
Key: cacheKey,
Key: listDirCacheKey(commitSHA, q),
Object: &res,
Expiration: DefaultRepoCacheExpiration,
})
@@ -107,16 +101,21 @@ func (s *Service) ListDir(ctx context.Context, q *ListDirRequest) (*FileList, er
}
func (s *Service) GetFile(ctx context.Context, q *GetFileRequest) (*GetFileResponse, error) {
appRepoPath := tempRepoPath(q.Repo.Repo)
s.repoLock.Lock(appRepoPath)
defer s.repoLock.Unlock(appRepoPath)
gitClient := s.gitFactory.NewClient(q.Repo.Repo, appRepoPath, q.Repo.Username, q.Repo.Password, q.Repo.SSHPrivateKey)
err := gitClient.Init()
gitClient, commitSHA, err := s.newClientResolveRevision(q.Repo, q.Revision)
if err != nil {
return nil, err
}
err = checkoutRevision(gitClient, q.Revision)
cacheKey := getFileCacheKey(commitSHA, q)
var res GetFileResponse
err = s.cache.Get(cacheKey, &res)
if err == nil {
log.Infof("getfile cache hit: %s", cacheKey)
return &res, nil
}
s.repoLock.Lock(gitClient.Root())
defer s.repoLock.Unlock(gitClient.Root())
commitSHA, err = checkoutRevision(gitClient, commitSHA)
if err != nil {
return nil, err
}
@@ -124,36 +123,27 @@ func (s *Service) GetFile(ctx context.Context, q *GetFileRequest) (*GetFileRespo
if err != nil {
return nil, err
}
res := GetFileResponse{
res = GetFileResponse{
Data: data,
}
err = s.cache.Set(&cache.Item{
Key: getFileCacheKey(commitSHA, q),
Object: &res,
Expiration: DefaultRepoCacheExpiration,
})
if err != nil {
log.Warnf("getfile cache set error %s: %v", cacheKey, err)
}
return &res, nil
}
func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*ManifestResponse, error) {
var res ManifestResponse
if git.IsCommitSHA(q.Revision) {
cacheKey := manifestCacheKey(q.Revision, q)
err := s.cache.Get(cacheKey, res)
if err == nil {
log.Infof("manifest cache hit: %s", cacheKey)
return &res, nil
}
}
appRepoPath := tempRepoPath(q.Repo.Repo)
s.repoLock.Lock(appRepoPath)
defer s.repoLock.Unlock(appRepoPath)
gitClient := s.gitFactory.NewClient(q.Repo.Repo, appRepoPath, q.Repo.Username, q.Repo.Password, q.Repo.SSHPrivateKey)
err := gitClient.Init()
if err != nil {
return nil, err
}
commitSHA, err := gitClient.LsRemote(q.Revision)
gitClient, commitSHA, err := s.newClientResolveRevision(q.Repo, q.Revision)
if err != nil {
return nil, err
}
cacheKey := manifestCacheKey(commitSHA, q)
var res ManifestResponse
err = s.cache.Get(cacheKey, &res)
if err == nil {
log.Infof("manifest cache hit: %s", cacheKey)
@@ -165,11 +155,13 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
log.Infof("manifest cache miss: %s", cacheKey)
}
err = checkoutRevision(gitClient, q.Revision)
s.repoLock.Lock(gitClient.Root())
defer s.repoLock.Unlock(gitClient.Root())
commitSHA, err = checkoutRevision(gitClient, commitSHA)
if err != nil {
return nil, err
}
appPath := path.Join(appRepoPath, q.Path)
appPath := path.Join(gitClient.Root(), q.Path)
genRes, err := generateManifests(appPath, q)
if err != nil {
@@ -178,7 +170,7 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
res = *genRes
res.Revision = commitSHA
err = s.cache.Set(&cache.Item{
Key: cacheKey,
Key: manifestCacheKey(commitSHA, q),
Object: res,
Expiration: DefaultRepoCacheExpiration,
})
@@ -192,7 +184,7 @@ func (s *Service) GenerateManifest(c context.Context, q *ManifestRequest) (*Mani
func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, error) {
var targetObjs []*unstructured.Unstructured
var params []*v1alpha1.ComponentParameter
var env *app.EnvironmentConfig
var env *app.EnvironmentSpec
var err error
appSourceType := IdentifyAppSourceTypeByAppDir(appPath)
@@ -201,7 +193,11 @@ func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, e
targetObjs, params, env, err = ksShow(appPath, q.Environment, q.ComponentParameterOverrides)
case AppSourceHelm:
h := helm.NewHelmApp(appPath)
targetObjs, err = h.Template(q.AppLabel, q.ValueFiles, q.ComponentParameterOverrides)
err = h.DependencyBuild()
if err != nil {
return nil, err
}
targetObjs, err = h.Template(q.AppLabel, q.Namespace, q.ValueFiles, q.ComponentParameterOverrides)
if err != nil {
return nil, err
}
@@ -218,21 +214,40 @@ func generateManifests(appPath string, q *ManifestRequest) (*ManifestResponse, e
if err != nil {
return nil, err
}
// TODO(jessesuen): we need to sort objects based on their dependency order of creation
manifests := make([]string, len(targetObjs))
for i, target := range targetObjs {
if q.AppLabel != "" {
err = kube.SetLabel(target, common.LabelApplicationName, q.AppLabel)
manifests := make([]string, 0)
for _, obj := range targetObjs {
var targets []*unstructured.Unstructured
if obj.IsList() {
err = obj.EachListItem(func(object runtime.Object) error {
unstructuredObj, ok := object.(*unstructured.Unstructured)
if ok {
targets = append(targets, unstructuredObj)
return nil
} else {
return fmt.Errorf("resource list item has unexpected type")
}
})
if err != nil {
return nil, err
}
} else {
targets = []*unstructured.Unstructured{obj}
}
manifestStr, err := json.Marshal(target.Object)
if err != nil {
return nil, err
for _, target := range targets {
if q.AppLabel != "" && !kube.IsCRD(target) {
err = kube.SetLabel(target, common.LabelApplicationName, q.AppLabel)
if err != nil {
return nil, err
}
}
manifestStr, err := json.Marshal(target.Object)
if err != nil {
return nil, err
}
manifests = append(manifests, string(manifestStr))
}
manifests[i] = string(manifestStr)
}
res := ManifestResponse{
@@ -280,34 +295,39 @@ func IdentifyAppSourceTypeByAppPath(appFilePath string) AppSourceType {
}
// checkoutRevision is a convenience function to initialize a repo, fetch, and checkout a revision
func checkoutRevision(gitClient git.Client, revision string) error {
err := gitClient.Fetch()
// Returns the 40 character commit SHA after the checkout has been performed
func checkoutRevision(gitClient git.Client, commitSHA string) (string, error) {
err := gitClient.Init()
if err != nil {
return err
return "", status.Errorf(codes.Internal, "Failed to initialize git repo: %v", err)
}
err = gitClient.Reset()
err = gitClient.Fetch()
if err != nil {
log.Warn(err)
return "", status.Errorf(codes.Internal, "Failed to fetch git repo: %v", err)
}
err = gitClient.Checkout(revision)
err = gitClient.Checkout(commitSHA)
if err != nil {
return err
return "", status.Errorf(codes.Internal, "Failed to checkout %s: %v", commitSHA, err)
}
return nil
return gitClient.CommitSHA()
}
func manifestCacheKey(commitSHA string, q *ManifestRequest) string {
pStr, _ := json.Marshal(q.ComponentParameterOverrides)
valuesFiles := strings.Join(q.ValueFiles, ",")
return fmt.Sprintf("mfst|%s|%s|%s|%s|%s|%s", q.AppLabel, q.Path, q.Environment, commitSHA, string(pStr), valuesFiles)
return fmt.Sprintf("mfst|%s|%s|%s|%s|%s|%s|%s", q.AppLabel, q.Path, q.Environment, commitSHA, string(pStr), valuesFiles, q.Namespace)
}
// listDirCacheKey builds the cache key under which a ListDir result for the
// given commit is stored. The key layout ("ldir|<path>|<sha>") is identical to
// the fmt.Sprintf form it replaces.
func listDirCacheKey(commitSHA string, q *ListDirRequest) string {
	return "ldir|" + q.Path + "|" + commitSHA
}
// getFileCacheKey builds the cache key under which a GetFile result for the
// given commit is stored. The key layout ("gfile|<path>|<sha>") is identical to
// the fmt.Sprintf form it replaces.
func getFileCacheKey(commitSHA string, q *GetFileRequest) string {
	return "gfile|" + q.Path + "|" + commitSHA
}
// ksShow runs `ks show` in an app directory after setting any component parameter overrides
func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, []*v1alpha1.ComponentParameter, *app.EnvironmentConfig, error) {
func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) ([]*unstructured.Unstructured, []*v1alpha1.ComponentParameter, *app.EnvironmentSpec, error) {
ksApp, err := ksonnet.NewKsonnetApp(appPath)
if err != nil {
return nil, nil, nil, status.Errorf(codes.FailedPrecondition, "unable to load application from %s: %v", appPath, err)
@@ -336,7 +356,7 @@ func ksShow(appPath, envName string, overrides []*v1alpha1.ComponentParameter) (
return targetObjs, params, env, nil
}
var manifestFile = regexp.MustCompile(`^.*\.(yaml|yml|json)$`)
var manifestFile = regexp.MustCompile(`^.*\.(yaml|yml|json|jsonnet)$`)
// findManifests looks at all yaml files in a directory and unmarshals them into a list of unstructured objects
func findManifests(appPath string) ([]*unstructured.Unstructured, error) {
@@ -360,6 +380,29 @@ func findManifests(appPath string) ([]*unstructured.Unstructured, error) {
return nil, status.Errorf(codes.FailedPrecondition, "Failed to unmarshal %q: %v", f.Name(), err)
}
objs = append(objs, &obj)
} else if strings.HasSuffix(f.Name(), ".jsonnet") {
vm := jsonnet.MakeVM()
vm.Importer(&jsonnet.FileImporter{
JPaths: []string{appPath},
})
jsonStr, err := vm.EvaluateSnippet(f.Name(), string(out))
if err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "Failed to evaluate jsonnet %q: %v", f.Name(), err)
}
// attempt to unmarshal either array or single object
var jsonObjs []*unstructured.Unstructured
err = json.Unmarshal([]byte(jsonStr), &jsonObjs)
if err == nil {
objs = append(objs, jsonObjs...)
} else {
var jsonObj unstructured.Unstructured
err = json.Unmarshal([]byte(jsonStr), &jsonObj)
if err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "Failed to unmarshal generated json %q: %v", f.Name(), err)
}
objs = append(objs, &jsonObj)
}
} else {
yamlObjs, err := kube.SplitYAML(string(out))
if err != nil {
@@ -387,3 +430,18 @@ func pathExists(name string) bool {
}
return true
}
// newClientResolveRevision instantiates a git client for the given repository
// and resolves the requested revision to a commit SHA via ls-remote. It returns
// the client together with the resolved SHA.
func (s *Service) newClientResolveRevision(repo *v1alpha1.Repository, revision string) (git.Client, string, error) {
	client, err := s.gitFactory.NewClient(repo.Repo, tempRepoPath(repo.Repo), repo.Username, repo.Password, repo.SSHPrivateKey)
	if err != nil {
		return nil, "", err
	}
	sha, err := client.LsRemote(revision)
	if err != nil {
		return nil, "", err
	}
	return client, sha, nil
}

View File

@@ -1,29 +1,15 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: reposerver/repository/repository.proto
/*
Package repository is a generated protocol buffer package.
It is generated from these files:
reposerver/repository/repository.proto
It has these top-level messages:
ManifestRequest
ManifestResponse
ListDirRequest
FileList
GetFileRequest
GetFileResponse
*/
package repository
package repository // import "github.com/argoproj/argo-cd/reposerver/repository"
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import _ "k8s.io/api/core/v1"
import github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
@@ -43,21 +29,53 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// ManifestRequest is a query for manifest generation.
type ManifestRequest struct {
Repo *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo" json:"repo,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
Environment string `protobuf:"bytes,4,opt,name=environment,proto3" json:"environment,omitempty"`
AppLabel string `protobuf:"bytes,5,opt,name=appLabel,proto3" json:"appLabel,omitempty"`
ComponentParameterOverrides []*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ComponentParameter `protobuf:"bytes,6,rep,name=componentParameterOverrides" json:"componentParameterOverrides,omitempty"`
ValueFiles []string `protobuf:"bytes,7,rep,name=valueFiles" json:"valueFiles,omitempty"`
Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo" json:"repo,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
Environment string `protobuf:"bytes,4,opt,name=environment,proto3" json:"environment,omitempty"`
AppLabel string `protobuf:"bytes,5,opt,name=appLabel,proto3" json:"appLabel,omitempty"`
ComponentParameterOverrides []*v1alpha1.ComponentParameter `protobuf:"bytes,6,rep,name=componentParameterOverrides" json:"componentParameterOverrides,omitempty"`
ValueFiles []string `protobuf:"bytes,7,rep,name=valueFiles" json:"valueFiles,omitempty"`
Namespace string `protobuf:"bytes,8,opt,name=namespace,proto3" json:"namespace,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ManifestRequest) Reset() { *m = ManifestRequest{} }
func (m *ManifestRequest) String() string { return proto.CompactTextString(m) }
func (*ManifestRequest) ProtoMessage() {}
func (*ManifestRequest) Descriptor() ([]byte, []int) { return fileDescriptorRepository, []int{0} }
func (m *ManifestRequest) Reset() { *m = ManifestRequest{} }
func (m *ManifestRequest) String() string { return proto.CompactTextString(m) }
func (*ManifestRequest) ProtoMessage() {}
func (*ManifestRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_49651600e73b0b40, []int{0}
}
func (m *ManifestRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
// XXX_Marshal serializes the message. In deterministic mode it delegates to the
// proto message-info marshaller; otherwise it marshals directly into b's spare
// capacity via MarshalTo.
func (m *ManifestRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ManifestRequest.Marshal(b, m, deterministic)
	}
	b = b[:cap(b)]
	n, err := m.MarshalTo(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (dst *ManifestRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ManifestRequest.Merge(dst, src)
}
func (m *ManifestRequest) XXX_Size() int {
return m.Size()
}
func (m *ManifestRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ManifestRequest.DiscardUnknown(m)
}
func (m *ManifestRequest) GetRepo() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository {
var xxx_messageInfo_ManifestRequest proto.InternalMessageInfo
func (m *ManifestRequest) GetRepo() *v1alpha1.Repository {
if m != nil {
return m.Repo
}
@@ -92,7 +110,7 @@ func (m *ManifestRequest) GetAppLabel() string {
return ""
}
func (m *ManifestRequest) GetComponentParameterOverrides() []*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ComponentParameter {
func (m *ManifestRequest) GetComponentParameterOverrides() []*v1alpha1.ComponentParameter {
if m != nil {
return m.ComponentParameterOverrides
}
@@ -106,18 +124,56 @@ func (m *ManifestRequest) GetValueFiles() []string {
return nil
}
type ManifestResponse struct {
Manifests []string `protobuf:"bytes,1,rep,name=manifests" json:"manifests,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
Server string `protobuf:"bytes,3,opt,name=server,proto3" json:"server,omitempty"`
Revision string `protobuf:"bytes,4,opt,name=revision,proto3" json:"revision,omitempty"`
Params []*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ComponentParameter `protobuf:"bytes,5,rep,name=params" json:"params,omitempty"`
// GetNamespace returns the Namespace field; a nil receiver yields the empty string.
func (m *ManifestRequest) GetNamespace() string {
	if m == nil {
		return ""
	}
	return m.Namespace
}
func (m *ManifestResponse) Reset() { *m = ManifestResponse{} }
func (m *ManifestResponse) String() string { return proto.CompactTextString(m) }
func (*ManifestResponse) ProtoMessage() {}
func (*ManifestResponse) Descriptor() ([]byte, []int) { return fileDescriptorRepository, []int{1} }
type ManifestResponse struct {
Manifests []string `protobuf:"bytes,1,rep,name=manifests" json:"manifests,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
Server string `protobuf:"bytes,3,opt,name=server,proto3" json:"server,omitempty"`
Revision string `protobuf:"bytes,4,opt,name=revision,proto3" json:"revision,omitempty"`
Params []*v1alpha1.ComponentParameter `protobuf:"bytes,5,rep,name=params" json:"params,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ManifestResponse) Reset() { *m = ManifestResponse{} }
func (m *ManifestResponse) String() string { return proto.CompactTextString(m) }
func (*ManifestResponse) ProtoMessage() {}
func (*ManifestResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_49651600e73b0b40, []int{1}
}
func (m *ManifestResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ManifestResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ManifestResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ManifestResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ManifestResponse.Merge(dst, src)
}
func (m *ManifestResponse) XXX_Size() int {
return m.Size()
}
func (m *ManifestResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ManifestResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ManifestResponse proto.InternalMessageInfo
func (m *ManifestResponse) GetManifests() []string {
if m != nil {
@@ -147,7 +203,7 @@ func (m *ManifestResponse) GetRevision() string {
return ""
}
func (m *ManifestResponse) GetParams() []*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ComponentParameter {
func (m *ManifestResponse) GetParams() []*v1alpha1.ComponentParameter {
if m != nil {
return m.Params
}
@@ -156,17 +212,48 @@ func (m *ManifestResponse) GetParams() []*github_com_argoproj_argo_cd_pkg_apis_a
// ListDirRequest requests a repository directory structure
type ListDirRequest struct {
Repo *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo" json:"repo,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo" json:"repo,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListDirRequest) Reset() { *m = ListDirRequest{} }
func (m *ListDirRequest) String() string { return proto.CompactTextString(m) }
func (*ListDirRequest) ProtoMessage() {}
func (*ListDirRequest) Descriptor() ([]byte, []int) { return fileDescriptorRepository, []int{2} }
func (m *ListDirRequest) Reset() { *m = ListDirRequest{} }
func (m *ListDirRequest) String() string { return proto.CompactTextString(m) }
func (*ListDirRequest) ProtoMessage() {}
func (*ListDirRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_49651600e73b0b40, []int{2}
}
func (m *ListDirRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ListDirRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ListDirRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ListDirRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListDirRequest.Merge(dst, src)
}
func (m *ListDirRequest) XXX_Size() int {
return m.Size()
}
func (m *ListDirRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListDirRequest.DiscardUnknown(m)
}
func (m *ListDirRequest) GetRepo() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository {
var xxx_messageInfo_ListDirRequest proto.InternalMessageInfo
func (m *ListDirRequest) GetRepo() *v1alpha1.Repository {
if m != nil {
return m.Repo
}
@@ -189,13 +276,44 @@ func (m *ListDirRequest) GetPath() string {
// FileList returns the contents of the repo of a ListDir request
type FileList struct {
Items []string `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"`
Items []string `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *FileList) Reset() { *m = FileList{} }
func (m *FileList) String() string { return proto.CompactTextString(m) }
func (*FileList) ProtoMessage() {}
func (*FileList) Descriptor() ([]byte, []int) { return fileDescriptorRepository, []int{3} }
func (m *FileList) Reset() { *m = FileList{} }
func (m *FileList) String() string { return proto.CompactTextString(m) }
func (*FileList) ProtoMessage() {}
func (*FileList) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_49651600e73b0b40, []int{3}
}
func (m *FileList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *FileList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_FileList.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *FileList) XXX_Merge(src proto.Message) {
xxx_messageInfo_FileList.Merge(dst, src)
}
func (m *FileList) XXX_Size() int {
return m.Size()
}
func (m *FileList) XXX_DiscardUnknown() {
xxx_messageInfo_FileList.DiscardUnknown(m)
}
var xxx_messageInfo_FileList proto.InternalMessageInfo
func (m *FileList) GetItems() []string {
if m != nil {
@@ -206,17 +324,48 @@ func (m *FileList) GetItems() []string {
// GetFileRequest is a query to retrieve a single file (by repo, revision, and path) from a repository
type GetFileRequest struct {
Repo *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo" json:"repo,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo" json:"repo,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetFileRequest) Reset() { *m = GetFileRequest{} }
func (m *GetFileRequest) String() string { return proto.CompactTextString(m) }
func (*GetFileRequest) ProtoMessage() {}
func (*GetFileRequest) Descriptor() ([]byte, []int) { return fileDescriptorRepository, []int{4} }
func (m *GetFileRequest) Reset() { *m = GetFileRequest{} }
func (m *GetFileRequest) String() string { return proto.CompactTextString(m) }
func (*GetFileRequest) ProtoMessage() {}
func (*GetFileRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_49651600e73b0b40, []int{4}
}
func (m *GetFileRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetFileRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetFileRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetFileRequest.Merge(dst, src)
}
func (m *GetFileRequest) XXX_Size() int {
return m.Size()
}
func (m *GetFileRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetFileRequest.DiscardUnknown(m)
}
func (m *GetFileRequest) GetRepo() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository {
var xxx_messageInfo_GetFileRequest proto.InternalMessageInfo
func (m *GetFileRequest) GetRepo() *v1alpha1.Repository {
if m != nil {
return m.Repo
}
@@ -239,13 +388,44 @@ func (m *GetFileRequest) GetPath() string {
// GetFileResponse returns the contents of the file of a GetFile request
type GetFileResponse struct {
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetFileResponse) Reset() { *m = GetFileResponse{} }
func (m *GetFileResponse) String() string { return proto.CompactTextString(m) }
func (*GetFileResponse) ProtoMessage() {}
func (*GetFileResponse) Descriptor() ([]byte, []int) { return fileDescriptorRepository, []int{5} }
func (m *GetFileResponse) Reset() { *m = GetFileResponse{} }
func (m *GetFileResponse) String() string { return proto.CompactTextString(m) }
func (*GetFileResponse) ProtoMessage() {}
func (*GetFileResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_repository_49651600e73b0b40, []int{5}
}
func (m *GetFileResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetFileResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetFileResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetFileResponse.Merge(dst, src)
}
func (m *GetFileResponse) XXX_Size() int {
return m.Size()
}
func (m *GetFileResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetFileResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetFileResponse proto.InternalMessageInfo
func (m *GetFileResponse) GetData() []byte {
if m != nil {
@@ -292,7 +472,7 @@ func NewRepositoryServiceClient(cc *grpc.ClientConn) RepositoryServiceClient {
func (c *repositoryServiceClient) GenerateManifest(ctx context.Context, in *ManifestRequest, opts ...grpc.CallOption) (*ManifestResponse, error) {
out := new(ManifestResponse)
err := grpc.Invoke(ctx, "/repository.RepositoryService/GenerateManifest", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/repository.RepositoryService/GenerateManifest", in, out, opts...)
if err != nil {
return nil, err
}
@@ -301,7 +481,7 @@ func (c *repositoryServiceClient) GenerateManifest(ctx context.Context, in *Mani
func (c *repositoryServiceClient) ListDir(ctx context.Context, in *ListDirRequest, opts ...grpc.CallOption) (*FileList, error) {
out := new(FileList)
err := grpc.Invoke(ctx, "/repository.RepositoryService/ListDir", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/repository.RepositoryService/ListDir", in, out, opts...)
if err != nil {
return nil, err
}
@@ -310,7 +490,7 @@ func (c *repositoryServiceClient) ListDir(ctx context.Context, in *ListDirReques
func (c *repositoryServiceClient) GetFile(ctx context.Context, in *GetFileRequest, opts ...grpc.CallOption) (*GetFileResponse, error) {
out := new(GetFileResponse)
err := grpc.Invoke(ctx, "/repository.RepositoryService/GetFile", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/repository.RepositoryService/GetFile", in, out, opts...)
if err != nil {
return nil, err
}
@@ -483,6 +663,15 @@ func (m *ManifestRequest) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], s)
}
}
if len(m.Namespace) > 0 {
dAtA[i] = 0x42
i++
i = encodeVarintRepository(dAtA, i, uint64(len(m.Namespace)))
i += copy(dAtA[i:], m.Namespace)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -546,6 +735,9 @@ func (m *ManifestResponse) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -586,6 +778,9 @@ func (m *ListDirRequest) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRepository(dAtA, i, uint64(len(m.Path)))
i += copy(dAtA[i:], m.Path)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -619,6 +814,9 @@ func (m *FileList) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], s)
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -659,6 +857,9 @@ func (m *GetFileRequest) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRepository(dAtA, i, uint64(len(m.Path)))
i += copy(dAtA[i:], m.Path)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -683,6 +884,9 @@ func (m *GetFileResponse) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRepository(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -730,6 +934,13 @@ func (m *ManifestRequest) Size() (n int) {
n += 1 + l + sovRepository(uint64(l))
}
}
l = len(m.Namespace)
if l > 0 {
n += 1 + l + sovRepository(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -760,6 +971,9 @@ func (m *ManifestResponse) Size() (n int) {
n += 1 + l + sovRepository(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -778,6 +992,9 @@ func (m *ListDirRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovRepository(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -790,6 +1007,9 @@ func (m *FileList) Size() (n int) {
n += 1 + l + sovRepository(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -808,6 +1028,9 @@ func (m *GetFileRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovRepository(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -818,6 +1041,9 @@ func (m *GetFileResponse) Size() (n int) {
if l > 0 {
n += 1 + l + sovRepository(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -890,7 +1116,7 @@ func (m *ManifestRequest) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Repo == nil {
m.Repo = &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository{}
m.Repo = &v1alpha1.Repository{}
}
if err := m.Repo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -1038,7 +1264,7 @@ func (m *ManifestRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ComponentParameterOverrides = append(m.ComponentParameterOverrides, &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ComponentParameter{})
m.ComponentParameterOverrides = append(m.ComponentParameterOverrides, &v1alpha1.ComponentParameter{})
if err := m.ComponentParameterOverrides[len(m.ComponentParameterOverrides)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -1072,6 +1298,35 @@ func (m *ManifestRequest) Unmarshal(dAtA []byte) error {
}
m.ValueFiles = append(m.ValueFiles, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRepository
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthRepository
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Namespace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipRepository(dAtA[iNdEx:])
@@ -1084,6 +1339,7 @@ func (m *ManifestRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1264,7 +1520,7 @@ func (m *ManifestResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Params = append(m.Params, &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ComponentParameter{})
m.Params = append(m.Params, &v1alpha1.ComponentParameter{})
if err := m.Params[len(m.Params)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -1281,6 +1537,7 @@ func (m *ManifestResponse) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1346,7 +1603,7 @@ func (m *ListDirRequest) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Repo == nil {
m.Repo = &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository{}
m.Repo = &v1alpha1.Repository{}
}
if err := m.Repo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -1422,6 +1679,7 @@ func (m *ListDirRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1501,6 +1759,7 @@ func (m *FileList) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1566,7 +1825,7 @@ func (m *GetFileRequest) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Repo == nil {
m.Repo = &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Repository{}
m.Repo = &v1alpha1.Repository{}
}
if err := m.Repo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -1642,6 +1901,7 @@ func (m *GetFileRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1723,6 +1983,7 @@ func (m *GetFileResponse) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1837,44 +2098,47 @@ var (
ErrIntOverflowRepository = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("reposerver/repository/repository.proto", fileDescriptorRepository) }
var fileDescriptorRepository = []byte{
// 576 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xcd, 0x6e, 0xd3, 0x40,
0x10, 0xae, 0xc9, 0x5f, 0xb3, 0x41, 0xb4, 0xac, 0x22, 0x64, 0x39, 0x51, 0x64, 0x59, 0x02, 0xe5,
0x82, 0xad, 0x84, 0x0b, 0x17, 0x84, 0x04, 0x85, 0x0a, 0xa9, 0x55, 0x91, 0x39, 0xc1, 0x05, 0x6d,
0x9c, 0xc1, 0x59, 0x62, 0xef, 0x2e, 0xbb, 0x1b, 0x4b, 0xbc, 0x03, 0x12, 0x0f, 0xc0, 0x0b, 0x71,
0xe4, 0x11, 0x50, 0x6e, 0x7d, 0x0b, 0xe4, 0x8d, 0x1d, 0x3b, 0x6d, 0xd4, 0x0b, 0xaa, 0xd4, 0xdb,
0xcc, 0x37, 0xb3, 0xf3, 0xcd, 0x7e, 0x33, 0x1a, 0xf4, 0x44, 0x82, 0xe0, 0x0a, 0x64, 0x06, 0x32,
0x30, 0x26, 0xd5, 0x5c, 0x7e, 0xaf, 0x99, 0xbe, 0x90, 0x5c, 0x73, 0x8c, 0x2a, 0xc4, 0xe9, 0xc7,
0x3c, 0xe6, 0x06, 0x0e, 0x72, 0x6b, 0x93, 0xe1, 0x0c, 0x63, 0xce, 0xe3, 0x04, 0x02, 0x22, 0x68,
0x40, 0x18, 0xe3, 0x9a, 0x68, 0xca, 0x99, 0x2a, 0xa2, 0xde, 0xf2, 0xb9, 0xf2, 0x29, 0x37, 0xd1,
0x88, 0x4b, 0x08, 0xb2, 0x49, 0x10, 0x03, 0x03, 0x49, 0x34, 0xcc, 0x8b, 0x9c, 0x77, 0x31, 0xd5,
0x8b, 0xd5, 0xcc, 0x8f, 0x78, 0x1a, 0x10, 0x69, 0x28, 0xbe, 0x1a, 0xe3, 0x69, 0x34, 0x0f, 0xc4,
0x32, 0xce, 0x1f, 0xab, 0x80, 0x08, 0x91, 0xd0, 0xc8, 0x14, 0x0f, 0xb2, 0x09, 0x49, 0xc4, 0x82,
0x5c, 0x2b, 0xe5, 0xfd, 0x68, 0xa0, 0xa3, 0x73, 0xc2, 0xe8, 0x17, 0x50, 0x3a, 0x84, 0x6f, 0x2b,
0x50, 0x1a, 0x7f, 0x44, 0xcd, 0xfc, 0x13, 0xb6, 0xe5, 0x5a, 0xe3, 0xde, 0xf4, 0x8d, 0x5f, 0xb1,
0xf9, 0x25, 0x9b, 0x31, 0x3e, 0x47, 0x73, 0x5f, 0x2c, 0x63, 0x3f, 0x67, 0xf3, 0x6b, 0x6c, 0x7e,
0xc9, 0xe6, 0x87, 0x5b, 0x2d, 0x42, 0x53, 0x12, 0x3b, 0xe8, 0x50, 0x42, 0x46, 0x15, 0xe5, 0xcc,
0xbe, 0xe7, 0x5a, 0xe3, 0x6e, 0xb8, 0xf5, 0x31, 0x46, 0x4d, 0x41, 0xf4, 0xc2, 0x6e, 0x18, 0xdc,
0xd8, 0xd8, 0x45, 0x3d, 0x60, 0x19, 0x95, 0x9c, 0xa5, 0xc0, 0xb4, 0xdd, 0x34, 0xa1, 0x3a, 0x94,
0x57, 0x24, 0x42, 0x9c, 0x91, 0x19, 0x24, 0x76, 0x6b, 0x53, 0xb1, 0xf4, 0xf1, 0x4f, 0x0b, 0x0d,
0x22, 0x9e, 0x0a, 0xce, 0x80, 0xe9, 0xf7, 0x44, 0x92, 0x14, 0x34, 0xc8, 0x8b, 0x0c, 0xa4, 0xa4,
0x73, 0x50, 0x76, 0xdb, 0x6d, 0x8c, 0x7b, 0xd3, 0xf3, 0xff, 0xf8, 0xe0, 0xeb, 0x6b, 0xd5, 0xc3,
0x9b, 0x18, 0xf1, 0x08, 0xa1, 0x8c, 0x24, 0x2b, 0x78, 0x4b, 0x13, 0x50, 0x76, 0xc7, 0x6d, 0x8c,
0xbb, 0x61, 0x0d, 0xf1, 0x2e, 0x2d, 0x74, 0x5c, 0x8d, 0x43, 0x09, 0xce, 0x14, 0xe0, 0x21, 0xea,
0xa6, 0x05, 0xa6, 0x6c, 0xcb, 0xbc, 0xa9, 0x80, 0x3c, 0xca, 0x48, 0x0a, 0x4a, 0x90, 0x08, 0x0a,
0x4d, 0x2b, 0x00, 0x3f, 0x42, 0xed, 0xcd, 0xd2, 0x16, 0xb2, 0x16, 0xde, 0xce, 0x20, 0x9a, 0x57,
0x06, 0x01, 0xa8, 0x2d, 0xf2, 0xd6, 0x95, 0xdd, 0xba, 0x0d, 0x81, 0x8a, 0xe2, 0xde, 0x2f, 0x0b,
0x3d, 0x38, 0xa3, 0x4a, 0x9f, 0x50, 0x79, 0xf7, 0x36, 0xcf, 0x73, 0xd1, 0x61, 0x3e, 0x92, 0xbc,
0x41, 0xdc, 0x47, 0x2d, 0xaa, 0x21, 0x2d, 0xc5, 0xdf, 0x38, 0xa6, 0xff, 0x53, 0xd0, 0x79, 0xd6,
0x1d, 0xec, 0xff, 0x31, 0x3a, 0xda, 0x36, 0x57, 0xec, 0x11, 0x46, 0xcd, 0x39, 0xd1, 0xc4, 0x74,
0x77, 0x3f, 0x34, 0xf6, 0xf4, 0xd2, 0x42, 0x0f, 0x2b, 0xae, 0x0f, 0x20, 0x33, 0x1a, 0x01, 0xbe,
0x40, 0xc7, 0xa7, 0xc5, 0xa1, 0x28, 0xb7, 0x11, 0x0f, 0xfc, 0xda, 0xad, 0xbb, 0x72, 0x32, 0x9c,
0xe1, 0xfe, 0xe0, 0x86, 0xd8, 0x3b, 0xc0, 0x2f, 0x50, 0xa7, 0x18, 0x35, 0x76, 0xea, 0xa9, 0xbb,
0xf3, 0x77, 0xfa, 0xf5, 0x58, 0x29, 0xbf, 0x77, 0x80, 0x4f, 0x50, 0xa7, 0xf8, 0xcc, 0xee, 0xf3,
0x5d, 0xf9, 0x9d, 0xc1, 0xde, 0x58, 0xd9, 0xc4, 0xab, 0x97, 0xbf, 0xd7, 0x23, 0xeb, 0xcf, 0x7a,
0x64, 0xfd, 0x5d, 0x8f, 0xac, 0x4f, 0x93, 0x9b, 0x8e, 0xe8, 0xde, 0x63, 0x3f, 0x6b, 0x9b, 0x9b,
0xf9, 0xec, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0x81, 0x49, 0x46, 0x0c, 0x06, 0x00, 0x00,
func init() {
proto.RegisterFile("reposerver/repository/repository.proto", fileDescriptor_repository_49651600e73b0b40)
}
var fileDescriptor_repository_49651600e73b0b40 = []byte{
// 584 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0xdd, 0x8a, 0xd3, 0x40,
0x14, 0xde, 0x6c, 0xbb, 0xdd, 0x76, 0x2a, 0xee, 0x3a, 0x14, 0x09, 0x69, 0x29, 0x21, 0xa0, 0xf4,
0xc6, 0x84, 0xd6, 0x1b, 0x6f, 0x44, 0xd0, 0xd5, 0x45, 0xd8, 0x65, 0x25, 0x5e, 0xe9, 0x8d, 0x4c,
0xd3, 0x63, 0x3a, 0x36, 0x99, 0x19, 0x67, 0xa6, 0x01, 0x9f, 0xc2, 0x07, 0xf0, 0x0d, 0x7c, 0x12,
0x2f, 0x7d, 0x04, 0xe9, 0xdd, 0xbe, 0x85, 0x64, 0x9a, 0x34, 0x69, 0xb7, 0xec, 0x8d, 0x08, 0x7b,
0x77, 0xe6, 0x3b, 0x27, 0xdf, 0x77, 0xfe, 0x38, 0x41, 0x8f, 0x25, 0x08, 0xae, 0x40, 0x66, 0x20,
0x03, 0x63, 0x52, 0xcd, 0xe5, 0xb7, 0x9a, 0xe9, 0x0b, 0xc9, 0x35, 0xc7, 0xa8, 0x42, 0x9c, 0x5e,
0xcc, 0x63, 0x6e, 0xe0, 0x20, 0xb7, 0xd6, 0x11, 0xce, 0x20, 0xe6, 0x3c, 0x4e, 0x20, 0x20, 0x82,
0x06, 0x84, 0x31, 0xae, 0x89, 0xa6, 0x9c, 0xa9, 0xc2, 0xeb, 0x2d, 0x9e, 0x29, 0x9f, 0x72, 0xe3,
0x8d, 0xb8, 0x84, 0x20, 0x1b, 0x07, 0x31, 0x30, 0x90, 0x44, 0xc3, 0xac, 0x88, 0x79, 0x1b, 0x53,
0x3d, 0x5f, 0x4e, 0xfd, 0x88, 0xa7, 0x01, 0x91, 0x46, 0xe2, 0x8b, 0x31, 0x9e, 0x44, 0xb3, 0x40,
0x2c, 0xe2, 0xfc, 0x63, 0x15, 0x10, 0x21, 0x12, 0x1a, 0x19, 0xf2, 0x20, 0x1b, 0x93, 0x44, 0xcc,
0xc9, 0x0d, 0x2a, 0xef, 0x67, 0x03, 0x9d, 0x5c, 0x12, 0x46, 0x3f, 0x83, 0xd2, 0x21, 0x7c, 0x5d,
0x82, 0xd2, 0xf8, 0x03, 0x6a, 0xe6, 0x45, 0xd8, 0x96, 0x6b, 0x8d, 0xba, 0x93, 0xd7, 0x7e, 0xa5,
0xe6, 0x97, 0x6a, 0xc6, 0xf8, 0x14, 0xcd, 0x7c, 0xb1, 0x88, 0xfd, 0x5c, 0xcd, 0xaf, 0xa9, 0xf9,
0xa5, 0x9a, 0x1f, 0x6e, 0x7a, 0x11, 0x1a, 0x4a, 0xec, 0xa0, 0xb6, 0x84, 0x8c, 0x2a, 0xca, 0x99,
0x7d, 0xe8, 0x5a, 0xa3, 0x4e, 0xb8, 0x79, 0x63, 0x8c, 0x9a, 0x82, 0xe8, 0xb9, 0xdd, 0x30, 0xb8,
0xb1, 0xb1, 0x8b, 0xba, 0xc0, 0x32, 0x2a, 0x39, 0x4b, 0x81, 0x69, 0xbb, 0x69, 0x5c, 0x75, 0x28,
0x67, 0x24, 0x42, 0x5c, 0x90, 0x29, 0x24, 0xf6, 0xd1, 0x9a, 0xb1, 0x7c, 0xe3, 0xef, 0x16, 0xea,
0x47, 0x3c, 0x15, 0x9c, 0x01, 0xd3, 0xef, 0x88, 0x24, 0x29, 0x68, 0x90, 0x57, 0x19, 0x48, 0x49,
0x67, 0xa0, 0xec, 0x96, 0xdb, 0x18, 0x75, 0x27, 0x97, 0xff, 0x50, 0xe0, 0xab, 0x1b, 0xec, 0xe1,
0x6d, 0x8a, 0x78, 0x88, 0x50, 0x46, 0x92, 0x25, 0xbc, 0xa1, 0x09, 0x28, 0xfb, 0xd8, 0x6d, 0x8c,
0x3a, 0x61, 0x0d, 0xc1, 0x03, 0xd4, 0x61, 0x24, 0x05, 0x25, 0x48, 0x04, 0x76, 0xdb, 0x94, 0x53,
0x01, 0xde, 0xb5, 0x85, 0x4e, 0xab, 0x61, 0x29, 0xc1, 0x99, 0x82, 0xfc, 0x93, 0xb4, 0xc0, 0x94,
0x6d, 0x19, 0xc6, 0x0a, 0xd8, 0x26, 0x3c, 0xdc, 0x21, 0xc4, 0x0f, 0x51, 0x6b, 0xbd, 0xd2, 0x45,
0xd3, 0x8b, 0xd7, 0xd6, 0x98, 0x9a, 0x3b, 0x63, 0x02, 0xd4, 0x12, 0x79, 0x61, 0xca, 0x3e, 0xfa,
0x1f, 0xed, 0x2b, 0xc8, 0xbd, 0x1f, 0x16, 0xba, 0x7f, 0x41, 0x95, 0x3e, 0xa3, 0xf2, 0xee, 0xed,
0xa5, 0xe7, 0xa2, 0x76, 0x3e, 0xb0, 0x3c, 0x41, 0xdc, 0x43, 0x47, 0x54, 0x43, 0x5a, 0x36, 0x7f,
0xfd, 0x30, 0xf9, 0x9f, 0x83, 0xce, 0xa3, 0xee, 0x60, 0xfe, 0x8f, 0xd0, 0xc9, 0x26, 0xb9, 0x62,
0x8f, 0x30, 0x6a, 0xce, 0x88, 0x26, 0x26, 0xbb, 0x7b, 0xa1, 0xb1, 0x27, 0xd7, 0x16, 0x7a, 0x50,
0x69, 0xbd, 0x07, 0x99, 0xd1, 0x08, 0xf0, 0x15, 0x3a, 0x3d, 0x2f, 0xce, 0x48, 0xb9, 0x8d, 0xb8,
0xef, 0xd7, 0x2e, 0xe1, 0xce, 0x41, 0x71, 0x06, 0xfb, 0x9d, 0x6b, 0x61, 0xef, 0x00, 0x3f, 0x47,
0xc7, 0xc5, 0xa8, 0xb1, 0x53, 0x0f, 0xdd, 0x9e, 0xbf, 0xd3, 0xab, 0xfb, 0xca, 0xf6, 0x7b, 0x07,
0xf8, 0x0c, 0x1d, 0x17, 0xc5, 0x6c, 0x7f, 0xbe, 0xdd, 0x7e, 0xa7, 0xbf, 0xd7, 0x57, 0x26, 0xf1,
0xf2, 0xc5, 0xaf, 0xd5, 0xd0, 0xfa, 0xbd, 0x1a, 0x5a, 0x7f, 0x56, 0x43, 0xeb, 0xe3, 0xf8, 0xb6,
0x13, 0xbb, 0xf7, 0x57, 0x30, 0x6d, 0x99, 0x8b, 0xfa, 0xf4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff,
0x22, 0x8e, 0xa1, 0x51, 0x2a, 0x06, 0x00, 0x00,
}

View File

@@ -17,6 +17,7 @@ message ManifestRequest {
string appLabel = 5;
repeated github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.ComponentParameter componentParameterOverrides = 6;
repeated string valueFiles = 7;
string namespace = 8;
}
message ManifestResponse {

View File

@@ -6,14 +6,24 @@ import (
"github.com/stretchr/testify/assert"
)
func TestGenerateManifestInDir(t *testing.T) {
func TestGenerateYamlManifestInDir(t *testing.T) {
// update this value if we add/remove manifests
const countOfManifests = 22
q := ManifestRequest{}
res1, err := generateManifests("../../manifests/components", &q)
res1, err := generateManifests("../../manifests/base", &q)
assert.Nil(t, err)
assert.True(t, len(res1.Manifests) == 16) // update this value if we add/remove manifests
assert.Equal(t, len(res1.Manifests), countOfManifests)
// this will test concatenated manifests to verify we split YAMLs correctly
res2, err := generateManifests("../../manifests", &q)
res2, err := generateManifests("./testdata/concatenated", &q)
assert.Nil(t, err)
assert.True(t, len(res2.Manifests) == len(res1.Manifests))
assert.Equal(t, 3, len(res2.Manifests))
}
func TestGenerateJsonnetManifestInDir(t *testing.T) {
q := ManifestRequest{}
res1, err := generateManifests("./testdata/jsonnet", &q)
assert.Nil(t, err)
assert.Equal(t, len(res1.Manifests), 2)
}

View File

@@ -0,0 +1,17 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: sa1
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: sa2
---
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: sa3
---

View File

@@ -0,0 +1,58 @@
local params = import 'params.libsonnet';
[
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": params.name
},
"spec": {
"ports": [
{
"port": params.servicePort,
"targetPort": params.containerPort
}
],
"selector": {
"app": params.name
},
"type": params.type
}
},
{
"apiVersion": "apps/v1beta2",
"kind": "Deployment",
"metadata": {
"name": params.name
},
"spec": {
"replicas": params.replicas,
"selector": {
"matchLabels": {
"app": params.name
},
},
"template": {
"metadata": {
"labels": {
"app": params.name
}
},
"spec": {
"containers": [
{
"image": params.image,
"name": params.name,
"ports": [
{
"containerPort": params.containerPort
}
]
}
]
}
}
}
}
]

View File

@@ -0,0 +1,8 @@
{
containerPort: 80,
image: "gcr.io/heptio-images/ks-guestbook-demo:0.2",
name: "guestbook-ui",
replicas: 1,
servicePort: 80,
type: "LoadBalancer",
}

View File

@@ -1,15 +1,19 @@
package reposerver
import (
"crypto/tls"
"github.com/argoproj/argo-cd/reposerver/repository"
"github.com/argoproj/argo-cd/server/version"
"github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/git"
grpc_util "github.com/argoproj/argo-cd/util/grpc"
tlsutil "github.com/argoproj/argo-cd/util/tls"
"github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/reflection"
)
@@ -18,28 +22,51 @@ type ArgoCDRepoServer struct {
log *log.Entry
gitFactory git.ClientFactory
cache cache.Cache
opts []grpc.ServerOption
}
// NewServer returns a new instance of the ArgoCD Repo server
func NewServer(gitFactory git.ClientFactory, cache cache.Cache) *ArgoCDRepoServer {
func NewServer(gitFactory git.ClientFactory, cache cache.Cache, tlsConfCustomizer tlsutil.ConfigCustomizer) (*ArgoCDRepoServer, error) {
// generate TLS cert
hosts := []string{
"localhost",
"argocd-repo-server",
}
cert, err := tlsutil.GenerateX509KeyPair(tlsutil.CertOptions{
Hosts: hosts,
Organization: "Argo CD",
IsCA: true,
})
if err != nil {
return nil, err
}
tlsConfig := &tls.Config{Certificates: []tls.Certificate{*cert}}
tlsConfCustomizer(tlsConfig)
opts := []grpc.ServerOption{grpc.Creds(credentials.NewTLS(tlsConfig))}
return &ArgoCDRepoServer{
log: log.NewEntry(log.New()),
gitFactory: gitFactory,
cache: cache,
}
opts: opts,
}, nil
}
// CreateGRPC creates new configured grpc server
func (a *ArgoCDRepoServer) CreateGRPC() *grpc.Server {
server := grpc.NewServer(
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_logrus.StreamServerInterceptor(a.log),
grpc_util.PanicLoggerStreamServerInterceptor(a.log),
)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
grpc_logrus.UnaryServerInterceptor(a.log),
grpc_util.PanicLoggerUnaryServerInterceptor(a.log),
)),
append(a.opts,
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_logrus.StreamServerInterceptor(a.log),
grpc_util.PanicLoggerStreamServerInterceptor(a.log),
)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
grpc_logrus.UnaryServerInterceptor(a.log),
grpc_util.PanicLoggerUnaryServerInterceptor(a.log),
)))...,
)
version.RegisterVersionServiceServer(server, &version.Server{})
manifestService := repository.NewService(a.gitFactory, a.cache)

View File

@@ -1,17 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: server/account/account.proto
/*
Package account is a generated protocol buffer package.
It is generated from these files:
server/account/account.proto
It has these top-level messages:
UpdatePasswordRequest
UpdatePasswordResponse
*/
package account
package account // import "github.com/argoproj/argo-cd/server/account"
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
@@ -36,14 +26,45 @@ var _ = math.Inf
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type UpdatePasswordRequest struct {
NewPassword string `protobuf:"bytes,1,opt,name=newPassword,proto3" json:"newPassword,omitempty"`
CurrentPassword string `protobuf:"bytes,2,opt,name=currentPassword,proto3" json:"currentPassword,omitempty"`
NewPassword string `protobuf:"bytes,1,opt,name=newPassword,proto3" json:"newPassword,omitempty"`
CurrentPassword string `protobuf:"bytes,2,opt,name=currentPassword,proto3" json:"currentPassword,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UpdatePasswordRequest) Reset() { *m = UpdatePasswordRequest{} }
func (m *UpdatePasswordRequest) String() string { return proto.CompactTextString(m) }
func (*UpdatePasswordRequest) ProtoMessage() {}
func (*UpdatePasswordRequest) Descriptor() ([]byte, []int) { return fileDescriptorAccount, []int{0} }
func (m *UpdatePasswordRequest) Reset() { *m = UpdatePasswordRequest{} }
func (m *UpdatePasswordRequest) String() string { return proto.CompactTextString(m) }
func (*UpdatePasswordRequest) ProtoMessage() {}
func (*UpdatePasswordRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_account_d27ff2bbd0f6944b, []int{0}
}
func (m *UpdatePasswordRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *UpdatePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_UpdatePasswordRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *UpdatePasswordRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_UpdatePasswordRequest.Merge(dst, src)
}
func (m *UpdatePasswordRequest) XXX_Size() int {
return m.Size()
}
func (m *UpdatePasswordRequest) XXX_DiscardUnknown() {
xxx_messageInfo_UpdatePasswordRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UpdatePasswordRequest proto.InternalMessageInfo
func (m *UpdatePasswordRequest) GetNewPassword() string {
if m != nil {
@@ -60,12 +81,43 @@ func (m *UpdatePasswordRequest) GetCurrentPassword() string {
}
type UpdatePasswordResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UpdatePasswordResponse) Reset() { *m = UpdatePasswordResponse{} }
func (m *UpdatePasswordResponse) String() string { return proto.CompactTextString(m) }
func (*UpdatePasswordResponse) ProtoMessage() {}
func (*UpdatePasswordResponse) Descriptor() ([]byte, []int) { return fileDescriptorAccount, []int{1} }
func (m *UpdatePasswordResponse) Reset() { *m = UpdatePasswordResponse{} }
func (m *UpdatePasswordResponse) String() string { return proto.CompactTextString(m) }
func (*UpdatePasswordResponse) ProtoMessage() {}
func (*UpdatePasswordResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_account_d27ff2bbd0f6944b, []int{1}
}
func (m *UpdatePasswordResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *UpdatePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_UpdatePasswordResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *UpdatePasswordResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_UpdatePasswordResponse.Merge(dst, src)
}
func (m *UpdatePasswordResponse) XXX_Size() int {
return m.Size()
}
func (m *UpdatePasswordResponse) XXX_DiscardUnknown() {
xxx_messageInfo_UpdatePasswordResponse.DiscardUnknown(m)
}
var xxx_messageInfo_UpdatePasswordResponse proto.InternalMessageInfo
func init() {
proto.RegisterType((*UpdatePasswordRequest)(nil), "account.UpdatePasswordRequest")
@@ -97,7 +149,7 @@ func NewAccountServiceClient(cc *grpc.ClientConn) AccountServiceClient {
func (c *accountServiceClient) UpdatePassword(ctx context.Context, in *UpdatePasswordRequest, opts ...grpc.CallOption) (*UpdatePasswordResponse, error) {
out := new(UpdatePasswordResponse)
err := grpc.Invoke(ctx, "/account.AccountService/UpdatePassword", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/account.AccountService/UpdatePassword", in, out, opts...)
if err != nil {
return nil, err
}
@@ -173,6 +225,9 @@ func (m *UpdatePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintAccount(dAtA, i, uint64(len(m.CurrentPassword)))
i += copy(dAtA[i:], m.CurrentPassword)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -191,6 +246,9 @@ func (m *UpdatePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -214,12 +272,18 @@ func (m *UpdatePasswordRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovAccount(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *UpdatePasswordResponse) Size() (n int) {
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -335,6 +399,7 @@ func (m *UpdatePasswordRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -385,6 +450,7 @@ func (m *UpdatePasswordResponse) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -499,9 +565,11 @@ var (
ErrIntOverflowAccount = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("server/account/account.proto", fileDescriptorAccount) }
func init() {
proto.RegisterFile("server/account/account.proto", fileDescriptor_account_d27ff2bbd0f6944b)
}
var fileDescriptorAccount = []byte{
var fileDescriptor_account_d27ff2bbd0f6944b = []byte{
// 268 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x4e, 0x2d, 0x2a,
0x4b, 0x2d, 0xd2, 0x4f, 0x4c, 0x4e, 0xce, 0x2f, 0xcd, 0x2b, 0x81, 0xd1, 0x7a, 0x05, 0x45, 0xf9,

View File

@@ -43,6 +43,7 @@ type Server struct {
kubeclientset kubernetes.Interface
appclientset appclientset.Interface
repoClientset reposerver.Clientset
kubectl kube.Kubectl
db db.ArgoDB
appComparator controller.AppStateManager
enf *rbac.Enforcer
@@ -56,6 +57,7 @@ func NewServer(
kubeclientset kubernetes.Interface,
appclientset appclientset.Interface,
repoClientset reposerver.Clientset,
kubectl kube.Kubectl,
db db.ArgoDB,
enf *rbac.Enforcer,
projectLock *util.KeyLock,
@@ -67,7 +69,8 @@ func NewServer(
kubeclientset: kubeclientset,
db: db,
repoClientset: repoClientset,
appComparator: controller.NewAppStateManager(db, appclientset, repoClientset, namespace),
kubectl: kubectl,
appComparator: controller.NewAppStateManager(db, appclientset, repoClientset, namespace, kubectl),
enf: enf,
projectLock: projectLock,
auditLogger: argo.NewAuditLogger(namespace, kubeclientset, "argocd-server"),
@@ -79,6 +82,65 @@ func appRBACName(app appv1.Application) string {
return fmt.Sprintf("%s/%s", app.Spec.GetProject(), app.Name)
}
func toString(val interface{}) string {
if val == nil {
return ""
}
return fmt.Sprintf("%s", val)
}
// hideSecretData checks if given object kind is Secret, replaces data keys with stars and returns unchanged data map. The method additionally check if data key if different
// from corresponding key of optional parameter `otherData` and adds extra star to keep information about difference. So if secret data is out of sync user still can see which
// fields are different.
func hideSecretData(state string, otherData map[string]interface{}) (string, map[string]interface{}) {
obj, err := appv1.UnmarshalToUnstructured(state)
if err == nil {
if obj != nil && obj.GetKind() == kube.SecretKind {
if data, ok, err := unstructured.NestedMap(obj.Object, "data"); err == nil && ok {
unchangedData := make(map[string]interface{})
for k, v := range data {
unchangedData[k] = v
}
for k := range data {
replacement := "********"
if otherData != nil {
if val, ok := otherData[k]; ok && toString(val) != toString(data[k]) {
replacement = replacement + "*"
}
}
data[k] = replacement
}
_ = unstructured.SetNestedMap(obj.Object, data, "data")
newState, err := json.Marshal(obj)
if err == nil {
return string(newState), unchangedData
}
}
}
}
return state, nil
}
func hideNodesSecrets(nodes []appv1.ResourceNode) {
for i := range nodes {
node := nodes[i]
node.State, _ = hideSecretData(node.State, nil)
hideNodesSecrets(node.Children)
nodes[i] = node
}
}
func hideAppSecrets(app *appv1.Application) {
for i := range app.Status.ComparisonResult.Resources {
res := app.Status.ComparisonResult.Resources[i]
var data map[string]interface{}
res.LiveState, data = hideSecretData(res.LiveState, nil)
res.TargetState, _ = hideSecretData(res.TargetState, data)
hideNodesSecrets(res.ChildLiveResources)
app.Status.ComparisonResult.Resources[i] = res
}
}
// List returns list of applications
func (s *Server) List(ctx context.Context, q *ApplicationQuery) (*appv1.ApplicationList, error) {
appList, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).List(metav1.ListOptions{})
@@ -92,6 +154,11 @@ func (s *Server) List(ctx context.Context, q *ApplicationQuery) (*appv1.Applicat
}
}
newItems = argoutil.FilterByProjects(newItems, q.Projects)
for i := range newItems {
app := newItems[i]
hideAppSecrets(&app)
newItems[i] = app
}
appList.Items = newItems
return appList, nil
}
@@ -133,8 +200,9 @@ func (s *Server) Create(ctx context.Context, q *ApplicationCreateRequest) (*appv
}
if err == nil {
s.logEvent(out, ctx, argo.EventReasonResourceCreated, "create")
s.logEvent(out, ctx, argo.EventReasonResourceCreated, "created application")
}
hideAppSecrets(out)
return out, err
}
@@ -144,7 +212,7 @@ func (s *Server) GetManifests(ctx context.Context, q *ApplicationManifestQuery)
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications/manifests", "get", appRBACName(*a)) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "get", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
repo := s.getRepo(ctx, a.Spec.Source.RepoURL)
@@ -174,6 +242,7 @@ func (s *Server) GetManifests(ctx context.Context, q *ApplicationManifestQuery)
ComponentParameterOverrides: overrides,
AppLabel: a.Name,
ValueFiles: a.Spec.Source.ValuesFiles,
Namespace: a.Spec.Destination.Namespace,
})
if err != nil {
return nil, err
@@ -202,6 +271,7 @@ func (s *Server) Get(ctx context.Context, q *ApplicationQuery) (*appv1.Applicati
return nil, err
}
}
hideAppSecrets(a)
return a, nil
}
@@ -211,7 +281,7 @@ func (s *Server) ListResourceEvents(ctx context.Context, q *ApplicationResourceE
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications/events", "get", appRBACName(*a)) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "get", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
var (
@@ -266,7 +336,14 @@ func (s *Server) Update(ctx context.Context, q *ApplicationUpdateRequest) (*appv
if err != nil {
return nil, err
}
return s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(a)
out, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(a)
if out != nil {
hideAppSecrets(out)
}
if err == nil {
s.logEvent(a, ctx, argo.EventReasonResourceUpdated, "updated application")
}
return out, err
}
// removeInvalidOverrides removes any parameter overrides that are no longer valid
@@ -299,7 +376,6 @@ func (s *Server) removeInvalidOverrides(a *appv1.Application, q *ApplicationUpda
// UpdateSpec updates an application spec and filters out any invalid parameter overrides
func (s *Server) UpdateSpec(ctx context.Context, q *ApplicationUpdateSpecRequest) (*appv1.ApplicationSpec, error) {
s.projectLock.Lock(q.Spec.Project)
defer s.projectLock.Unlock(q.Spec.Project)
@@ -322,9 +398,7 @@ func (s *Server) UpdateSpec(ctx context.Context, q *ApplicationUpdateSpecRequest
a.Spec = q.Spec
_, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(a)
if err == nil {
if err != nil {
s.logEvent(a, ctx, argo.EventReasonResourceUpdated, "update")
}
s.logEvent(a, ctx, argo.EventReasonResourceUpdated, "updated application spec")
return &q.Spec, nil
}
if !apierr.IsConflict(err) {
@@ -387,7 +461,7 @@ func (s *Server) Delete(ctx context.Context, q *ApplicationDeleteRequest) (*Appl
return nil, err
}
s.logEvent(a, ctx, argo.EventReasonResourceDeleted, "delete")
s.logEvent(a, ctx, argo.EventReasonResourceDeleted, "deleted application")
return &ApplicationResponse{}, nil
}
@@ -406,6 +480,7 @@ func (s *Server) Watch(q *ApplicationQuery, ws ApplicationService_WatchServer) e
// do not emit apps user does not have accessing
continue
}
hideAppSecrets(&a)
err = ws.Send(&appv1.ApplicationWatchEvent{
Type: next.Type,
Application: a,
@@ -480,7 +555,7 @@ func (s *Server) DeleteResource(ctx context.Context, q *ApplicationDeleteResourc
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications/resources", "delete", appRBACName(*a)) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "delete", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
found := findResource(a, q)
@@ -491,10 +566,11 @@ func (s *Server) DeleteResource(ctx context.Context, q *ApplicationDeleteResourc
if err != nil {
return nil, err
}
err = kube.DeleteResource(config, found, namespace)
err = s.kubectl.DeleteResource(config, found, namespace)
if err != nil {
return nil, err
}
s.logEvent(a, ctx, argo.EventReasonResourceDeleted, fmt.Sprintf("deleted resource %s/%s '%s'", q.APIVersion, q.Kind, q.ResourceName))
return &ApplicationResponse{}, nil
}
@@ -543,7 +619,7 @@ func (s *Server) PodLogs(q *ApplicationPodLogsQuery, ws ApplicationService_PodLo
if err != nil {
return err
}
if !s.enf.EnforceClaims(ws.Context().Value("claims"), "applications/logs", "get", appRBACName(*a)) {
if !s.enf.EnforceClaims(ws.Context().Value("claims"), "applications", "get", appRBACName(*a)) {
return grpc.ErrPermissionDenied
}
config, namespace, err := s.getApplicationClusterConfig(*q.Name)
@@ -631,70 +707,85 @@ func (s *Server) getRepo(ctx context.Context, repoURL string) *appv1.Repository
// Sync syncs an application to its target state
func (s *Server) Sync(ctx context.Context, syncReq *ApplicationSyncRequest) (*appv1.Application, error) {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*syncReq.Name, metav1.GetOptions{})
appIf := s.appclientset.ArgoprojV1alpha1().Applications(s.ns)
a, err := appIf.Get(*syncReq.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "sync", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
return s.setAppOperation(ctx, *syncReq.Name, "sync", func(app *appv1.Application) (*appv1.Operation, error) {
syncOp := appv1.SyncOperation{
Revision: syncReq.Revision,
Prune: syncReq.Prune,
DryRun: syncReq.DryRun,
SyncStrategy: syncReq.Strategy,
if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil {
if syncReq.Revision != "" && syncReq.Revision != a.Spec.Source.TargetRevision {
return nil, status.Errorf(codes.FailedPrecondition, "Cannot sync to %s: auto-sync currently set to %s", syncReq.Revision, a.Spec.Source.TargetRevision)
}
return &appv1.Operation{
Sync: &syncOp,
}, nil
})
}
parameterOverrides := make(appv1.ParameterOverrides, 0)
if syncReq.Parameter != nil {
// If parameter overrides are supplied, the caller explicitly states to use the provided
// list of overrides. NOTE: gogo/protobuf cannot currently distinguish between empty arrays
// vs nil arrays, which is why the wrapping syncReq.Parameter is examined for intent.
// See: https://github.com/gogo/protobuf/issues/181
for _, p := range syncReq.Parameter.Overrides {
parameterOverrides = append(parameterOverrides, appv1.ComponentParameter{
Name: p.Name,
Value: p.Value,
Component: p.Component,
})
}
} else {
// If parameter overrides are omitted completely, we use what is set in the application
if a.Spec.Source.ComponentParameterOverrides != nil {
parameterOverrides = appv1.ParameterOverrides(a.Spec.Source.ComponentParameterOverrides)
}
}
op := appv1.Operation{
Sync: &appv1.SyncOperation{
Revision: syncReq.Revision,
Prune: syncReq.Prune,
DryRun: syncReq.DryRun,
SyncStrategy: syncReq.Strategy,
ParameterOverrides: parameterOverrides,
},
}
a, err = argo.SetAppOperation(ctx, appIf, s.auditLogger, *syncReq.Name, &op)
if err == nil {
rev := syncReq.Revision
if syncReq.Revision == "" {
rev = a.Spec.Source.TargetRevision
}
message := fmt.Sprintf("initiated sync to %s", rev)
s.logEvent(a, ctx, argo.EventReasonOperationStarted, message)
}
return a, err
}
func (s *Server) Rollback(ctx context.Context, rollbackReq *ApplicationRollbackRequest) (*appv1.Application, error) {
a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(*rollbackReq.Name, metav1.GetOptions{})
appIf := s.appclientset.ArgoprojV1alpha1().Applications(s.ns)
a, err := appIf.Get(*rollbackReq.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "rollback", appRBACName(*a)) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "sync", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
return s.setAppOperation(ctx, *rollbackReq.Name, "rollback", func(app *appv1.Application) (*appv1.Operation, error) {
return &appv1.Operation{
Rollback: &appv1.RollbackOperation{
ID: rollbackReq.ID,
Prune: rollbackReq.Prune,
DryRun: rollbackReq.DryRun,
},
}, nil
})
}
func (s *Server) setAppOperation(ctx context.Context, appName string, operationName string, operationCreator func(app *appv1.Application) (*appv1.Operation, error)) (*appv1.Application, error) {
for {
a, err := s.Get(ctx, &ApplicationQuery{Name: &appName})
if err != nil {
return nil, err
}
if a.Operation != nil {
return nil, status.Errorf(codes.InvalidArgument, "another operation is already in progress")
}
op, err := operationCreator(a)
if err != nil {
return nil, err
}
a.Operation = op
a.Status.OperationState = nil
_, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(a)
if err != nil && apierr.IsConflict(err) {
log.Warnf("Failed to set operation for app '%s' due to update conflict. Retrying again...", appName)
} else {
if err == nil {
s.logEvent(a, ctx, argo.EventReasonResourceUpdated, operationName)
}
return a, err
}
if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil {
return nil, status.Errorf(codes.FailedPrecondition, "Rollback cannot be initiated when auto-sync is enabled")
}
op := appv1.Operation{
Rollback: &appv1.RollbackOperation{
ID: rollbackReq.ID,
Prune: rollbackReq.Prune,
DryRun: rollbackReq.DryRun,
},
}
a, err = argo.SetAppOperation(ctx, appIf, s.auditLogger, *rollbackReq.Name, &op)
if err == nil {
s.logEvent(a, ctx, argo.EventReasonOperationStarted, fmt.Sprintf("initiated rollback to %d", rollbackReq.ID))
}
return a, err
}
func (s *Server) TerminateOperation(ctx context.Context, termOpReq *OperationTerminateRequest) (*OperationTerminateResponse, error) {
@@ -702,7 +793,7 @@ func (s *Server) TerminateOperation(ctx context.Context, termOpReq *OperationTer
if err != nil {
return nil, err
}
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "terminateop", appRBACName(*a)) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "applications", "sync", appRBACName(*a)) {
return nil, grpc.ErrPermissionDenied
}
@@ -724,12 +815,18 @@ func (s *Server) TerminateOperation(ctx context.Context, termOpReq *OperationTer
if err != nil {
return nil, err
} else {
s.logEvent(a, ctx, argo.EventReasonResourceUpdated, "terminateop")
s.logEvent(a, ctx, argo.EventReasonResourceUpdated, "terminated running operation")
}
}
return nil, status.Errorf(codes.Internal, "Failed to terminate app. Too many conflicts")
}
func (s *Server) logEvent(a *appv1.Application, ctx context.Context, reason string, action string) {
s.auditLogger.LogAppEvent(a, argo.EventInfo{Reason: reason, Action: action, Username: session.Username(ctx)}, v1.EventTypeNormal)
eventInfo := argo.EventInfo{Type: v1.EventTypeNormal, Reason: reason}
user := session.Username(ctx)
if user == "" {
user = "Unknown user"
}
message := fmt.Sprintf("%s %s", user, action)
s.auditLogger.LogAppEvent(a, eventInfo, message)
}

File diff suppressed because it is too large Load Diff

View File

@@ -57,6 +57,19 @@ message ApplicationSyncRequest {
optional bool dryRun = 3 [(gogoproto.nullable) = false];
optional bool prune = 4 [(gogoproto.nullable) = false];
optional github.com.argoproj.argo_cd.pkg.apis.application.v1alpha1.SyncStrategy strategy = 5;
optional ParameterOverrides parameter = 6;
}
// ParameterOverrides is a wrapper on a list of parameters. If omitted, the application's overrides
// in the spec will be used. If set, will use the supplied list of overrides
message ParameterOverrides {
repeated Parameter overrides = 1;
}
message Parameter {
required string name = 1 [(gogoproto.nullable) = false];
optional string value = 2 [(gogoproto.nullable) = false];
optional string component = 3 [(gogoproto.nullable) = false];
}
// ApplicationUpdateSpecRequest is a request to update application spec

View File

@@ -19,6 +19,7 @@ import (
"github.com/argoproj/argo-cd/test"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/rbac"
)
@@ -109,6 +110,7 @@ func newTestAppServer() ApplicationServiceServer {
kubeclientset,
apps.NewSimpleClientset(defaultProj),
mockRepoClient,
kube.KubectlCmd{},
db,
enforcer,
util.NewKeyLock(),

View File

@@ -1,32 +1,21 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: server/cluster/cluster.proto
/*
Package cluster is a generated protocol buffer package.
package cluster // import "github.com/argoproj/argo-cd/server/cluster"
/*
Cluster Service
Cluster Service API performs CRUD actions against cluster resources
It is generated from these files:
server/cluster/cluster.proto
It has these top-level messages:
ClusterQuery
ClusterResponse
ClusterCreateRequest
ClusterCreateFromKubeConfigRequest
ClusterUpdateRequest
*/
package cluster
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import _ "k8s.io/api/core/v1"
import github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
@@ -46,13 +35,44 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// ClusterQuery is a query for cluster resources
type ClusterQuery struct {
Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"`
Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ClusterQuery) Reset() { *m = ClusterQuery{} }
func (m *ClusterQuery) String() string { return proto.CompactTextString(m) }
func (*ClusterQuery) ProtoMessage() {}
func (*ClusterQuery) Descriptor() ([]byte, []int) { return fileDescriptorCluster, []int{0} }
func (m *ClusterQuery) Reset() { *m = ClusterQuery{} }
func (m *ClusterQuery) String() string { return proto.CompactTextString(m) }
func (*ClusterQuery) ProtoMessage() {}
func (*ClusterQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_0875510a34378ea0, []int{0}
}
func (m *ClusterQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ClusterQuery.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ClusterQuery) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterQuery.Merge(dst, src)
}
func (m *ClusterQuery) XXX_Size() int {
return m.Size()
}
func (m *ClusterQuery) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterQuery.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterQuery proto.InternalMessageInfo
func (m *ClusterQuery) GetServer() string {
if m != nil {
@@ -62,24 +82,86 @@ func (m *ClusterQuery) GetServer() string {
}
type ClusterResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ClusterResponse) Reset() { *m = ClusterResponse{} }
func (m *ClusterResponse) String() string { return proto.CompactTextString(m) }
func (*ClusterResponse) ProtoMessage() {}
func (*ClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorCluster, []int{1} }
func (m *ClusterResponse) Reset() { *m = ClusterResponse{} }
func (m *ClusterResponse) String() string { return proto.CompactTextString(m) }
func (*ClusterResponse) ProtoMessage() {}
func (*ClusterResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_0875510a34378ea0, []int{1}
}
func (m *ClusterResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ClusterResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ClusterResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterResponse.Merge(dst, src)
}
func (m *ClusterResponse) XXX_Size() int {
return m.Size()
}
func (m *ClusterResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterResponse proto.InternalMessageInfo
type ClusterCreateRequest struct {
Cluster *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
Upsert bool `protobuf:"varint,2,opt,name=upsert,proto3" json:"upsert,omitempty"`
Cluster *v1alpha1.Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
Upsert bool `protobuf:"varint,2,opt,name=upsert,proto3" json:"upsert,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ClusterCreateRequest) Reset() { *m = ClusterCreateRequest{} }
func (m *ClusterCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterCreateRequest) ProtoMessage() {}
func (*ClusterCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCluster, []int{2} }
func (m *ClusterCreateRequest) Reset() { *m = ClusterCreateRequest{} }
func (m *ClusterCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterCreateRequest) ProtoMessage() {}
func (*ClusterCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_0875510a34378ea0, []int{2}
}
func (m *ClusterCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ClusterCreateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ClusterCreateRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterCreateRequest.Merge(dst, src)
}
func (m *ClusterCreateRequest) XXX_Size() int {
return m.Size()
}
func (m *ClusterCreateRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterCreateRequest.DiscardUnknown(m)
}
func (m *ClusterCreateRequest) GetCluster() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster {
var xxx_messageInfo_ClusterCreateRequest proto.InternalMessageInfo
func (m *ClusterCreateRequest) GetCluster() *v1alpha1.Cluster {
if m != nil {
return m.Cluster
}
@@ -94,18 +176,47 @@ func (m *ClusterCreateRequest) GetUpsert() bool {
}
type ClusterCreateFromKubeConfigRequest struct {
Kubeconfig string `protobuf:"bytes,1,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"`
Context string `protobuf:"bytes,2,opt,name=context,proto3" json:"context,omitempty"`
Upsert bool `protobuf:"varint,3,opt,name=upsert,proto3" json:"upsert,omitempty"`
InCluster bool `protobuf:"varint,4,opt,name=inCluster,proto3" json:"inCluster,omitempty"`
Kubeconfig string `protobuf:"bytes,1,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"`
Context string `protobuf:"bytes,2,opt,name=context,proto3" json:"context,omitempty"`
Upsert bool `protobuf:"varint,3,opt,name=upsert,proto3" json:"upsert,omitempty"`
InCluster bool `protobuf:"varint,4,opt,name=inCluster,proto3" json:"inCluster,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ClusterCreateFromKubeConfigRequest) Reset() { *m = ClusterCreateFromKubeConfigRequest{} }
func (m *ClusterCreateFromKubeConfigRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterCreateFromKubeConfigRequest) ProtoMessage() {}
func (*ClusterCreateFromKubeConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptorCluster, []int{3}
return fileDescriptor_cluster_0875510a34378ea0, []int{3}
}
func (m *ClusterCreateFromKubeConfigRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterCreateFromKubeConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ClusterCreateFromKubeConfigRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ClusterCreateFromKubeConfigRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterCreateFromKubeConfigRequest.Merge(dst, src)
}
func (m *ClusterCreateFromKubeConfigRequest) XXX_Size() int {
return m.Size()
}
func (m *ClusterCreateFromKubeConfigRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterCreateFromKubeConfigRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterCreateFromKubeConfigRequest proto.InternalMessageInfo
func (m *ClusterCreateFromKubeConfigRequest) GetKubeconfig() string {
if m != nil {
@@ -136,15 +247,46 @@ func (m *ClusterCreateFromKubeConfigRequest) GetInCluster() bool {
}
type ClusterUpdateRequest struct {
Cluster *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
Cluster *v1alpha1.Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ClusterUpdateRequest) Reset() { *m = ClusterUpdateRequest{} }
func (m *ClusterUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterUpdateRequest) ProtoMessage() {}
func (*ClusterUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCluster, []int{4} }
func (m *ClusterUpdateRequest) Reset() { *m = ClusterUpdateRequest{} }
func (m *ClusterUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ClusterUpdateRequest) ProtoMessage() {}
func (*ClusterUpdateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cluster_0875510a34378ea0, []int{4}
}
func (m *ClusterUpdateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ClusterUpdateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ClusterUpdateRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterUpdateRequest.Merge(dst, src)
}
func (m *ClusterUpdateRequest) XXX_Size() int {
return m.Size()
}
func (m *ClusterUpdateRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterUpdateRequest.DiscardUnknown(m)
}
func (m *ClusterUpdateRequest) GetCluster() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster {
var xxx_messageInfo_ClusterUpdateRequest proto.InternalMessageInfo
func (m *ClusterUpdateRequest) GetCluster() *v1alpha1.Cluster {
if m != nil {
return m.Cluster
}
@@ -171,15 +313,15 @@ const _ = grpc.SupportPackageIsVersion4
type ClusterServiceClient interface {
// List returns list of clusters
List(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ClusterList, error)
List(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*v1alpha1.ClusterList, error)
// Create creates a cluster
Create(ctx context.Context, in *ClusterCreateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Create(ctx context.Context, in *ClusterCreateRequest, opts ...grpc.CallOption) (*v1alpha1.Cluster, error)
// CreateFromKubeConfig installs the argocd-manager service account into the cluster specified in the given kubeconfig and context
CreateFromKubeConfig(ctx context.Context, in *ClusterCreateFromKubeConfigRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
CreateFromKubeConfig(ctx context.Context, in *ClusterCreateFromKubeConfigRequest, opts ...grpc.CallOption) (*v1alpha1.Cluster, error)
// Get returns a cluster by server address
Get(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Get(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*v1alpha1.Cluster, error)
// Update updates a cluster
Update(ctx context.Context, in *ClusterUpdateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Update(ctx context.Context, in *ClusterUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Cluster, error)
// Delete deletes a cluster
Delete(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*ClusterResponse, error)
}
@@ -192,45 +334,45 @@ func NewClusterServiceClient(cc *grpc.ClientConn) ClusterServiceClient {
return &clusterServiceClient{cc}
}
func (c *clusterServiceClient) List(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ClusterList, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ClusterList)
err := grpc.Invoke(ctx, "/cluster.ClusterService/List", in, out, c.cc, opts...)
func (c *clusterServiceClient) List(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*v1alpha1.ClusterList, error) {
out := new(v1alpha1.ClusterList)
err := c.cc.Invoke(ctx, "/cluster.ClusterService/List", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterServiceClient) Create(ctx context.Context, in *ClusterCreateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
err := grpc.Invoke(ctx, "/cluster.ClusterService/Create", in, out, c.cc, opts...)
func (c *clusterServiceClient) Create(ctx context.Context, in *ClusterCreateRequest, opts ...grpc.CallOption) (*v1alpha1.Cluster, error) {
out := new(v1alpha1.Cluster)
err := c.cc.Invoke(ctx, "/cluster.ClusterService/Create", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterServiceClient) CreateFromKubeConfig(ctx context.Context, in *ClusterCreateFromKubeConfigRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
err := grpc.Invoke(ctx, "/cluster.ClusterService/CreateFromKubeConfig", in, out, c.cc, opts...)
func (c *clusterServiceClient) CreateFromKubeConfig(ctx context.Context, in *ClusterCreateFromKubeConfigRequest, opts ...grpc.CallOption) (*v1alpha1.Cluster, error) {
out := new(v1alpha1.Cluster)
err := c.cc.Invoke(ctx, "/cluster.ClusterService/CreateFromKubeConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterServiceClient) Get(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
err := grpc.Invoke(ctx, "/cluster.ClusterService/Get", in, out, c.cc, opts...)
func (c *clusterServiceClient) Get(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*v1alpha1.Cluster, error) {
out := new(v1alpha1.Cluster)
err := c.cc.Invoke(ctx, "/cluster.ClusterService/Get", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterServiceClient) Update(ctx context.Context, in *ClusterUpdateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster)
err := grpc.Invoke(ctx, "/cluster.ClusterService/Update", in, out, c.cc, opts...)
func (c *clusterServiceClient) Update(ctx context.Context, in *ClusterUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Cluster, error) {
out := new(v1alpha1.Cluster)
err := c.cc.Invoke(ctx, "/cluster.ClusterService/Update", in, out, opts...)
if err != nil {
return nil, err
}
@@ -239,7 +381,7 @@ func (c *clusterServiceClient) Update(ctx context.Context, in *ClusterUpdateRequ
func (c *clusterServiceClient) Delete(ctx context.Context, in *ClusterQuery, opts ...grpc.CallOption) (*ClusterResponse, error) {
out := new(ClusterResponse)
err := grpc.Invoke(ctx, "/cluster.ClusterService/Delete", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/cluster.ClusterService/Delete", in, out, opts...)
if err != nil {
return nil, err
}
@@ -250,15 +392,15 @@ func (c *clusterServiceClient) Delete(ctx context.Context, in *ClusterQuery, opt
type ClusterServiceServer interface {
// List returns list of clusters
List(context.Context, *ClusterQuery) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.ClusterList, error)
List(context.Context, *ClusterQuery) (*v1alpha1.ClusterList, error)
// Create creates a cluster
Create(context.Context, *ClusterCreateRequest) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Create(context.Context, *ClusterCreateRequest) (*v1alpha1.Cluster, error)
// CreateFromKubeConfig installs the argocd-manager service account into the cluster specified in the given kubeconfig and context
CreateFromKubeConfig(context.Context, *ClusterCreateFromKubeConfigRequest) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
CreateFromKubeConfig(context.Context, *ClusterCreateFromKubeConfigRequest) (*v1alpha1.Cluster, error)
// Get returns a cluster by server address
Get(context.Context, *ClusterQuery) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Get(context.Context, *ClusterQuery) (*v1alpha1.Cluster, error)
// Update updates a cluster
Update(context.Context, *ClusterUpdateRequest) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster, error)
Update(context.Context, *ClusterUpdateRequest) (*v1alpha1.Cluster, error)
// Delete deletes a cluster
Delete(context.Context, *ClusterQuery) (*ClusterResponse, error)
}
@@ -429,6 +571,9 @@ func (m *ClusterQuery) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintCluster(dAtA, i, uint64(len(m.Server)))
i += copy(dAtA[i:], m.Server)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -447,6 +592,9 @@ func (m *ClusterResponse) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -485,6 +633,9 @@ func (m *ClusterCreateRequest) MarshalTo(dAtA []byte) (int, error) {
}
i++
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -535,6 +686,9 @@ func (m *ClusterCreateFromKubeConfigRequest) MarshalTo(dAtA []byte) (int, error)
}
i++
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -563,6 +717,9 @@ func (m *ClusterUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
}
i += n2
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -582,12 +739,18 @@ func (m *ClusterQuery) Size() (n int) {
if l > 0 {
n += 1 + l + sovCluster(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ClusterResponse) Size() (n int) {
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -601,6 +764,9 @@ func (m *ClusterCreateRequest) Size() (n int) {
if m.Upsert {
n += 2
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -621,6 +787,9 @@ func (m *ClusterCreateFromKubeConfigRequest) Size() (n int) {
if m.InCluster {
n += 2
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -631,6 +800,9 @@ func (m *ClusterUpdateRequest) Size() (n int) {
l = m.Cluster.Size()
n += 1 + l + sovCluster(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -717,6 +889,7 @@ func (m *ClusterQuery) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -767,6 +940,7 @@ func (m *ClusterResponse) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -832,7 +1006,7 @@ func (m *ClusterCreateRequest) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Cluster == nil {
m.Cluster = &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster{}
m.Cluster = &v1alpha1.Cluster{}
}
if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -870,6 +1044,7 @@ func (m *ClusterCreateRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1018,6 +1193,7 @@ func (m *ClusterCreateFromKubeConfigRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1083,7 +1259,7 @@ func (m *ClusterUpdateRequest) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Cluster == nil {
m.Cluster = &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.Cluster{}
m.Cluster = &v1alpha1.Cluster{}
}
if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -1101,6 +1277,7 @@ func (m *ClusterUpdateRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1215,9 +1392,11 @@ var (
ErrIntOverflowCluster = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("server/cluster/cluster.proto", fileDescriptorCluster) }
func init() {
proto.RegisterFile("server/cluster/cluster.proto", fileDescriptor_cluster_0875510a34378ea0)
}
var fileDescriptorCluster = []byte{
var fileDescriptor_cluster_0875510a34378ea0 = []byte{
// 564 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcd, 0x6e, 0x13, 0x31,
0x10, 0xc7, 0xe5, 0xb6, 0xda, 0x12, 0x83, 0xf8, 0xb0, 0x0a, 0x5a, 0xd2, 0x10, 0xa5, 0x3e, 0x54,

130
server/metrics/metrics.go Normal file
View File

@@ -0,0 +1,130 @@
package metrics
import (
"fmt"
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/labels"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
)
const (
	// MetricsPath is the endpoint to collect application metrics
	MetricsPath = "/metrics"
)

var (
	// descAppDefaultLabels are the labels attached to every application metric
	// emitted by this package; per-metric labels are appended after these.
	descAppDefaultLabels = []string{"namespace", "name"}

	// descAppInfo is a constant-value (always 1) info metric carrying the
	// application's project, repo and destination as labels.
	descAppInfo = prometheus.NewDesc(
		"argocd_app_info",
		"Information about application.",
		append(descAppDefaultLabels, "project", "repo", "dest_server", "dest_namespace"),
		nil,
	)
	// descAppCreated exposes the application's creation timestamp (unix seconds).
	descAppCreated = prometheus.NewDesc(
		"argocd_app_created_time",
		"Creation time in unix timestamp for an application.",
		descAppDefaultLabels,
		nil,
	)
	// descAppSyncStatus is a 0/1 gauge emitted once per possible sync status;
	// exactly one series per app carries the value 1.
	descAppSyncStatus = prometheus.NewDesc(
		"argocd_app_sync_status",
		"The application current sync status.",
		append(descAppDefaultLabels, "sync_status"),
		nil,
	)
	// descAppHealthStatus is a 0/1 gauge emitted once per possible health status;
	// exactly one series per app carries the value 1.
	descAppHealthStatus = prometheus.NewDesc(
		"argocd_app_health_status",
		"The application current health status.",
		append(descAppDefaultLabels, "health_status"),
		nil,
	)
)
// NewMetricsServer returns a new prometheus server which collects application
// metrics. The server listens on localhost at the given port and serves the
// application registry under MetricsPath.
func NewMetricsServer(port int, appLister applister.ApplicationLister) *http.Server {
	registry := NewAppRegistry(appLister)
	serveMux := http.NewServeMux()
	serveMux.Handle(MetricsPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	server := &http.Server{
		Addr:    fmt.Sprintf("localhost:%d", port),
		Handler: serveMux,
	}
	return server
}
// appCollector implements prometheus.Collector by listing applications
// through the informer-backed lister on every scrape.
type appCollector struct {
	// store is the lister used to enumerate applications at collect time.
	store applister.ApplicationLister
}
// NewAppCollector returns a prometheus collector for application metrics
// backed by the supplied lister.
func NewAppCollector(appLister applister.ApplicationLister) prometheus.Collector {
	collector := appCollector{store: appLister}
	return &collector
}
// NewAppRegistry creates a new prometheus registry that collects applications.
// The registry contains only the application collector, so the default Go and
// process collectors are not exposed.
func NewAppRegistry(appLister applister.ApplicationLister) *prometheus.Registry {
	r := prometheus.NewRegistry()
	r.MustRegister(NewAppCollector(appLister))
	return r
}
// Describe implements the prometheus.Collector interface by sending every
// metric descriptor this collector can emit.
func (c *appCollector) Describe(ch chan<- *prometheus.Desc) {
	descs := []*prometheus.Desc{
		descAppInfo,
		descAppCreated,
		descAppSyncStatus,
		descAppHealthStatus,
	}
	for _, d := range descs {
		ch <- d
	}
}
// Collect implements the prometheus.Collector interface. It lists all
// applications from the lister cache and emits one metric set per app.
// A failed list is logged and results in no metrics for this scrape.
func (c *appCollector) Collect(ch chan<- prometheus.Metric) {
	apps, err := c.store.List(labels.NewSelector())
	if err != nil {
		// Fix: logrus's Warn is Sprint-style and does not interpret format
		// verbs, so the original Warn(...) logged a literal "%v". Warnf
		// renders the actual error.
		log.Warnf("Failed to collect applications: %v", err)
		return
	}
	for _, app := range apps {
		collectApps(ch, app)
	}
}
// boolFloat64 maps a boolean onto the prometheus 0/1 gauge convention
// (1 when the condition holds, 0 otherwise).
func boolFloat64(b bool) float64 {
	var v float64
	if b {
		v = 1
	}
	return v
}
// collectApps emits the full metric set for a single application: an info
// metric, the creation time, and one 0/1 gauge per possible sync and health
// status (exactly one of each group is 1).
func collectApps(ch chan<- prometheus.Metric, app *argoappv1.Application) {
	emit := func(desc *prometheus.Desc, valueType prometheus.ValueType, value float64, extra ...string) {
		labelVals := append([]string{app.Namespace, app.Name}, extra...)
		ch <- prometheus.MustNewConstMetric(desc, valueType, value, labelVals...)
	}
	gauge := func(desc *prometheus.Desc, value float64, extra ...string) {
		emit(desc, prometheus.GaugeValue, value, extra...)
	}

	gauge(descAppInfo, 1, app.Spec.Project, app.Spec.Source.RepoURL, app.Spec.Destination.Server, app.Spec.Destination.Namespace)
	gauge(descAppCreated, float64(app.CreationTimestamp.Unix()))

	// Sync status: empty status is reported as Unknown.
	syncStatus := app.Status.ComparisonResult.Status
	gauge(descAppSyncStatus, boolFloat64(syncStatus == argoappv1.ComparisonStatusSynced), string(argoappv1.ComparisonStatusSynced))
	gauge(descAppSyncStatus, boolFloat64(syncStatus == argoappv1.ComparisonStatusOutOfSync), string(argoappv1.ComparisonStatusOutOfSync))
	gauge(descAppSyncStatus, boolFloat64(syncStatus == argoappv1.ComparisonStatusUnknown || syncStatus == ""), string(argoappv1.ComparisonStatusUnknown))

	// Health status: empty status is reported as Unknown.
	healthStatus := app.Status.Health.Status
	gauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusUnknown || healthStatus == ""), string(argoappv1.HealthStatusUnknown))
	gauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusProgressing), string(argoappv1.HealthStatusProgressing))
	gauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusHealthy), string(argoappv1.HealthStatusHealthy))
	gauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusDegraded), string(argoappv1.HealthStatusDegraded))
	gauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusMissing), string(argoappv1.HealthStatusMissing))
}

View File

@@ -0,0 +1,96 @@
package metrics
import (
"context"
"log"
"net/http"
"net/http/httptest"
"testing"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
appinformer "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
)
// fakeApp is a minimal Application manifest used to seed the fake clientset.
// Its statuses (Synced/Healthy) and identifiers must stay in agreement with
// expectedResponse below.
var fakeApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: my-app
  namespace: argocd
spec:
  destination:
    namespace: dummy-namespace
    server: https://localhost:6443
  project: default
  source:
    path: some/path
    repoURL: https://github.com/argoproj/argocd-example-apps.git
status:
  comparisonResult:
    status: Synced
  health:
    status: Healthy
`

// expectedResponse is the exact prometheus text exposition expected for
// fakeApp (labels are rendered in sorted order by the prometheus library).
var expectedResponse = `# HELP argocd_app_created_time Creation time in unix timestamp for an application.
# TYPE argocd_app_created_time gauge
argocd_app_created_time{name="my-app",namespace="argocd"} -6.21355968e+10
# HELP argocd_app_health_status The application current health status.
# TYPE argocd_app_health_status gauge
argocd_app_health_status{health_status="Degraded",name="my-app",namespace="argocd"} 0
argocd_app_health_status{health_status="Healthy",name="my-app",namespace="argocd"} 1
argocd_app_health_status{health_status="Missing",name="my-app",namespace="argocd"} 0
argocd_app_health_status{health_status="Progressing",name="my-app",namespace="argocd"} 0
argocd_app_health_status{health_status="Unknown",name="my-app",namespace="argocd"} 0
# HELP argocd_app_info Information about application.
# TYPE argocd_app_info gauge
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="default",repo="https://github.com/argoproj/argocd-example-apps.git"} 1
# HELP argocd_app_sync_status The application current sync status.
# TYPE argocd_app_sync_status gauge
argocd_app_sync_status{name="my-app",namespace="argocd",sync_status="OutOfSync"} 0
argocd_app_sync_status{name="my-app",namespace="argocd",sync_status="Synced"} 1
argocd_app_sync_status{name="my-app",namespace="argocd",sync_status="Unknown"} 0
`
// newFakeApp parses the fakeApp YAML fixture into an Application.
// A parse failure panics, since it indicates a broken test fixture.
func newFakeApp() *argoappv1.Application {
	app := argoappv1.Application{}
	if err := yaml.Unmarshal([]byte(fakeApp), &app); err != nil {
		panic(err)
	}
	return &app
}
// newFakeLister starts an application informer over a fake clientset seeded
// with one application, waits for its cache to sync, and returns a lister
// backed by that cache. The returned CancelFunc stops the informer goroutine;
// the caller is responsible for deferring it.
func newFakeLister() (context.CancelFunc, applister.ApplicationLister) {
	ctx, cancel := context.WithCancel(context.Background())
	// Fix: the original also did `defer cancel()` here, which cancelled the
	// context the moment this function returned and stopped the informer
	// goroutine before the caller ever used the lister — defeating the point
	// of returning cancel. The caller now owns the informer's lifetime.
	appClientset := appclientset.NewSimpleClientset(newFakeApp())
	factory := appinformer.NewFilteredSharedInformerFactory(appClientset, 0, "argocd", func(options *metav1.ListOptions) {})
	appInformer := factory.Argoproj().V1alpha1().Applications().Informer()
	go appInformer.Run(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), appInformer.HasSynced) {
		log.Fatal("Timed out waiting for caches to sync")
	}
	return cancel, factory.Argoproj().V1alpha1().Applications().Lister()
}
// TestMetrics scrapes the metrics endpoint against a fake lister and checks
// the full text exposition against the expectedResponse fixture.
func TestMetrics(t *testing.T) {
	cancel, appLister := newFakeLister()
	defer cancel()

	server := NewMetricsServer(8082, appLister)
	request, err := http.NewRequest("GET", "/metrics", nil)
	assert.NoError(t, err)

	recorder := httptest.NewRecorder()
	server.Handler.ServeHTTP(recorder, request)
	assert.Equal(t, recorder.Code, http.StatusOK)

	body := recorder.Body.String()
	log.Println(body)
	assert.Equal(t, expectedResponse, body)
}

View File

@@ -90,7 +90,7 @@ func (s *Server) CreateToken(ctx context.Context, q *ProjectTokenCreateRequest)
if err != nil {
return nil, err
}
s.logEvent(project, ctx, argo.EventReasonResourceCreated, "create token")
s.logEvent(project, ctx, argo.EventReasonResourceCreated, "created token")
return &ProjectTokenResponse{Token: jwtToken}, nil
}
@@ -145,7 +145,7 @@ func (s *Server) Create(ctx context.Context, q *ProjectCreateRequest) (*v1alpha1
}
res, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Create(q.Project)
if err == nil {
s.logEvent(res, ctx, argo.EventReasonResourceCreated, "create")
s.logEvent(res, ctx, argo.EventReasonResourceCreated, "created project")
}
return res, err
}
@@ -313,7 +313,6 @@ func validateProject(p *v1alpha1.AppProject) error {
} else {
return status.Errorf(codes.AlreadyExists, "can't have duplicate roles: role '%s' already exists", role.Name)
}
}
return nil
@@ -367,14 +366,34 @@ func (s *Server) Update(ctx context.Context, q *ProjectUpdateRequest) (*v1alpha1
return nil, status.Errorf(
codes.InvalidArgument, "following source repos are used by one or more application and cannot be removed: %s", strings.Join(removedSrcUsed, ";"))
}
for i, role := range q.Project.Spec.Roles {
var normalizedPolicies []string
for _, policy := range role.Policies {
normalizedPolicies = append(normalizedPolicies, normalizePolicy(policy))
}
q.Project.Spec.Roles[i].Policies = normalizedPolicies
}
res, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Update(q.Project)
if err == nil {
s.logEvent(res, ctx, argo.EventReasonResourceUpdated, "update")
s.logEvent(res, ctx, argo.EventReasonResourceUpdated, "updated project")
}
return res, err
}
func normalizePolicy(policy string) string {
policyComponents := strings.Split(policy, ",")
normalizedPolicy := ""
for _, component := range policyComponents {
if normalizedPolicy == "" {
normalizedPolicy = component
} else {
normalizedPolicy = fmt.Sprintf("%s, %s", normalizedPolicy, strings.Trim(component, " "))
}
}
return normalizedPolicy
}
// Delete deletes a project
func (s *Server) Delete(ctx context.Context, q *ProjectQuery) (*EmptyResponse, error) {
if q.Name == common.DefaultAppProjectName {
@@ -402,13 +421,13 @@ func (s *Server) Delete(ctx context.Context, q *ProjectQuery) (*EmptyResponse, e
}
err = s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Delete(q.Name, &metav1.DeleteOptions{})
if err == nil {
s.logEvent(p, ctx, argo.EventReasonResourceDeleted, "delete")
s.logEvent(p, ctx, argo.EventReasonResourceDeleted, "deleted project")
}
return &EmptyResponse{}, err
}
func (s *Server) ListEvents(ctx context.Context, q *ProjectQuery) (*v1.EventList, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "projects/events", "get", q.Name) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "projects", "get", q.Name) {
return nil, grpc.ErrPermissionDenied
}
proj, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Get(q.Name, metav1.GetOptions{})
@@ -423,6 +442,12 @@ func (s *Server) ListEvents(ctx context.Context, q *ProjectQuery) (*v1.EventList
return s.kubeclientset.CoreV1().Events(s.ns).List(metav1.ListOptions{FieldSelector: fieldSelector})
}
func (s *Server) logEvent(p *v1alpha1.AppProject, ctx context.Context, reason string, action string) {
s.auditLogger.LogAppProjEvent(p, argo.EventInfo{Reason: reason, Action: action, Username: session.Username(ctx)}, v1.EventTypeNormal)
func (s *Server) logEvent(a *v1alpha1.AppProject, ctx context.Context, reason string, action string) {
eventInfo := argo.EventInfo{Type: v1.EventTypeNormal, Reason: reason}
user := session.Username(ctx)
if user == "" {
user = "Unknown user"
}
message := fmt.Sprintf("%s %s", user, action)
s.auditLogger.LogAppProjEvent(a, eventInfo, message)
}

View File

@@ -1,35 +1,22 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: server/project/project.proto
/*
Package project is a generated protocol buffer package.
package project // import "github.com/argoproj/argo-cd/server/project"
/*
Project Service
Project Service API performs CRUD actions against project resources
It is generated from these files:
server/project/project.proto
It has these top-level messages:
ProjectCreateRequest
ProjectTokenDeleteRequest
ProjectTokenCreateRequest
ProjectTokenResponse
ProjectQuery
ProjectUpdateRequest
EmptyResponse
*/
package project
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import k8s_io_api_core_v1 "k8s.io/api/core/v1"
import v1 "k8s.io/api/core/v1"
import _ "k8s.io/apimachinery/pkg/apis/meta/v1"
import github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
@@ -49,15 +36,46 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// ProjectCreateRequest defines project creation parameters.
type ProjectCreateRequest struct {
Project *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
Project *v1alpha1.AppProject `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProjectCreateRequest) Reset() { *m = ProjectCreateRequest{} }
func (m *ProjectCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectCreateRequest) ProtoMessage() {}
func (*ProjectCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorProject, []int{0} }
func (m *ProjectCreateRequest) Reset() { *m = ProjectCreateRequest{} }
func (m *ProjectCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectCreateRequest) ProtoMessage() {}
func (*ProjectCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_project_082822b5d17b8c4e, []int{0}
}
func (m *ProjectCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProjectCreateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ProjectCreateRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectCreateRequest.Merge(dst, src)
}
func (m *ProjectCreateRequest) XXX_Size() int {
return m.Size()
}
func (m *ProjectCreateRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectCreateRequest.DiscardUnknown(m)
}
func (m *ProjectCreateRequest) GetProject() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject {
var xxx_messageInfo_ProjectCreateRequest proto.InternalMessageInfo
func (m *ProjectCreateRequest) GetProject() *v1alpha1.AppProject {
if m != nil {
return m.Project
}
@@ -66,15 +84,46 @@ func (m *ProjectCreateRequest) GetProject() *github_com_argoproj_argo_cd_pkg_api
// ProjectTokenCreateRequest defines project token deletion parameters.
type ProjectTokenDeleteRequest struct {
Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
Iat int64 `protobuf:"varint,3,opt,name=iat,proto3" json:"iat,omitempty"`
Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
Iat int64 `protobuf:"varint,3,opt,name=iat,proto3" json:"iat,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProjectTokenDeleteRequest) Reset() { *m = ProjectTokenDeleteRequest{} }
func (m *ProjectTokenDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenDeleteRequest) ProtoMessage() {}
func (*ProjectTokenDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorProject, []int{1} }
func (m *ProjectTokenDeleteRequest) Reset() { *m = ProjectTokenDeleteRequest{} }
func (m *ProjectTokenDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenDeleteRequest) ProtoMessage() {}
func (*ProjectTokenDeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_project_082822b5d17b8c4e, []int{1}
}
func (m *ProjectTokenDeleteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectTokenDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProjectTokenDeleteRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ProjectTokenDeleteRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectTokenDeleteRequest.Merge(dst, src)
}
func (m *ProjectTokenDeleteRequest) XXX_Size() int {
return m.Size()
}
func (m *ProjectTokenDeleteRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectTokenDeleteRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ProjectTokenDeleteRequest proto.InternalMessageInfo
func (m *ProjectTokenDeleteRequest) GetProject() string {
if m != nil {
@@ -103,13 +152,44 @@ type ProjectTokenCreateRequest struct {
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
Role string `protobuf:"bytes,3,opt,name=role,proto3" json:"role,omitempty"`
// expiresIn represents a duration in seconds
ExpiresIn int64 `protobuf:"varint,4,opt,name=expiresIn,proto3" json:"expiresIn,omitempty"`
ExpiresIn int64 `protobuf:"varint,4,opt,name=expiresIn,proto3" json:"expiresIn,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProjectTokenCreateRequest) Reset() { *m = ProjectTokenCreateRequest{} }
func (m *ProjectTokenCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenCreateRequest) ProtoMessage() {}
func (*ProjectTokenCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorProject, []int{2} }
func (m *ProjectTokenCreateRequest) Reset() { *m = ProjectTokenCreateRequest{} }
func (m *ProjectTokenCreateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenCreateRequest) ProtoMessage() {}
func (*ProjectTokenCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_project_082822b5d17b8c4e, []int{2}
}
func (m *ProjectTokenCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectTokenCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProjectTokenCreateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ProjectTokenCreateRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectTokenCreateRequest.Merge(dst, src)
}
func (m *ProjectTokenCreateRequest) XXX_Size() int {
return m.Size()
}
func (m *ProjectTokenCreateRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectTokenCreateRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ProjectTokenCreateRequest proto.InternalMessageInfo
func (m *ProjectTokenCreateRequest) GetProject() string {
if m != nil {
@@ -141,13 +221,44 @@ func (m *ProjectTokenCreateRequest) GetExpiresIn() int64 {
// ProjectTokenResponse wraps the created token or returns an empty string if deleted.
type ProjectTokenResponse struct {
Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProjectTokenResponse) Reset() { *m = ProjectTokenResponse{} }
func (m *ProjectTokenResponse) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenResponse) ProtoMessage() {}
func (*ProjectTokenResponse) Descriptor() ([]byte, []int) { return fileDescriptorProject, []int{3} }
func (m *ProjectTokenResponse) Reset() { *m = ProjectTokenResponse{} }
func (m *ProjectTokenResponse) String() string { return proto.CompactTextString(m) }
func (*ProjectTokenResponse) ProtoMessage() {}
func (*ProjectTokenResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_project_082822b5d17b8c4e, []int{3}
}
func (m *ProjectTokenResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProjectTokenResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ProjectTokenResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectTokenResponse.Merge(dst, src)
}
func (m *ProjectTokenResponse) XXX_Size() int {
return m.Size()
}
func (m *ProjectTokenResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectTokenResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ProjectTokenResponse proto.InternalMessageInfo
func (m *ProjectTokenResponse) GetToken() string {
if m != nil {
@@ -158,13 +269,44 @@ func (m *ProjectTokenResponse) GetToken() string {
// ProjectQuery is a query for Project resources
type ProjectQuery struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProjectQuery) Reset() { *m = ProjectQuery{} }
func (m *ProjectQuery) String() string { return proto.CompactTextString(m) }
func (*ProjectQuery) ProtoMessage() {}
func (*ProjectQuery) Descriptor() ([]byte, []int) { return fileDescriptorProject, []int{4} }
func (m *ProjectQuery) Reset() { *m = ProjectQuery{} }
func (m *ProjectQuery) String() string { return proto.CompactTextString(m) }
func (*ProjectQuery) ProtoMessage() {}
func (*ProjectQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_project_082822b5d17b8c4e, []int{4}
}
func (m *ProjectQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProjectQuery.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ProjectQuery) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectQuery.Merge(dst, src)
}
func (m *ProjectQuery) XXX_Size() int {
return m.Size()
}
func (m *ProjectQuery) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectQuery.DiscardUnknown(m)
}
var xxx_messageInfo_ProjectQuery proto.InternalMessageInfo
func (m *ProjectQuery) GetName() string {
if m != nil {
@@ -174,15 +316,46 @@ func (m *ProjectQuery) GetName() string {
}
type ProjectUpdateRequest struct {
Project *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
Project *v1alpha1.AppProject `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProjectUpdateRequest) Reset() { *m = ProjectUpdateRequest{} }
func (m *ProjectUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectUpdateRequest) ProtoMessage() {}
func (*ProjectUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorProject, []int{5} }
func (m *ProjectUpdateRequest) Reset() { *m = ProjectUpdateRequest{} }
func (m *ProjectUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*ProjectUpdateRequest) ProtoMessage() {}
func (*ProjectUpdateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_project_082822b5d17b8c4e, []int{5}
}
func (m *ProjectUpdateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProjectUpdateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ProjectUpdateRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectUpdateRequest.Merge(dst, src)
}
func (m *ProjectUpdateRequest) XXX_Size() int {
return m.Size()
}
func (m *ProjectUpdateRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectUpdateRequest.DiscardUnknown(m)
}
func (m *ProjectUpdateRequest) GetProject() *github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject {
var xxx_messageInfo_ProjectUpdateRequest proto.InternalMessageInfo
func (m *ProjectUpdateRequest) GetProject() *v1alpha1.AppProject {
if m != nil {
return m.Project
}
@@ -190,12 +363,43 @@ func (m *ProjectUpdateRequest) GetProject() *github_com_argoproj_argo_cd_pkg_api
}
type EmptyResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
func (*EmptyResponse) ProtoMessage() {}
func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorProject, []int{6} }
func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
func (*EmptyResponse) ProtoMessage() {}
func (*EmptyResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_project_082822b5d17b8c4e, []int{6}
}
func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *EmptyResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_EmptyResponse.Merge(dst, src)
}
func (m *EmptyResponse) XXX_Size() int {
return m.Size()
}
func (m *EmptyResponse) XXX_DiscardUnknown() {
xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
}
var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
func init() {
proto.RegisterType((*ProjectCreateRequest)(nil), "project.ProjectCreateRequest")
@@ -223,17 +427,17 @@ type ProjectServiceClient interface {
// Delete a new project token.
DeleteToken(ctx context.Context, in *ProjectTokenDeleteRequest, opts ...grpc.CallOption) (*EmptyResponse, error)
// Create a new project.
Create(ctx context.Context, in *ProjectCreateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error)
Create(ctx context.Context, in *ProjectCreateRequest, opts ...grpc.CallOption) (*v1alpha1.AppProject, error)
// List returns list of projects
List(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProjectList, error)
List(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*v1alpha1.AppProjectList, error)
// Get returns a project by name
Get(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error)
Get(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*v1alpha1.AppProject, error)
// Update updates a project
Update(ctx context.Context, in *ProjectUpdateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error)
Update(ctx context.Context, in *ProjectUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.AppProject, error)
// Delete deletes a project
Delete(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*EmptyResponse, error)
// ListEvents returns a list of project events
ListEvents(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*k8s_io_api_core_v1.EventList, error)
ListEvents(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*v1.EventList, error)
}
type projectServiceClient struct {
@@ -246,7 +450,7 @@ func NewProjectServiceClient(cc *grpc.ClientConn) ProjectServiceClient {
func (c *projectServiceClient) CreateToken(ctx context.Context, in *ProjectTokenCreateRequest, opts ...grpc.CallOption) (*ProjectTokenResponse, error) {
out := new(ProjectTokenResponse)
err := grpc.Invoke(ctx, "/project.ProjectService/CreateToken", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/project.ProjectService/CreateToken", in, out, opts...)
if err != nil {
return nil, err
}
@@ -255,43 +459,43 @@ func (c *projectServiceClient) CreateToken(ctx context.Context, in *ProjectToken
func (c *projectServiceClient) DeleteToken(ctx context.Context, in *ProjectTokenDeleteRequest, opts ...grpc.CallOption) (*EmptyResponse, error) {
out := new(EmptyResponse)
err := grpc.Invoke(ctx, "/project.ProjectService/DeleteToken", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/project.ProjectService/DeleteToken", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *projectServiceClient) Create(ctx context.Context, in *ProjectCreateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject)
err := grpc.Invoke(ctx, "/project.ProjectService/Create", in, out, c.cc, opts...)
func (c *projectServiceClient) Create(ctx context.Context, in *ProjectCreateRequest, opts ...grpc.CallOption) (*v1alpha1.AppProject, error) {
out := new(v1alpha1.AppProject)
err := c.cc.Invoke(ctx, "/project.ProjectService/Create", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *projectServiceClient) List(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProjectList, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProjectList)
err := grpc.Invoke(ctx, "/project.ProjectService/List", in, out, c.cc, opts...)
func (c *projectServiceClient) List(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*v1alpha1.AppProjectList, error) {
out := new(v1alpha1.AppProjectList)
err := c.cc.Invoke(ctx, "/project.ProjectService/List", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *projectServiceClient) Get(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject)
err := grpc.Invoke(ctx, "/project.ProjectService/Get", in, out, c.cc, opts...)
func (c *projectServiceClient) Get(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*v1alpha1.AppProject, error) {
out := new(v1alpha1.AppProject)
err := c.cc.Invoke(ctx, "/project.ProjectService/Get", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *projectServiceClient) Update(ctx context.Context, in *ProjectUpdateRequest, opts ...grpc.CallOption) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error) {
out := new(github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject)
err := grpc.Invoke(ctx, "/project.ProjectService/Update", in, out, c.cc, opts...)
func (c *projectServiceClient) Update(ctx context.Context, in *ProjectUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.AppProject, error) {
out := new(v1alpha1.AppProject)
err := c.cc.Invoke(ctx, "/project.ProjectService/Update", in, out, opts...)
if err != nil {
return nil, err
}
@@ -300,16 +504,16 @@ func (c *projectServiceClient) Update(ctx context.Context, in *ProjectUpdateRequ
func (c *projectServiceClient) Delete(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*EmptyResponse, error) {
out := new(EmptyResponse)
err := grpc.Invoke(ctx, "/project.ProjectService/Delete", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/project.ProjectService/Delete", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *projectServiceClient) ListEvents(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*k8s_io_api_core_v1.EventList, error) {
out := new(k8s_io_api_core_v1.EventList)
err := grpc.Invoke(ctx, "/project.ProjectService/ListEvents", in, out, c.cc, opts...)
func (c *projectServiceClient) ListEvents(ctx context.Context, in *ProjectQuery, opts ...grpc.CallOption) (*v1.EventList, error) {
out := new(v1.EventList)
err := c.cc.Invoke(ctx, "/project.ProjectService/ListEvents", in, out, opts...)
if err != nil {
return nil, err
}
@@ -324,17 +528,17 @@ type ProjectServiceServer interface {
// Delete a new project token.
DeleteToken(context.Context, *ProjectTokenDeleteRequest) (*EmptyResponse, error)
// Create a new project.
Create(context.Context, *ProjectCreateRequest) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error)
Create(context.Context, *ProjectCreateRequest) (*v1alpha1.AppProject, error)
// List returns list of projects
List(context.Context, *ProjectQuery) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProjectList, error)
List(context.Context, *ProjectQuery) (*v1alpha1.AppProjectList, error)
// Get returns a project by name
Get(context.Context, *ProjectQuery) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error)
Get(context.Context, *ProjectQuery) (*v1alpha1.AppProject, error)
// Update updates a project
Update(context.Context, *ProjectUpdateRequest) (*github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject, error)
Update(context.Context, *ProjectUpdateRequest) (*v1alpha1.AppProject, error)
// Delete deletes a project
Delete(context.Context, *ProjectQuery) (*EmptyResponse, error)
// ListEvents returns a list of project events
ListEvents(context.Context, *ProjectQuery) (*k8s_io_api_core_v1.EventList, error)
ListEvents(context.Context, *ProjectQuery) (*v1.EventList, error)
}
func RegisterProjectServiceServer(s *grpc.Server, srv ProjectServiceServer) {
@@ -551,6 +755,9 @@ func (m *ProjectCreateRequest) MarshalTo(dAtA []byte) (int, error) {
}
i += n1
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -586,6 +793,9 @@ func (m *ProjectTokenDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintProject(dAtA, i, uint64(m.Iat))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -627,6 +837,9 @@ func (m *ProjectTokenCreateRequest) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintProject(dAtA, i, uint64(m.ExpiresIn))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -651,6 +864,9 @@ func (m *ProjectTokenResponse) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintProject(dAtA, i, uint64(len(m.Token)))
i += copy(dAtA[i:], m.Token)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -675,6 +891,9 @@ func (m *ProjectQuery) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintProject(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -703,6 +922,9 @@ func (m *ProjectUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
}
i += n2
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -721,6 +943,9 @@ func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -740,6 +965,9 @@ func (m *ProjectCreateRequest) Size() (n int) {
l = m.Project.Size()
n += 1 + l + sovProject(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -757,6 +985,9 @@ func (m *ProjectTokenDeleteRequest) Size() (n int) {
if m.Iat != 0 {
n += 1 + sovProject(uint64(m.Iat))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -778,6 +1009,9 @@ func (m *ProjectTokenCreateRequest) Size() (n int) {
if m.ExpiresIn != 0 {
n += 1 + sovProject(uint64(m.ExpiresIn))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -788,6 +1022,9 @@ func (m *ProjectTokenResponse) Size() (n int) {
if l > 0 {
n += 1 + l + sovProject(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -798,6 +1035,9 @@ func (m *ProjectQuery) Size() (n int) {
if l > 0 {
n += 1 + l + sovProject(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -808,12 +1048,18 @@ func (m *ProjectUpdateRequest) Size() (n int) {
l = m.Project.Size()
n += 1 + l + sovProject(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *EmptyResponse) Size() (n int) {
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -886,7 +1132,7 @@ func (m *ProjectCreateRequest) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Project == nil {
m.Project = &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject{}
m.Project = &v1alpha1.AppProject{}
}
if err := m.Project.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -904,6 +1150,7 @@ func (m *ProjectCreateRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1031,6 +1278,7 @@ func (m *ProjectTokenDeleteRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1187,6 +1435,7 @@ func (m *ProjectTokenCreateRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1266,6 +1515,7 @@ func (m *ProjectTokenResponse) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1345,6 +1595,7 @@ func (m *ProjectQuery) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1410,7 +1661,7 @@ func (m *ProjectUpdateRequest) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Project == nil {
m.Project = &github_com_argoproj_argo_cd_pkg_apis_application_v1alpha1.AppProject{}
m.Project = &v1alpha1.AppProject{}
}
if err := m.Project.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -1428,6 +1679,7 @@ func (m *ProjectUpdateRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1478,6 +1730,7 @@ func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -1592,52 +1845,54 @@ var (
ErrIntOverflowProject = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("server/project/project.proto", fileDescriptorProject) }
var fileDescriptorProject = []byte{
// 689 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x5d, 0x6b, 0x13, 0x4d,
0x14, 0x66, 0x9a, 0xbe, 0x79, 0xed, 0xc4, 0x8f, 0x32, 0xb4, 0x9a, 0xc6, 0x36, 0x86, 0xb9, 0x90,
0x12, 0xec, 0x0c, 0x69, 0x15, 0x8a, 0x77, 0x7e, 0x14, 0x29, 0x78, 0xa1, 0x51, 0x41, 0xf4, 0xa2,
0x4c, 0x37, 0x87, 0xed, 0x36, 0xc9, 0xce, 0x38, 0x3b, 0x5d, 0x2d, 0xa5, 0x20, 0xc5, 0x1b, 0xf5,
0xd2, 0x9f, 0x20, 0xf8, 0x5b, 0xbc, 0x14, 0xfc, 0x03, 0x12, 0xfc, 0x21, 0x32, 0xb3, 0xbb, 0x49,
0xb6, 0xe9, 0x16, 0x84, 0xe0, 0x55, 0xce, 0x9e, 0x39, 0x73, 0x9e, 0xe7, 0x39, 0x1f, 0x19, 0xbc,
0x1c, 0x81, 0x8e, 0x41, 0x73, 0xa5, 0xe5, 0x3e, 0x78, 0x26, 0xfb, 0x65, 0x4a, 0x4b, 0x23, 0xc9,
0xff, 0xe9, 0x67, 0x6d, 0xc1, 0x97, 0xbe, 0x74, 0x3e, 0x6e, 0xad, 0xe4, 0xb8, 0xb6, 0xec, 0x4b,
0xe9, 0xf7, 0x80, 0x0b, 0x15, 0x70, 0x11, 0x86, 0xd2, 0x08, 0x13, 0xc8, 0x30, 0x4a, 0x4f, 0x69,
0x77, 0x33, 0x62, 0x81, 0x74, 0xa7, 0x9e, 0xd4, 0xc0, 0xe3, 0x16, 0xf7, 0x21, 0x04, 0x2d, 0x0c,
0x74, 0xd2, 0x98, 0xdb, 0xa3, 0x98, 0xbe, 0xf0, 0xf6, 0x82, 0x10, 0xf4, 0x21, 0x57, 0x5d, 0xdf,
0x3a, 0x22, 0xde, 0x07, 0x23, 0xce, 0xba, 0xb5, 0xed, 0x07, 0x66, 0xef, 0x60, 0x97, 0x79, 0xb2,
0xcf, 0x85, 0x76, 0xc4, 0xf6, 0x9d, 0xb1, 0xe6, 0x75, 0x46, 0xb7, 0x85, 0x52, 0xbd, 0xc0, 0x73,
0x94, 0x78, 0xdc, 0x12, 0x3d, 0xb5, 0x27, 0x26, 0x52, 0xd1, 0xb7, 0x78, 0xe1, 0x49, 0xa2, 0xf1,
0x81, 0x06, 0x61, 0xa0, 0x0d, 0x6f, 0x0e, 0x20, 0x32, 0x64, 0x07, 0x67, 0xda, 0xab, 0xa8, 0x81,
0x56, 0x2b, 0xeb, 0x5b, 0x6c, 0x04, 0xca, 0x32, 0x50, 0x67, 0xec, 0x78, 0x1d, 0xa6, 0xba, 0x3e,
0xb3, 0xa0, 0x6c, 0x0c, 0x94, 0x65, 0xa0, 0xec, 0x9e, 0x52, 0x29, 0x48, 0x3b, 0xcb, 0x4a, 0x5f,
0xe3, 0xa5, 0xd4, 0xf7, 0x5c, 0x76, 0x21, 0x7c, 0x08, 0x3d, 0x18, 0xa1, 0x57, 0xf3, 0xe8, 0x73,
0xc3, 0x6b, 0x84, 0xe0, 0x59, 0x2d, 0x7b, 0x50, 0x9d, 0x71, 0x6e, 0x67, 0x93, 0x79, 0x5c, 0x0a,
0x84, 0xa9, 0x96, 0x1a, 0x68, 0xb5, 0xd4, 0xb6, 0x26, 0xfd, 0x88, 0xf2, 0xd9, 0xf3, 0xda, 0x8a,
0xb3, 0x37, 0x70, 0xa5, 0x03, 0x91, 0xa7, 0x03, 0x65, 0x05, 0xa4, 0x20, 0xe3, 0xae, 0x21, 0x7e,
0x69, 0x0c, 0x7f, 0x19, 0xcf, 0xc1, 0x3b, 0x15, 0x68, 0x88, 0xb6, 0xc3, 0xea, 0xac, 0x63, 0x31,
0x72, 0xd0, 0x5b, 0xc3, 0x0a, 0x3b, 0x2a, 0x6d, 0x88, 0x94, 0x0c, 0x23, 0x20, 0x0b, 0xf8, 0x3f,
0x63, 0x1d, 0x29, 0x87, 0xe4, 0x83, 0x52, 0x7c, 0x31, 0x8d, 0x7e, 0x7a, 0x00, 0xfa, 0xd0, 0xe2,
0x85, 0xa2, 0x0f, 0x69, 0x90, 0xb3, 0xc7, 0x7a, 0xf6, 0x42, 0x75, 0xfe, 0x65, 0xcf, 0xae, 0xe0,
0x4b, 0x5b, 0x7d, 0x65, 0x0e, 0x33, 0x0d, 0xeb, 0xdf, 0x2e, 0xe0, 0xcb, 0x69, 0xd4, 0x33, 0xd0,
0x71, 0xe0, 0x01, 0xf9, 0x84, 0x70, 0x25, 0x29, 0xb7, 0x93, 0x4b, 0x28, 0xcb, 0x56, 0xaa, 0xb0,
0x21, 0xb5, 0x95, 0x33, 0x63, 0x32, 0x14, 0xba, 0x79, 0xf2, 0xf3, 0xf7, 0x97, 0x99, 0x75, 0xba,
0xe6, 0x56, 0x29, 0x6e, 0x65, 0x4b, 0x1a, 0xf1, 0xa3, 0xd4, 0x3a, 0xe6, 0xb6, 0x11, 0x11, 0x3f,
0xb2, 0x3f, 0xc7, 0xdc, 0x95, 0xf2, 0x2e, 0x6a, 0x92, 0xf7, 0x08, 0x57, 0x92, 0xc9, 0x3a, 0x8f,
0x4c, 0x6e, 0xf6, 0x6a, 0x57, 0x87, 0x31, 0x39, 0xad, 0xf4, 0x8e, 0x63, 0xc1, 0x9b, 0x7f, 0xc7,
0x82, 0x7c, 0x46, 0xb8, 0x9c, 0xa8, 0x25, 0x13, 0x32, 0xf3, 0x55, 0x98, 0x4e, 0xb7, 0xe8, 0x75,
0xc7, 0x73, 0x91, 0xce, 0x9f, 0xe6, 0x69, 0x0b, 0x72, 0x82, 0xf0, 0xec, 0xe3, 0x20, 0x32, 0x64,
0xf1, 0x34, 0x17, 0x37, 0x6e, 0xb5, 0xed, 0xa9, 0x70, 0xb0, 0x08, 0xb4, 0xea, 0x78, 0x10, 0x32,
0xc1, 0x83, 0x7c, 0x40, 0xb8, 0xf4, 0x08, 0x0a, 0x39, 0x4c, 0xa9, 0x0e, 0x37, 0x1c, 0xfe, 0x12,
0xb9, 0x36, 0xd9, 0x2f, 0xbb, 0x45, 0xc7, 0xe4, 0x2b, 0xc2, 0xe5, 0x64, 0x81, 0x26, 0x3b, 0x93,
0x5b, 0xac, 0x69, 0x31, 0xda, 0x70, 0x8c, 0xd6, 0x6a, 0xab, 0x85, 0x13, 0xc4, 0xec, 0x3f, 0x7e,
0x47, 0x18, 0xc1, 0x1c, 0x45, 0xdb, 0xb1, 0x97, 0xb8, 0x9c, 0xcc, 0x67, 0x51, 0xb9, 0x8a, 0xe6,
0x35, 0xd5, 0xdf, 0x2c, 0xd4, 0xbf, 0x8f, 0xb1, 0x6d, 0xd4, 0x56, 0x0c, 0xa1, 0x89, 0x8a, 0xb2,
0xaf, 0xb0, 0xe4, 0x85, 0xb2, 0x0a, 0x99, 0x7d, 0xc5, 0x58, 0xdc, 0x62, 0xee, 0x8a, 0x6b, 0xf2,
0x4d, 0x07, 0xd2, 0x20, 0xf5, 0x02, 0x10, 0x0e, 0x2e, 0xfb, 0xfd, 0xcd, 0xef, 0x83, 0x3a, 0xfa,
0x31, 0xa8, 0xa3, 0x5f, 0x83, 0x3a, 0x7a, 0xd5, 0x3c, 0xef, 0xfd, 0xca, 0x3f, 0xc8, 0xbb, 0x65,
0xf7, 0x4e, 0x6d, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0x53, 0xd4, 0xec, 0x49, 0xa9, 0x07, 0x00,
0x00,
func init() {
proto.RegisterFile("server/project/project.proto", fileDescriptor_project_082822b5d17b8c4e)
}
var fileDescriptor_project_082822b5d17b8c4e = []byte{
// 697 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x5d, 0x6b, 0x13, 0x4d,
0x14, 0x66, 0x9a, 0xbe, 0x79, 0xdf, 0x4e, 0x5e, 0xb5, 0x0c, 0xad, 0xa6, 0xb1, 0x8d, 0x61, 0x2e,
0xa4, 0x04, 0x3b, 0x43, 0x5a, 0x85, 0xa2, 0x57, 0x7e, 0x14, 0x29, 0x78, 0xa1, 0x51, 0x41, 0xf4,
0xa2, 0x4c, 0x37, 0x87, 0xed, 0x36, 0xc9, 0xce, 0x38, 0x3b, 0x5d, 0x2d, 0x25, 0x37, 0x45, 0x04,
0xf5, 0xd2, 0x9f, 0xe0, 0xad, 0x3f, 0xc4, 0x4b, 0xc1, 0x3f, 0x20, 0xc5, 0x1f, 0x22, 0x33, 0xbb,
0x9b, 0x64, 0x9b, 0x6e, 0x41, 0x08, 0x5e, 0xe5, 0xec, 0x99, 0x33, 0xe7, 0x79, 0x9e, 0xf3, 0x91,
0xc1, 0xcb, 0x11, 0xe8, 0x18, 0x34, 0x57, 0x5a, 0xee, 0x83, 0x67, 0xb2, 0x5f, 0xa6, 0xb4, 0x34,
0x92, 0xfc, 0x9b, 0x7e, 0xd6, 0x16, 0x7c, 0xe9, 0x4b, 0xe7, 0xe3, 0xd6, 0x4a, 0x8e, 0x6b, 0xcb,
0xbe, 0x94, 0x7e, 0x0f, 0xb8, 0x50, 0x01, 0x17, 0x61, 0x28, 0x8d, 0x30, 0x81, 0x0c, 0xa3, 0xf4,
0x94, 0x76, 0x37, 0x23, 0x16, 0x48, 0x77, 0xea, 0x49, 0x0d, 0x3c, 0x6e, 0x71, 0x1f, 0x42, 0xd0,
0xc2, 0x40, 0x27, 0x8d, 0xb9, 0x39, 0x8a, 0xe9, 0x0b, 0x6f, 0x2f, 0x08, 0x41, 0x1f, 0x72, 0xd5,
0xf5, 0xad, 0x23, 0xe2, 0x7d, 0x30, 0xe2, 0xac, 0x5b, 0xdb, 0x7e, 0x60, 0xf6, 0x0e, 0x76, 0x99,
0x27, 0xfb, 0x5c, 0x68, 0x47, 0x6c, 0xdf, 0x19, 0x6b, 0x5e, 0x67, 0x74, 0x5b, 0x28, 0xd5, 0x0b,
0x3c, 0x47, 0x89, 0xc7, 0x2d, 0xd1, 0x53, 0x7b, 0x62, 0x22, 0x15, 0x7d, 0x83, 0x17, 0x1e, 0x27,
0x1a, 0xef, 0x6b, 0x10, 0x06, 0xda, 0xf0, 0xfa, 0x00, 0x22, 0x43, 0x76, 0x70, 0xa6, 0xbd, 0x8a,
0x1a, 0x68, 0xb5, 0xb2, 0xbe, 0xc5, 0x46, 0xa0, 0x2c, 0x03, 0x75, 0xc6, 0x8e, 0xd7, 0x61, 0xaa,
0xeb, 0x33, 0x0b, 0xca, 0xc6, 0x40, 0x59, 0x06, 0xca, 0xee, 0x2a, 0x95, 0x82, 0xb4, 0xb3, 0xac,
0xf4, 0x15, 0x5e, 0x4a, 0x7d, 0xcf, 0x64, 0x17, 0xc2, 0x07, 0xd0, 0x83, 0x11, 0x7a, 0x35, 0x8f,
0x3e, 0x37, 0xbc, 0x46, 0x08, 0x9e, 0xd5, 0xb2, 0x07, 0xd5, 0x19, 0xe7, 0x76, 0x36, 0x99, 0xc7,
0xa5, 0x40, 0x98, 0x6a, 0xa9, 0x81, 0x56, 0x4b, 0x6d, 0x6b, 0xd2, 0x0f, 0x28, 0x9f, 0x3d, 0xaf,
0xad, 0x38, 0x7b, 0x03, 0x57, 0x3a, 0x10, 0x79, 0x3a, 0x50, 0x56, 0x40, 0x0a, 0x32, 0xee, 0x1a,
0xe2, 0x97, 0xc6, 0xf0, 0x97, 0xf1, 0x1c, 0xbc, 0x55, 0x81, 0x86, 0x68, 0x3b, 0xac, 0xce, 0x3a,
0x16, 0x23, 0x07, 0xbd, 0x31, 0xac, 0xb0, 0xa3, 0xd2, 0x86, 0x48, 0xc9, 0x30, 0x02, 0xb2, 0x80,
0xff, 0x31, 0xd6, 0x91, 0x72, 0x48, 0x3e, 0x28, 0xc5, 0xff, 0xa7, 0xd1, 0x4f, 0x0e, 0x40, 0x1f,
0x5a, 0xbc, 0x50, 0xf4, 0x21, 0x0d, 0x72, 0xf6, 0x58, 0xcf, 0x9e, 0xab, 0xce, 0xdf, 0xec, 0xd9,
0x25, 0x7c, 0x61, 0xab, 0xaf, 0xcc, 0x61, 0xa6, 0x61, 0xfd, 0xeb, 0x7f, 0xf8, 0x62, 0x1a, 0xf5,
0x14, 0x74, 0x1c, 0x78, 0x40, 0x3e, 0x22, 0x5c, 0x49, 0xca, 0xed, 0xe4, 0x12, 0xca, 0xb2, 0x95,
0x2a, 0x6c, 0x48, 0x6d, 0xe5, 0xcc, 0x98, 0x0c, 0x85, 0x6e, 0x1e, 0xff, 0xf8, 0xf5, 0x79, 0x66,
0x9d, 0xae, 0xb9, 0x55, 0x8a, 0x5b, 0xd9, 0x92, 0x46, 0xfc, 0x28, 0xb5, 0x06, 0xdc, 0x36, 0x22,
0xe2, 0x47, 0xf6, 0x67, 0xc0, 0x5d, 0x29, 0x6f, 0xa3, 0x26, 0x79, 0x8f, 0x70, 0x25, 0x99, 0xac,
0xf3, 0xc8, 0xe4, 0x66, 0xaf, 0x76, 0x79, 0x18, 0x93, 0xd3, 0x4a, 0xef, 0x38, 0x16, 0xb7, 0x9a,
0x1b, 0x7f, 0xc4, 0x82, 0x1f, 0x05, 0xc2, 0x0c, 0xc8, 0x27, 0x84, 0xcb, 0x89, 0x66, 0x32, 0x21,
0x36, 0x5f, 0x8b, 0xe9, 0xf4, 0x8c, 0x5e, 0x75, 0x6c, 0x17, 0xe9, 0xfc, 0x69, 0xb6, 0xb6, 0x2c,
0xc7, 0x08, 0xcf, 0x3e, 0x0a, 0x22, 0x43, 0x16, 0x4f, 0x73, 0x71, 0x43, 0x57, 0xdb, 0x9e, 0x0a,
0x07, 0x8b, 0x40, 0xab, 0x8e, 0x07, 0x21, 0x13, 0x3c, 0xc8, 0x3b, 0x84, 0x4b, 0x0f, 0xa1, 0x90,
0xc3, 0x94, 0xea, 0x70, 0xcd, 0xe1, 0x2f, 0x91, 0x2b, 0x93, 0x5d, 0xb3, 0xbb, 0x34, 0x20, 0x5f,
0x10, 0x2e, 0x27, 0x6b, 0x34, 0xd9, 0x99, 0xdc, 0x7a, 0x4d, 0x8b, 0xd1, 0x86, 0x63, 0xb4, 0x56,
0x5b, 0x2d, 0x9c, 0x23, 0x66, 0xff, 0xf7, 0x3b, 0xc2, 0x08, 0xe6, 0x28, 0xda, 0x8e, 0xbd, 0xc0,
0xe5, 0x64, 0x4a, 0x8b, 0xca, 0x55, 0x34, 0xb5, 0xa9, 0xfe, 0x66, 0xa1, 0xfe, 0x7d, 0x8c, 0x6d,
0xa3, 0xb6, 0x62, 0x08, 0x4d, 0x54, 0x94, 0x7d, 0x85, 0x25, 0xef, 0x94, 0x55, 0xc8, 0xec, 0x5b,
0xc6, 0xe2, 0x16, 0x73, 0x57, 0x5c, 0x93, 0xaf, 0x3b, 0x90, 0x06, 0xa9, 0x17, 0x80, 0x70, 0x70,
0xd9, 0xef, 0x6d, 0x7e, 0x3b, 0xa9, 0xa3, 0xef, 0x27, 0x75, 0xf4, 0xf3, 0xa4, 0x8e, 0x5e, 0x36,
0xcf, 0x7b, 0xc5, 0xf2, 0xcf, 0xf2, 0x6e, 0xd9, 0xbd, 0x56, 0x1b, 0xbf, 0x03, 0x00, 0x00, 0xff,
0xff, 0x41, 0x42, 0x44, 0x21, 0xaf, 0x07, 0x00, 0x00,
}

View File

@@ -70,10 +70,6 @@ func request_ProjectService_CreateToken_0(ctx context.Context, marshaler runtime
}
var (
filter_ProjectService_DeleteToken_0 = &utilities.DoubleArray{Encoding: map[string]int{"project": 0, "role": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
)
func request_ProjectService_DeleteToken_0(ctx context.Context, marshaler runtime.Marshaler, client ProjectServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ProjectTokenDeleteRequest
var metadata runtime.ServerMetadata
@@ -107,8 +103,15 @@ func request_ProjectService_DeleteToken_0(ctx context.Context, marshaler runtime
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "role", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ProjectService_DeleteToken_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
val, ok = pathParams["iat"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "iat")
}
protoReq.Iat, err = runtime.Int64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "iat", err)
}
msg, err := client.DeleteToken(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
@@ -534,7 +537,7 @@ func RegisterProjectServiceHandlerClient(ctx context.Context, mux *runtime.Serve
var (
pattern_ProjectService_CreateToken_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "projects", "project", "roles", "role", "token"}, ""))
pattern_ProjectService_DeleteToken_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "projects", "project", "roles", "role", "token"}, ""))
pattern_ProjectService_DeleteToken_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"api", "v1", "projects", "project", "roles", "role", "token", "iat"}, ""))
pattern_ProjectService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "projects"}, ""))

View File

@@ -63,7 +63,7 @@ service ProjectService {
// Delete a new project token.
rpc DeleteToken(ProjectTokenDeleteRequest) returns (EmptyResponse) {
option (google.api.http).delete = "/api/v1/projects/{project}/roles/{role}/token";
option (google.api.http).delete = "/api/v1/projects/{project}/roles/{role}/token/{iat}";
}
// Create a new project.

View File

@@ -3,6 +3,7 @@ package project
import (
"context"
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -326,4 +327,25 @@ func TestProjectServer(t *testing.T) {
expectedErr := fmt.Sprintf("rpc error: code = InvalidArgument desc = incorrect policy format for '%s' as effect can only have value 'allow' or 'deny'", invalidPolicy)
assert.EqualError(t, err, expectedErr)
})
t.Run("TestNormalizeProjectRolePolicies", func(t *testing.T) {
action := "create"
object := "testApplication"
roleName := "testRole"
effect := "allow"
projWithRole := existingProj.DeepCopy()
role := v1alpha1.ProjectRole{Name: roleName, JWTTokens: []v1alpha1.JWTToken{{IssuedAt: 1}}}
noSpacesPolicyTemplate := strings.Replace(policyTemplate, " ", "", -1)
invalidPolicy := fmt.Sprintf(noSpacesPolicyTemplate, projWithRole.Name, roleName, action, projWithRole.Name, object, effect)
role.Policies = append(role.Policies, invalidPolicy)
projWithRole.Spec.Roles = append(projWithRole.Spec.Roles, role)
projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projWithRole), enforcer, util.NewKeyLock(), nil)
request := &ProjectUpdateRequest{Project: projWithRole}
updateProj, err := projectServer.Update(context.Background(), request)
assert.Nil(t, err)
expectedPolicy := fmt.Sprintf(policyTemplate, projWithRole.Name, roleName, action, projWithRole.Name, object, effect)
assert.Equal(t, expectedPolicy, updateProj.Spec.Roles[0].Policies[0])
})
}

View File

@@ -2,6 +2,7 @@ package repository
import (
"path"
"path/filepath"
"reflect"
"github.com/ghodss/yaml"
@@ -56,7 +57,7 @@ func (s *Server) List(ctx context.Context, q *RepoQuery) (*appsv1.RepositoryList
// ListKsonnetApps returns list of Ksonnet apps in the repo
func (s *Server) ListApps(ctx context.Context, q *RepoAppsQuery) (*RepoAppsResponse, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories/apps", "get", q.Repo) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories", "get", q.Repo) {
return nil, grpc.ErrPermissionDenied
}
repo, err := s.db.GetRepository(ctx, q.Repo)
@@ -109,7 +110,7 @@ func (s *Server) ListApps(ctx context.Context, q *RepoAppsQuery) (*RepoAppsRespo
}
func (s *Server) GetAppDetails(ctx context.Context, q *RepoAppDetailsQuery) (*RepoAppDetailsResponse, error) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories/apps", "get", q.Repo) {
if !s.enf.EnforceClaims(ctx.Value("claims"), "repositories", "get", q.Repo) {
return nil, grpc.ErrPermissionDenied
}
repo, err := s.db.GetRepository(ctx, q.Repo)
@@ -158,15 +159,23 @@ func (s *Server) GetAppDetails(ctx context.Context, q *RepoAppDetailsQuery) (*Re
if err != nil {
return nil, err
}
appDir := path.Dir(q.Path)
valuesFilesRes, err := repoClient.ListDir(ctx, &repository.ListDirRequest{
Revision: revision,
Repo: repo,
Path: path.Join(path.Dir(q.Path), "*values*.yaml"),
Path: path.Join(appDir, "*values*.yaml"),
})
if err != nil {
return nil, err
}
appSpec.ValueFiles = valuesFilesRes.Items
appSpec.ValueFiles = make([]string, len(valuesFilesRes.Items))
for i := range valuesFilesRes.Items {
valueFilePath, err := filepath.Rel(appDir, valuesFilesRes.Items[i])
if err != nil {
return nil, err
}
appSpec.ValueFiles[i] = valueFilePath
}
return &RepoAppDetailsResponse{
Type: string(appSourceType),
Helm: &appSpec,

File diff suppressed because it is too large Load Diff

View File

@@ -32,6 +32,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
@@ -39,10 +40,13 @@ import (
"github.com/argoproj/argo-cd/pkg/apiclient"
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
appinformer "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"
applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver"
"github.com/argoproj/argo-cd/server/account"
"github.com/argoproj/argo-cd/server/application"
"github.com/argoproj/argo-cd/server/cluster"
"github.com/argoproj/argo-cd/server/metrics"
"github.com/argoproj/argo-cd/server/project"
"github.com/argoproj/argo-cd/server/repository"
"github.com/argoproj/argo-cd/server/session"
@@ -56,6 +60,7 @@ import (
"github.com/argoproj/argo-cd/util/healthz"
jsonutil "github.com/argoproj/argo-cd/util/json"
jwtutil "github.com/argoproj/argo-cd/util/jwt"
"github.com/argoproj/argo-cd/util/kube"
projectutil "github.com/argoproj/argo-cd/util/project"
"github.com/argoproj/argo-cd/util/rbac"
util_session "github.com/argoproj/argo-cd/util/session"
@@ -105,19 +110,22 @@ type ArgoCDServer struct {
sessionMgr *util_session.SessionManager
settingsMgr *settings_util.SettingsManager
enf *rbac.Enforcer
appInformer cache.SharedIndexInformer
appLister applister.ApplicationLister
// stopCh is the channel which when closed, will shutdown the ArgoCD server
stopCh chan struct{}
}
type ArgoCDServerOpts struct {
DisableAuth bool
Insecure bool
Namespace string
StaticAssetsDir string
KubeClientset kubernetes.Interface
AppClientset appclientset.Interface
RepoClientset reposerver.Clientset
DisableAuth bool
Insecure bool
Namespace string
StaticAssetsDir string
KubeClientset kubernetes.Interface
AppClientset appclientset.Interface
RepoClientset reposerver.Clientset
TLSConfigCustomizer tlsutil.ConfigCustomizer
}
// initializeDefaultProject creates the default project if it does not already exist
@@ -143,7 +151,15 @@ func initializeSettings(settingsMgr *settings_util.SettingsManager, opts ArgoCDS
defaultPassword, err := os.Hostname()
errors.CheckError(err)
cdSettings := settings_util.UpdateSettings(defaultPassword, settingsMgr, false, false, opts.Namespace)
cdSettings, err := settings_util.UpdateSettings(defaultPassword, settingsMgr, false, false, opts.Namespace)
if err != nil {
// assume settings are initialized by another instance of api server
if apierrors.IsConflict(err) {
return settingsMgr.GetSettings()
} else {
log.Fatal(err)
}
}
return cdSettings, nil
}
@@ -163,6 +179,11 @@ func NewServer(opts ArgoCDServerOpts) *ArgoCDServer {
err = enf.SetBuiltinPolicy(builtinPolicy)
errors.CheckError(err)
enf.EnableLog(os.Getenv(common.EnvVarRBACDebug) == "1")
factory := appinformer.NewFilteredSharedInformerFactory(opts.AppClientset, 0, opts.Namespace, func(options *metav1.ListOptions) {})
appInformer := factory.Argoproj().V1alpha1().Applications().Informer()
appLister := factory.Argoproj().V1alpha1().Applications().Lister()
return &ArgoCDServer{
ArgoCDServerOpts: opts,
log: log.NewEntry(log.New()),
@@ -170,6 +191,8 @@ func NewServer(opts ArgoCDServerOpts) *ArgoCDServer {
sessionMgr: sessionMgr,
settingsMgr: settingsMgr,
enf: enf,
appInformer: appInformer,
appLister: appLister,
}
}
@@ -226,10 +249,13 @@ func (a *ArgoCDServer) Run(ctx context.Context, port int) {
httpsL = tlsm.Match(cmux.HTTP1Fast())
grpcL = tlsm.Match(cmux.Any())
}
metricsServ := metrics.NewMetricsServer(8082, a.appLister)
// Start the muxed listeners for our servers
log.Infof("argocd %s serving on port %d (url: %s, tls: %v, namespace: %s, sso: %v)",
argocd.GetVersion(), port, a.settings.URL, a.useTLS(), a.Namespace, a.settings.IsSSOConfigured())
go a.appInformer.Run(ctx.Done())
go func() { a.checkServeErr("grpcS", grpcS.Serve(grpcL)) }()
go func() { a.checkServeErr("httpS", httpS.Serve(httpL)) }()
if a.useTLS() {
@@ -239,6 +265,10 @@ func (a *ArgoCDServer) Run(ctx context.Context, port int) {
go a.watchSettings(ctx)
go a.rbacPolicyLoader(ctx)
go func() { a.checkServeErr("tcpm", tcpm.Serve()) }()
go func() { a.checkServeErr("metrics", metricsServ.ListenAndServe()) }()
if !cache.WaitForCacheSync(ctx.Done(), a.appInformer.HasSynced) {
log.Fatal("Timed out waiting for caches to sync")
}
a.stopCh = make(chan struct{})
<-a.stopCh
@@ -337,6 +367,8 @@ func (a *ArgoCDServer) newGRPCServer() *grpc.Server {
sensitiveMethods := map[string]bool{
"/session.SessionService/Create": true,
"/account.AccountService/UpdatePassword": true,
"/repository.RepositoryService/Create": true,
"/repository.RepositoryService/Update": true,
}
// NOTE: notice we do not configure the gRPC server here with TLS (e.g. grpc.Creds(creds))
// This is because TLS handshaking occurs in cmux handling
@@ -366,7 +398,7 @@ func (a *ArgoCDServer) newGRPCServer() *grpc.Server {
repoService := repository.NewServer(a.RepoClientset, db, a.enf)
sessionService := session.NewServer(a.sessionMgr)
projectLock := util.NewKeyLock()
applicationService := application.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.RepoClientset, db, a.enf, projectLock)
applicationService := application.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.RepoClientset, kube.KubectlCmd{}, db, a.enf, projectLock)
projectService := project.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.enf, projectLock, a.sessionMgr)
settingsService := settings.NewServer(a.settingsMgr)
accountService := account.NewServer(a.sessionMgr, a.settingsMgr)
@@ -412,6 +444,7 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int) *http.Server
// so we need to supply the same certificates to establish the connections that a normal,
// external gRPC client would need.
tlsConfig := a.settings.TLSConfig()
a.ArgoCDServerOpts.TLSConfigCustomizer(tlsConfig)
tlsConfig.InsecureSkipVerify = true
dCreds := credentials.NewTLS(tlsConfig)
dOpts = append(dOpts, grpc.WithTransportCredentials(dCreds))

View File

@@ -43,7 +43,7 @@ func (s *Server) Create(ctx context.Context, q *SessionCreateRequest) (*SessionR
// Delete an authentication cookie from the client. This makes sense only for the Web client.
func (s *Server) Delete(ctx context.Context, q *SessionDeleteRequest) (*SessionResponse, error) {
return &SessionResponse{""}, nil
return &SessionResponse{Token: ""}, nil
}
// AuthFuncOverride overrides the authentication function and let us not require auth to receive auth.

View File

@@ -1,30 +1,21 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: server/session/session.proto
/*
Package session is a generated protocol buffer package.
package session // import "github.com/argoproj/argo-cd/server/session"
/*
Session Service
Session Service API performs CRUD actions against session resources
It is generated from these files:
server/session/session.proto
It has these top-level messages:
SessionCreateRequest
SessionDeleteRequest
SessionResponse
*/
package session
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import _ "k8s.io/api/core/v1"
import _ "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
@@ -44,15 +35,46 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// SessionCreateRequest is for logging in.
type SessionCreateRequest struct {
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionCreateRequest) Reset() { *m = SessionCreateRequest{} }
func (m *SessionCreateRequest) String() string { return proto.CompactTextString(m) }
func (*SessionCreateRequest) ProtoMessage() {}
func (*SessionCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorSession, []int{0} }
func (m *SessionCreateRequest) Reset() { *m = SessionCreateRequest{} }
func (m *SessionCreateRequest) String() string { return proto.CompactTextString(m) }
func (*SessionCreateRequest) ProtoMessage() {}
func (*SessionCreateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_session_8e535ce77fc5e082, []int{0}
}
func (m *SessionCreateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SessionCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_SessionCreateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *SessionCreateRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionCreateRequest.Merge(dst, src)
}
func (m *SessionCreateRequest) XXX_Size() int {
return m.Size()
}
func (m *SessionCreateRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SessionCreateRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SessionCreateRequest proto.InternalMessageInfo
func (m *SessionCreateRequest) GetUsername() string {
if m != nil {
@@ -77,22 +99,84 @@ func (m *SessionCreateRequest) GetToken() string {
// SessionDeleteRequest is for logging out.
type SessionDeleteRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionDeleteRequest) Reset() { *m = SessionDeleteRequest{} }
func (m *SessionDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*SessionDeleteRequest) ProtoMessage() {}
func (*SessionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorSession, []int{1} }
func (m *SessionDeleteRequest) Reset() { *m = SessionDeleteRequest{} }
func (m *SessionDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*SessionDeleteRequest) ProtoMessage() {}
func (*SessionDeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_session_8e535ce77fc5e082, []int{1}
}
func (m *SessionDeleteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SessionDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_SessionDeleteRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *SessionDeleteRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionDeleteRequest.Merge(dst, src)
}
func (m *SessionDeleteRequest) XXX_Size() int {
return m.Size()
}
func (m *SessionDeleteRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SessionDeleteRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SessionDeleteRequest proto.InternalMessageInfo
// SessionResponse wraps the created token or returns an empty string if deleted.
type SessionResponse struct {
Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionResponse) Reset() { *m = SessionResponse{} }
func (m *SessionResponse) String() string { return proto.CompactTextString(m) }
func (*SessionResponse) ProtoMessage() {}
func (*SessionResponse) Descriptor() ([]byte, []int) { return fileDescriptorSession, []int{2} }
func (m *SessionResponse) Reset() { *m = SessionResponse{} }
func (m *SessionResponse) String() string { return proto.CompactTextString(m) }
func (*SessionResponse) ProtoMessage() {}
func (*SessionResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_session_8e535ce77fc5e082, []int{2}
}
func (m *SessionResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SessionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_SessionResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *SessionResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionResponse.Merge(dst, src)
}
func (m *SessionResponse) XXX_Size() int {
return m.Size()
}
func (m *SessionResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SessionResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SessionResponse proto.InternalMessageInfo
func (m *SessionResponse) GetToken() string {
if m != nil {
@@ -134,7 +218,7 @@ func NewSessionServiceClient(cc *grpc.ClientConn) SessionServiceClient {
func (c *sessionServiceClient) Create(ctx context.Context, in *SessionCreateRequest, opts ...grpc.CallOption) (*SessionResponse, error) {
out := new(SessionResponse)
err := grpc.Invoke(ctx, "/session.SessionService/Create", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/session.SessionService/Create", in, out, opts...)
if err != nil {
return nil, err
}
@@ -143,7 +227,7 @@ func (c *sessionServiceClient) Create(ctx context.Context, in *SessionCreateRequ
func (c *sessionServiceClient) Delete(ctx context.Context, in *SessionDeleteRequest, opts ...grpc.CallOption) (*SessionResponse, error) {
out := new(SessionResponse)
err := grpc.Invoke(ctx, "/session.SessionService/Delete", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/session.SessionService/Delete", in, out, opts...)
if err != nil {
return nil, err
}
@@ -249,6 +333,9 @@ func (m *SessionCreateRequest) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintSession(dAtA, i, uint64(len(m.Token)))
i += copy(dAtA[i:], m.Token)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -267,6 +354,9 @@ func (m *SessionDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -291,6 +381,9 @@ func (m *SessionResponse) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintSession(dAtA, i, uint64(len(m.Token)))
i += copy(dAtA[i:], m.Token)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -318,12 +411,18 @@ func (m *SessionCreateRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovSession(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SessionDeleteRequest) Size() (n int) {
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -334,6 +433,9 @@ func (m *SessionResponse) Size() (n int) {
if l > 0 {
n += 1 + l + sovSession(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -478,6 +580,7 @@ func (m *SessionCreateRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -528,6 +631,7 @@ func (m *SessionDeleteRequest) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -607,6 +711,7 @@ func (m *SessionResponse) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -721,9 +826,11 @@ var (
ErrIntOverflowSession = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("server/session/session.proto", fileDescriptorSession) }
func init() {
proto.RegisterFile("server/session/session.proto", fileDescriptor_session_8e535ce77fc5e082)
}
var fileDescriptorSession = []byte{
var fileDescriptor_session_8e535ce77fc5e082 = []byte{
// 356 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xb1, 0x4e, 0xeb, 0x30,
0x14, 0x86, 0xe5, 0x5e, 0xdd, 0xde, 0x7b, 0x3d, 0xdc, 0x8a, 0x28, 0x82, 0x28, 0x2a, 0x15, 0xca,

View File

@@ -1,23 +1,13 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: server/settings/settings.proto
/*
Package settings is a generated protocol buffer package.
package settings // import "github.com/argoproj/argo-cd/server/settings"
/*
Settings Service
Settings Service API retrives ArgoCD settings
It is generated from these files:
server/settings/settings.proto
It has these top-level messages:
SettingsQuery
Settings
DexConfig
Connector
*/
package settings
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
@@ -43,22 +33,84 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// SettingsQuery is a query for ArgoCD settings
type SettingsQuery struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SettingsQuery) Reset() { *m = SettingsQuery{} }
func (m *SettingsQuery) String() string { return proto.CompactTextString(m) }
func (*SettingsQuery) ProtoMessage() {}
func (*SettingsQuery) Descriptor() ([]byte, []int) { return fileDescriptorSettings, []int{0} }
func (m *SettingsQuery) Reset() { *m = SettingsQuery{} }
func (m *SettingsQuery) String() string { return proto.CompactTextString(m) }
func (*SettingsQuery) ProtoMessage() {}
func (*SettingsQuery) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_71506a99e4ff7448, []int{0}
}
func (m *SettingsQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SettingsQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_SettingsQuery.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *SettingsQuery) XXX_Merge(src proto.Message) {
xxx_messageInfo_SettingsQuery.Merge(dst, src)
}
func (m *SettingsQuery) XXX_Size() int {
return m.Size()
}
func (m *SettingsQuery) XXX_DiscardUnknown() {
xxx_messageInfo_SettingsQuery.DiscardUnknown(m)
}
var xxx_messageInfo_SettingsQuery proto.InternalMessageInfo
type Settings struct {
URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
DexConfig *DexConfig `protobuf:"bytes,2,opt,name=dexConfig" json:"dexConfig,omitempty"`
URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
DexConfig *DexConfig `protobuf:"bytes,2,opt,name=dexConfig" json:"dexConfig,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Settings) Reset() { *m = Settings{} }
func (m *Settings) String() string { return proto.CompactTextString(m) }
func (*Settings) ProtoMessage() {}
func (*Settings) Descriptor() ([]byte, []int) { return fileDescriptorSettings, []int{1} }
func (m *Settings) Reset() { *m = Settings{} }
func (m *Settings) String() string { return proto.CompactTextString(m) }
func (*Settings) ProtoMessage() {}
func (*Settings) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_71506a99e4ff7448, []int{1}
}
func (m *Settings) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Settings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Settings.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *Settings) XXX_Merge(src proto.Message) {
xxx_messageInfo_Settings.Merge(dst, src)
}
func (m *Settings) XXX_Size() int {
return m.Size()
}
func (m *Settings) XXX_DiscardUnknown() {
xxx_messageInfo_Settings.DiscardUnknown(m)
}
var xxx_messageInfo_Settings proto.InternalMessageInfo
func (m *Settings) GetURL() string {
if m != nil {
@@ -75,13 +127,44 @@ func (m *Settings) GetDexConfig() *DexConfig {
}
type DexConfig struct {
Connectors []*Connector `protobuf:"bytes,1,rep,name=connectors" json:"connectors,omitempty"`
Connectors []*Connector `protobuf:"bytes,1,rep,name=connectors" json:"connectors,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DexConfig) Reset() { *m = DexConfig{} }
func (m *DexConfig) String() string { return proto.CompactTextString(m) }
func (*DexConfig) ProtoMessage() {}
func (*DexConfig) Descriptor() ([]byte, []int) { return fileDescriptorSettings, []int{2} }
func (m *DexConfig) Reset() { *m = DexConfig{} }
func (m *DexConfig) String() string { return proto.CompactTextString(m) }
func (*DexConfig) ProtoMessage() {}
func (*DexConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_71506a99e4ff7448, []int{2}
}
func (m *DexConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DexConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_DexConfig.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *DexConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_DexConfig.Merge(dst, src)
}
func (m *DexConfig) XXX_Size() int {
return m.Size()
}
func (m *DexConfig) XXX_DiscardUnknown() {
xxx_messageInfo_DexConfig.DiscardUnknown(m)
}
var xxx_messageInfo_DexConfig proto.InternalMessageInfo
func (m *DexConfig) GetConnectors() []*Connector {
if m != nil {
@@ -91,14 +174,45 @@ func (m *DexConfig) GetConnectors() []*Connector {
}
type Connector struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Connector) Reset() { *m = Connector{} }
func (m *Connector) String() string { return proto.CompactTextString(m) }
func (*Connector) ProtoMessage() {}
func (*Connector) Descriptor() ([]byte, []int) { return fileDescriptorSettings, []int{3} }
func (m *Connector) Reset() { *m = Connector{} }
func (m *Connector) String() string { return proto.CompactTextString(m) }
func (*Connector) ProtoMessage() {}
func (*Connector) Descriptor() ([]byte, []int) {
return fileDescriptor_settings_71506a99e4ff7448, []int{3}
}
func (m *Connector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Connector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Connector.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *Connector) XXX_Merge(src proto.Message) {
xxx_messageInfo_Connector.Merge(dst, src)
}
func (m *Connector) XXX_Size() int {
return m.Size()
}
func (m *Connector) XXX_DiscardUnknown() {
xxx_messageInfo_Connector.DiscardUnknown(m)
}
var xxx_messageInfo_Connector proto.InternalMessageInfo
func (m *Connector) GetName() string {
if m != nil {
@@ -146,7 +260,7 @@ func NewSettingsServiceClient(cc *grpc.ClientConn) SettingsServiceClient {
func (c *settingsServiceClient) Get(ctx context.Context, in *SettingsQuery, opts ...grpc.CallOption) (*Settings, error) {
out := new(Settings)
err := grpc.Invoke(ctx, "/cluster.SettingsService/Get", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/cluster.SettingsService/Get", in, out, opts...)
if err != nil {
return nil, err
}
@@ -210,6 +324,9 @@ func (m *SettingsQuery) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -244,6 +361,9 @@ func (m *Settings) MarshalTo(dAtA []byte) (int, error) {
}
i += n1
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -274,6 +394,9 @@ func (m *DexConfig) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -304,6 +427,9 @@ func (m *Connector) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintSettings(dAtA, i, uint64(len(m.Type)))
i += copy(dAtA[i:], m.Type)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -319,6 +445,9 @@ func encodeVarintSettings(dAtA []byte, offset int, v uint64) int {
func (m *SettingsQuery) Size() (n int) {
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -333,6 +462,9 @@ func (m *Settings) Size() (n int) {
l = m.DexConfig.Size()
n += 1 + l + sovSettings(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -345,6 +477,9 @@ func (m *DexConfig) Size() (n int) {
n += 1 + l + sovSettings(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -359,6 +494,9 @@ func (m *Connector) Size() (n int) {
if l > 0 {
n += 1 + l + sovSettings(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -416,6 +554,7 @@ func (m *SettingsQuery) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -528,6 +667,7 @@ func (m *Settings) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -609,6 +749,7 @@ func (m *DexConfig) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -717,6 +858,7 @@ func (m *Connector) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -831,9 +973,11 @@ var (
ErrIntOverflowSettings = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("server/settings/settings.proto", fileDescriptorSettings) }
func init() {
proto.RegisterFile("server/settings/settings.proto", fileDescriptor_settings_71506a99e4ff7448)
}
var fileDescriptorSettings = []byte{
var fileDescriptor_settings_71506a99e4ff7448 = []byte{
// 322 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x41, 0x4b, 0xc3, 0x40,
0x10, 0x85, 0xd9, 0x46, 0xac, 0x19, 0x91, 0xea, 0x22, 0x12, 0x8b, 0xc4, 0x92, 0x53, 0x41, 0x4c,

View File

@@ -832,7 +832,9 @@
}
}
}
},
}
},
"/api/v1/projects/{project}/roles/{role}/token/{iat}": {
"delete": {
"tags": [
"ProjectService"
@@ -851,6 +853,13 @@
"name": "role",
"in": "path",
"required": true
},
{
"type": "string",
"format": "int64",
"name": "iat",
"in": "path",
"required": true
}
],
"responses": {
@@ -1304,6 +1313,9 @@
"name": {
"type": "string"
},
"parameter": {
"$ref": "#/definitions/applicationParameterOverrides"
},
"prune": {
"type": "boolean",
"format": "boolean"
@@ -1330,6 +1342,44 @@
"applicationOperationTerminateResponse": {
"type": "object"
},
"applicationParameter": {
"type": "object",
"properties": {
"component": {
"type": "string"
},
"name": {
"type": "string"
},
"value": {
"type": "string"
}
}
},
"applicationParameterOverrides": {
"type": "object",
"title": "ParameterOverrides is a wrapper on a list of parameters. If omitted, the application's overrides\nin the spec will be used. If set, will use the supplied list of overrides",
"properties": {
"overrides": {
"type": "array",
"items": {
"$ref": "#/definitions/applicationParameter"
}
}
}
},
"applicationv1alpha1ParameterOverrides": {
"type": "object",
"title": "ParameterOverrides masks the value so protobuf can generate\n+protobuf.nullable=true\n+protobuf.options.(gogoproto.goproto_stringer)=false",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/v1alpha1ComponentParameter"
}
}
}
},
"clusterClusterCreateFromKubeConfigRequest": {
"type": "object",
"properties": {
@@ -1714,6 +1764,19 @@
}
}
},
"v1GroupKind": {
"description": "+protobuf.options.(gogoproto.goproto_stringer)=false",
"type": "object",
"title": "GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying\nconcepts during lookup stages without having partially valid types",
"properties": {
"group": {
"type": "string"
},
"kind": {
"type": "string"
}
}
},
"v1Initializer": {
"description": "Initializer is information about an initializer that has not yet completed.",
"type": "object",
@@ -1937,6 +2000,20 @@
}
}
},
"v1alpha1AWSAuthConfig": {
"type": "object",
"title": "AWSAuthConfig is an AWS IAM authentication configuration",
"properties": {
"clusterName": {
"type": "string",
"title": "ClusterName contains AWS cluster name"
},
"roleARN": {
"description": "RoleARN contains optional role ARN. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.",
"type": "string"
}
}
},
"v1alpha1AppProject": {
"type": "object",
"title": "AppProject is a definition of AppProject resource.\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
@@ -1968,6 +2045,13 @@
"type": "object",
"title": "AppProjectSpec represents",
"properties": {
"clusterResourceWhitelist": {
"type": "array",
"title": "ClusterResourceWhitelist contains list of whitelisted cluster level resources",
"items": {
"$ref": "#/definitions/v1GroupKind"
}
},
"description": {
"type": "string",
"title": "Description contains optional project description"
@@ -1979,6 +2063,13 @@
"$ref": "#/definitions/v1alpha1ApplicationDestination"
}
},
"namespaceResourceBlacklist": {
"type": "array",
"title": "NamespaceResourceBlacklist contains list of blacklisted namespace level resources",
"items": {
"$ref": "#/definitions/v1GroupKind"
}
},
"roles": {
"type": "array",
"items": {
@@ -2104,6 +2195,9 @@
},
"source": {
"$ref": "#/definitions/v1alpha1ApplicationSource"
},
"syncPolicy": {
"$ref": "#/definitions/v1alpha1SyncPolicy"
}
}
},
@@ -2176,6 +2270,9 @@
"description": "ClusterConfig is the configuration attributes. This structure is subset of the go-client\nrest.Config with annotations added for marshalling.",
"type": "object",
"properties": {
"awsAuthConfig": {
"$ref": "#/definitions/v1alpha1AWSAuthConfig"
},
"bearerToken": {
"description": "Server requires Bearer authentication. This client will not attempt to use\nrefresh tokens for an OAuth2 flow.\nTODO: demonstrate an OAuth2 compatible client.",
"type": "string"
@@ -2223,6 +2320,9 @@
"$ref": "#/definitions/v1alpha1ResourceState"
}
},
"revision": {
"type": "string"
},
"status": {
"type": "string"
}
@@ -2528,14 +2628,17 @@
"format": "boolean",
"title": "DryRun will perform a `kubectl apply --dry-run` without actually performing the sync"
},
"parameterOverrides": {
"$ref": "#/definitions/applicationv1alpha1ParameterOverrides"
},
"prune": {
"type": "boolean",
"format": "boolean",
"title": "Prune deletes resources that are no longer tracked in git"
},
"revision": {
"type": "string",
"title": "Revision is the git revision in which to sync the application to"
"description": "Revision is the git revision in which to sync the application to.\nIf omitted, will use the revision specified in app spec.",
"type": "string"
},
"syncStrategy": {
"$ref": "#/definitions/v1alpha1SyncStrategy"
@@ -2566,9 +2669,29 @@
}
}
},
"v1alpha1SyncPolicy": {
"type": "object",
"title": "SyncPolicy controls when a sync will be performed in response to updates in git",
"properties": {
"automated": {
"$ref": "#/definitions/v1alpha1SyncPolicyAutomated"
}
}
},
"v1alpha1SyncPolicyAutomated": {
"type": "object",
"title": "SyncPolicyAutomated controls the behavior of an automated sync",
"properties": {
"prune": {
"type": "boolean",
"format": "boolean",
"title": "Prune will prune resources automatically as part of automated sync (default: false)"
}
}
},
"v1alpha1SyncStrategy": {
"type": "object",
"title": "SyncStrategy indicates the",
"title": "SyncStrategy controls the manner in which a sync is performed",
"properties": {
"apply": {
"$ref": "#/definitions/v1alpha1SyncStrategyApply"

View File

@@ -1,26 +1,19 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: server/version/version.proto
/*
Package version is a generated protocol buffer package.
package version // import "github.com/argoproj/argo-cd/server/version"
/*
Version Service
Version Service API returns the version of the API server.
It is generated from these files:
server/version/version.proto
It has these top-level messages:
VersionMessage
*/
package version
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import empty "github.com/golang/protobuf/ptypes/empty"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
@@ -40,21 +33,52 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// VersionMessage represents version of the ArgoCD API server
type VersionMessage struct {
Version string `protobuf:"bytes,1,opt,name=Version,proto3" json:"Version,omitempty"`
BuildDate string `protobuf:"bytes,2,opt,name=BuildDate,proto3" json:"BuildDate,omitempty"`
GitCommit string `protobuf:"bytes,3,opt,name=GitCommit,proto3" json:"GitCommit,omitempty"`
GitTag string `protobuf:"bytes,4,opt,name=GitTag,proto3" json:"GitTag,omitempty"`
GitTreeState string `protobuf:"bytes,5,opt,name=GitTreeState,proto3" json:"GitTreeState,omitempty"`
GoVersion string `protobuf:"bytes,6,opt,name=GoVersion,proto3" json:"GoVersion,omitempty"`
Compiler string `protobuf:"bytes,7,opt,name=Compiler,proto3" json:"Compiler,omitempty"`
Platform string `protobuf:"bytes,8,opt,name=Platform,proto3" json:"Platform,omitempty"`
KsonnetVersion string `protobuf:"bytes,9,opt,name=KsonnetVersion,proto3" json:"KsonnetVersion,omitempty"`
Version string `protobuf:"bytes,1,opt,name=Version,proto3" json:"Version,omitempty"`
BuildDate string `protobuf:"bytes,2,opt,name=BuildDate,proto3" json:"BuildDate,omitempty"`
GitCommit string `protobuf:"bytes,3,opt,name=GitCommit,proto3" json:"GitCommit,omitempty"`
GitTag string `protobuf:"bytes,4,opt,name=GitTag,proto3" json:"GitTag,omitempty"`
GitTreeState string `protobuf:"bytes,5,opt,name=GitTreeState,proto3" json:"GitTreeState,omitempty"`
GoVersion string `protobuf:"bytes,6,opt,name=GoVersion,proto3" json:"GoVersion,omitempty"`
Compiler string `protobuf:"bytes,7,opt,name=Compiler,proto3" json:"Compiler,omitempty"`
Platform string `protobuf:"bytes,8,opt,name=Platform,proto3" json:"Platform,omitempty"`
KsonnetVersion string `protobuf:"bytes,9,opt,name=KsonnetVersion,proto3" json:"KsonnetVersion,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VersionMessage) Reset() { *m = VersionMessage{} }
func (m *VersionMessage) String() string { return proto.CompactTextString(m) }
func (*VersionMessage) ProtoMessage() {}
func (*VersionMessage) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
func (m *VersionMessage) Reset() { *m = VersionMessage{} }
func (m *VersionMessage) String() string { return proto.CompactTextString(m) }
func (*VersionMessage) ProtoMessage() {}
func (*VersionMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_version_cdcdf9fc14514c9b, []int{0}
}
func (m *VersionMessage) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *VersionMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_VersionMessage.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *VersionMessage) XXX_Merge(src proto.Message) {
xxx_messageInfo_VersionMessage.Merge(dst, src)
}
func (m *VersionMessage) XXX_Size() int {
return m.Size()
}
func (m *VersionMessage) XXX_DiscardUnknown() {
xxx_messageInfo_VersionMessage.DiscardUnknown(m)
}
var xxx_messageInfo_VersionMessage proto.InternalMessageInfo
func (m *VersionMessage) GetVersion() string {
if m != nil {
@@ -135,7 +159,7 @@ const _ = grpc.SupportPackageIsVersion4
type VersionServiceClient interface {
// Version returns version information of the API server
Version(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*VersionMessage, error)
Version(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*VersionMessage, error)
}
type versionServiceClient struct {
@@ -146,9 +170,9 @@ func NewVersionServiceClient(cc *grpc.ClientConn) VersionServiceClient {
return &versionServiceClient{cc}
}
func (c *versionServiceClient) Version(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*VersionMessage, error) {
func (c *versionServiceClient) Version(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*VersionMessage, error) {
out := new(VersionMessage)
err := grpc.Invoke(ctx, "/version.VersionService/Version", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/version.VersionService/Version", in, out, opts...)
if err != nil {
return nil, err
}
@@ -159,7 +183,7 @@ func (c *versionServiceClient) Version(ctx context.Context, in *google_protobuf1
type VersionServiceServer interface {
// Version returns version information of the API server
Version(context.Context, *google_protobuf1.Empty) (*VersionMessage, error)
Version(context.Context, *empty.Empty) (*VersionMessage, error)
}
func RegisterVersionServiceServer(s *grpc.Server, srv VersionServiceServer) {
@@ -167,7 +191,7 @@ func RegisterVersionServiceServer(s *grpc.Server, srv VersionServiceServer) {
}
func _VersionService_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(google_protobuf1.Empty)
in := new(empty.Empty)
if err := dec(in); err != nil {
return nil, err
}
@@ -179,7 +203,7 @@ func _VersionService_Version_Handler(srv interface{}, ctx context.Context, dec f
FullMethod: "/version.VersionService/Version",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(VersionServiceServer).Version(ctx, req.(*google_protobuf1.Empty))
return srv.(VersionServiceServer).Version(ctx, req.(*empty.Empty))
}
return interceptor(ctx, in, info, handler)
}
@@ -266,6 +290,9 @@ func (m *VersionMessage) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintVersion(dAtA, i, uint64(len(m.KsonnetVersion)))
i += copy(dAtA[i:], m.KsonnetVersion)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@@ -317,6 +344,9 @@ func (m *VersionMessage) Size() (n int) {
if l > 0 {
n += 1 + l + sovVersion(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@@ -635,6 +665,7 @@ func (m *VersionMessage) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@@ -749,9 +780,11 @@ var (
ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("server/version/version.proto", fileDescriptorVersion) }
func init() {
proto.RegisterFile("server/version/version.proto", fileDescriptor_version_cdcdf9fc14514c9b)
}
var fileDescriptorVersion = []byte{
var fileDescriptor_version_cdcdf9fc14514c9b = []byte{
// 343 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0xcf, 0x4a, 0xc3, 0x40,
0x10, 0xc6, 0x49, 0xd5, 0xfe, 0x59, 0x4a, 0x0f, 0x8b, 0xd4, 0x25, 0x96, 0x22, 0x3d, 0x88, 0x08,

View File

@@ -2,6 +2,7 @@ package e2e
import (
"strconv"
"strings"
"testing"
"time"
@@ -9,6 +10,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
// load the gcp plugin (required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
@@ -18,7 +20,7 @@ import (
)
func TestAppManagement(t *testing.T) {
assertAppHasEvent := func(a *v1alpha1.Application, action string, reason string) {
assertAppHasEvent := func(a *v1alpha1.Application, message string, reason string) {
list, err := fixture.KubeClient.CoreV1().Events(fixture.Namespace).List(metav1.ListOptions{
FieldSelector: fields.SelectorFromSet(map[string]string{
"involvedObject.name": a.Name,
@@ -31,11 +33,11 @@ func TestAppManagement(t *testing.T) {
}
for i := range list.Items {
event := list.Items[i]
if event.Reason == reason && event.Action == action {
if event.Reason == reason && strings.Contains(event.Message, message) {
return
}
}
t.Errorf("Unable to find event with reason=%s; action=%s", reason, action)
t.Errorf("Unable to find event with reason=%s; message=%s", reason, message)
}
testApp := &v1alpha1.Application{
@@ -148,7 +150,7 @@ func TestAppManagement(t *testing.T) {
t.Fatalf("Unable to sync app %v", err)
}
assertAppHasEvent(app, "rollback", argo.EventReasonResourceUpdated)
assertAppHasEvent(app, "rollback", argo.EventReasonOperationStarted)
WaitUntil(t, func() (done bool, err error) {
app, err = fixture.AppClient.ArgoprojV1alpha1().Applications(fixture.Namespace).Get(app.ObjectMeta.Name, metav1.GetOptions{})

View File

@@ -2,12 +2,15 @@ package e2e
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"log"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"testing"
"time"
@@ -21,9 +24,6 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"path"
"path/filepath"
"github.com/argoproj/argo-cd/cmd/argocd/commands"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
@@ -57,7 +57,6 @@ type Fixture struct {
AppClient appclientset.Interface
DB db.ArgoDB
Namespace string
InstanceID string
RepoServerAddress string
ApiServerAddress string
Enforcer *rbac.Enforcer
@@ -93,7 +92,7 @@ func getFreePort() (int, error) {
}
func (f *Fixture) setup() error {
_, err := exec.Command("kubectl", "apply", "-f", "../../manifests/components/01a_application-crd.yaml", "-f", "../../manifests/components/01b_appproject-crd.yaml").Output()
_, err := exec.Command("kubectl", "apply", "-f", "../../manifests/base/application-crd.yaml", "-f", "../../manifests/base/appproject-crd.yaml").Output()
if err != nil {
return err
}
@@ -127,7 +126,11 @@ func (f *Fixture) setup() error {
}
memCache := cache.NewInMemoryCache(repository.DefaultRepoCacheExpiration)
repoServerGRPC := reposerver.NewServer(&FakeGitClientFactory{}, memCache).CreateGRPC()
repoSrv, err := reposerver.NewServer(&FakeGitClientFactory{}, memCache, func(config *tls.Config) {})
if err != nil {
return err
}
repoServerGRPC := repoSrv.CreateGRPC()
repoServerListener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return err
@@ -194,7 +197,7 @@ func (f *Fixture) ensureClusterRegistered() error {
errors.CheckError(err)
managerBearerToken, err := common.InstallClusterManagerRBAC(clientset)
errors.CheckError(err)
clst := commands.NewCluster(f.Config.Host, conf, managerBearerToken)
clst := commands.NewCluster(f.Config.Host, conf, managerBearerToken, nil)
clstCreateReq := cluster.ClusterCreateRequest{Cluster: clst}
_, err = cluster.NewServer(f.DB, f.Enforcer).Create(context.Background(), &clstCreateReq)
return err
@@ -273,7 +276,6 @@ func NewFixture() (*Fixture, error) {
DB: db,
KubeClient: kubeClient,
Namespace: namespace,
InstanceID: namespace,
Enforcer: enforcer,
}
err = fixture.setup()
@@ -283,7 +285,7 @@ func NewFixture() (*Fixture, error) {
return fixture, nil
}
// CreateApp creates application with appropriate controller instance id.
// CreateApp creates application
func (f *Fixture) CreateApp(t *testing.T, application *v1alpha1.Application) *v1alpha1.Application {
application = application.DeepCopy()
application.Name = fmt.Sprintf("e2e-test-%v", time.Now().Unix())
@@ -292,7 +294,6 @@ func (f *Fixture) CreateApp(t *testing.T, application *v1alpha1.Application) *v1
labels = make(map[string]string)
application.ObjectMeta.Labels = labels
}
labels[common.LabelKeyApplicationControllerInstanceID] = f.InstanceID
application.Spec.Source.ComponentParameterOverrides = append(
application.Spec.Source.ComponentParameterOverrides,
@@ -307,18 +308,12 @@ func (f *Fixture) CreateApp(t *testing.T, application *v1alpha1.Application) *v1
// createController creates new controller instance
func (f *Fixture) createController() *controller.ApplicationController {
appStateManager := controller.NewAppStateManager(
f.DB, f.AppClient, reposerver.NewRepositoryServerClientset(f.RepoServerAddress), f.Namespace)
return controller.NewApplicationController(
f.Namespace,
f.KubeClient,
f.AppClient,
reposerver.NewRepositoryServerClientset(f.RepoServerAddress),
f.DB,
appStateManager,
10*time.Second,
&controller.ApplicationControllerConfig{Namespace: f.Namespace, InstanceID: f.InstanceID})
10*time.Second)
}
func (f *Fixture) NewApiClientset() (argocdclient.Client, error) {
@@ -374,10 +369,10 @@ func WaitUntil(t *testing.T, condition wait.ConditionFunc) {
type FakeGitClientFactory struct{}
func (f *FakeGitClientFactory) NewClient(repoURL, path, username, password, sshPrivateKey string) git.Client {
func (f *FakeGitClientFactory) NewClient(repoURL, path, username, password, sshPrivateKey string) (git.Client, error) {
return &FakeGitClient{
root: path,
}
}, nil
}
// FakeGitClient is a test git client implementation which always clone local test repo.

Some files were not shown because too many files have changed in this diff Show More