Compare commits

...

45 Commits

Author SHA1 Message Date
argo-bot
5700faf0d1 Bump version to 2.1.0-rc3 2021-08-11 19:36:11 +00:00
argo-bot
829f0285b9 Bump version to 2.1.0-rc3 2021-08-11 19:35:56 +00:00
Alexander Matyushentsev
d6bb869468 fix: update deprecated helm2 installation URL (#6960)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-11 12:35:01 -07:00
Alexander Matyushentsev
b3abdb1323 fix: make sure repo server discard cached empty response (#6948)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-11 08:54:33 -07:00
Alexander Matyushentsev
e2bca9f9ef fix: applications/resources filter improvement and bug fixes (#6931)
* fix: applications/resources filter improvement and bug fixes

Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-11 08:54:26 -07:00
May Zhang
bdc53c804b fix: use secure way to generate initial password (#6938)
* fix: use secure way to generate initial password

Signed-off-by: May Zhang <may_zhang@intuit.com>

* fix: use secure way to generate initial password

Signed-off-by: May Zhang <may_zhang@intuit.com>
2021-08-10 15:01:15 -07:00
Daisuke Taniwaki
13e10a7c2b fix: Set header to OIDC requests (#6869)
Signed-off-by: Daisuke Taniwaki <daisuketaniwaki@gmail.com>
2021-08-10 15:01:05 -07:00
jannfis
f77d35a3d2 docs: Update contributor docs (#6615)
* Sync

Signed-off-by: jannfis <jann@mistrust.net>

* docs: Update contributor docs

Signed-off-by: jannfis <jann@mistrust.net>

* New paragraph about code submission

Signed-off-by: jannfis <jann@mistrust.net>

* Update

Signed-off-by: jannfis <jann@mistrust.net>

* Update code submissions before triage paragraph

Signed-off-by: jannfis <jann@mistrust.net>
2021-08-09 10:41:53 +00:00
jannfis
0818a48348 docs: Update security considerations (#6930)
* docs: Update security considerations

Signed-off-by: jannfis <jann@mistrust.net>

* docs: Update security considerations

Signed-off-by: jannfis <jann@mistrust.net>
2021-08-09 10:41:33 +00:00
Alexander Matyushentsev
faf7bff322 fix: application sync panel crashes if app has no sync options (#6914)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-04 16:57:09 -07:00
Yi Cai
96fba7c67b fix: Reword Generate new token dialog (#6913)
Signed-off-by: ciiay <yicai@redhat.com>
2021-08-04 16:57:05 -07:00
Alexander Matyushentsev
8020261a7d fix: assume ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS, ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS env vars have seconds (#6912)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-04 13:52:37 -07:00
Yi Cai
e7e93b2e7a fix: UI 6337 cluster filter improvement for issue #6337 (#6856)
* fix: when cluster field is bge-dev1, the applications with cluster dev1 is selected #6337

Signed-off-by: ciiay <yicai@redhat.com>
2021-08-04 13:52:34 -07:00
Noam Gal
8d78f0a604 fixed max to use MaxInt64 value (#6911)
Signed-off-by: Noam Gal <noam.gal@codefresh.io>
2021-08-04 11:21:57 -07:00
Chetan Banavikalmutt
27e9f6398c fix: unset command should remove env vars when there's no error (#6908)
Signed-off-by: Chetan Banavikalmutt <chetanrns1997@gmail.com>
2021-08-04 11:21:54 -07:00
pasha-codefresh
455d0f1be1 feat: rollback should work without id passed #6825. (#6877)
feat: rollback should work without id passed #6825. (#6877)

Signed-off-by: pashavictorovich <pavel@codefresh.io>
2021-08-04 11:21:50 -07:00
Remington Breeze
a87660adee fix: Add https prefix to ingress URLs if hosts field is present (#6901)
Signed-off-by: Remington Breeze <remington@breeze.software>
2021-08-04 11:21:47 -07:00
woshicai
31093cd359 fix: client input arguments with equal sign (#6885)
Signed-off-by: Charles Cai <charles.cai@sap.com>

Co-authored-by: Charles Cai <charles.cai@sap.com>
2021-08-04 11:21:41 -07:00
Bob Claerhout
cad6016ac6 docs: Add replace description on individual resource level (#6905)
Signed-off-by: Bob Claerhout <claerhout.bob@gmail.com>
2021-08-04 11:21:34 -07:00
May Zhang
a2f1993c06 fix: logout redirect URL (#6903)
Signed-off-by: May Zhang <may_zhang@intuit.com>
2021-08-03 17:07:37 -07:00
Alexander Matyushentsev
9118e7a7ec refactor: update resources install order according to helm implementation (#6902)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-03 17:07:33 -07:00
Alexander Matyushentsev
cd10754574 fix: controller should not create orphaned resources warning by default (#6898)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-03 17:07:28 -07:00
Alexander Matyushentsev
f0b91cccf3 feat: Improve Replace sync option description in UI (#6899)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-03 17:07:23 -07:00
dependabot[bot]
1354538269 chore(deps): bump tar from 6.1.0 to 6.1.3 in /ui (#6900)
Bumps [tar](https://github.com/npm/node-tar) from 6.1.0 to 6.1.3.
- [Release notes](https://github.com/npm/node-tar/releases)
- [Changelog](https://github.com/npm/node-tar/blob/main/CHANGELOG.md)
- [Commits](https://github.com/npm/node-tar/compare/v6.1.0...v6.1.3)

---
updated-dependencies:
- dependency-name: tar
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-08-03 17:07:20 -07:00
Yi Cai
5c408cc5d5 fix: menu for application loses text when title area buttons collapse #6474 (#6887)
Signed-off-by: ciiay <yicai@redhat.com>
2021-08-03 17:07:10 -07:00
argo-bot
b067879892 Bump version to 2.1.0-rc2 2021-08-03 17:00:32 +00:00
argo-bot
ab2bbc2201 Bump version to 2.1.0-rc2 2021-08-03 17:00:06 +00:00
Remington Breeze
a93649419b fix(ui): Bump argo-ui to hide filter suggestions on enter and show on typing (#6891)
* fix(ui): Bump argo-ui to hide filter suggestions on enter and show on typing

Signed-off-by: Remington Breeze <remington@breeze.software>

* remove unneccessary yarn.lock changes

Signed-off-by: Remington Breeze <remington@breeze.software>
2021-08-03 09:45:26 -07:00
Remington Breeze
15c361e525 fix(ui): Add View Details option to resource actions menu (#6893)
Signed-off-by: Remington Breeze <remington@breeze.software>
2021-08-03 09:45:21 -07:00
Jan-Otto Kröpke
a5ba98ff61 fix: upgrade to kustomize 4.2.0 (#6861)
Signed-off-by: Jan-Otto Kröpke <joe@adorsys.de>
2021-08-03 09:45:16 -07:00
May Zhang
18ddf1f839 fix: add documentation for using argocd repocreds with --enable-coi and --type helm (#6890)
Signed-off-by: May Zhang <may_zhang@intuit.com>
2021-08-03 09:45:12 -07:00
Jun
4e8b9b85b8 feat: Rollback command support omit history id (#6863)
Signed-off-by: junnplus <junnplus@gmail.com>
2021-08-03 09:45:03 -07:00
Remington Breeze
34b3139309 fix(ui): Incorrect path for non-namespaced resources (#6895)
Signed-off-by: Remington Breeze <remington@breeze.software>
2021-08-03 09:44:58 -07:00
Remington Breeze
4410803b11 fix(ui): Page navigation no longer visible with status bar (#6888)
Signed-off-by: Remington Breeze <remington@breeze.software>
2021-08-03 09:44:50 -07:00
Alexander Matyushentsev
83b272e125 fix: make sure orphaned filter checkbox is clickable (#6886)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-08-03 09:44:44 -07:00
woshicai
4f967eaa5a fix: docs about custom image user #6851 (#6872)
* fix: docs about custom image user, change it from argocd to 999

Signed-off-by: Charles Cai <charles.cai@sap.com>

* update: docs for upgrading version

Signed-off-by: Charles Cai <charles.cai@sap.com>

Co-authored-by: Charles Cai <charles.cai@sap.com>
2021-08-03 09:44:38 -07:00
Joe Bowbeer
13cdf01506 docs: installation.md (#6860)
Signed-off-by: Joe Bowbeer <joe.bowbeer@gmail.com>
2021-08-03 09:44:32 -07:00
Yi Cai
d84822ea88 fix: Project filter selector does not get unset upon clear filters #6750 (#6866)
Signed-off-by: ciiay <yicai@redhat.com>
2021-08-03 09:44:25 -07:00
Remington Breeze
a524a3b4d9 fix(ui): Prevent UI crash if app status or resources is empty (#6858) 2021-07-30 10:03:55 -07:00
Alexander Matyushentsev
9419c11c1d fix: util.cli.SetLogLevel should update global log level (#6852)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-07-29 13:02:07 -07:00
Alexander Matyushentsev
12a4475176 fix: include cluster level RBAC into argocd-core manifests (#6854)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-07-29 12:49:53 -07:00
Klaus Dorninger
662567b8fd fix: #6844 multiple global projects can be configured (#6845)
Signed-off-by: Klaus Dorninger <github@dornimaug.org>
2021-07-29 11:08:01 -07:00
Alexander Matyushentsev
6b14f909e9 fix: core installation must include CRD definitions (#6841)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2021-07-28 16:30:42 -07:00
argo-bot
a92c24094e Bump version to 2.1.0-rc1 2021-07-28 22:24:09 +00:00
argo-bot
307de9555d Bump version to 2.1.0-rc1 2021-07-28 22:23:55 +00:00
79 changed files with 3927 additions and 706 deletions

View File

@@ -1 +1 @@
2.1.0
2.1.0-rc3

View File

@@ -144,16 +144,16 @@ func NewCommand() *cobra.Command {
}
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt32).Seconds()), "Time period in seconds for application resync.")
command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application resync.")
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", int(env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60*time.Second, 0, math.MaxInt32).Seconds()), "Repo server RPC call timeout seconds.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
command.Flags().IntVar(&statusProcessors, "status-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS", 20, 0, math.MaxInt32), "Number of application status processors")
command.Flags().IntVar(&operationProcessors, "operation-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_OPERATION_PROCESSORS", 10, 0, math.MaxInt32), "Number of application operation processors")
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt32), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)")
command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt64), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 5, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", 20, "Number of allowed concurrent kubectl fork/execs. Any value less the 1 means no limit.")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
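
The hunk above corresponds to #6911 (use `math.MaxInt64` as the upper bound) and #6912 (treat the `ARGOCD_*_REPO_SERVER_TIMEOUT_SECONDS` variables as plain seconds). A minimal, self-contained sketch of that parsing difference, using only the standard library rather than the real `env` helpers:

```go
// Minimal sketch of the parsing difference, assuming a plain stdlib re-implementation
// rather than the real argo-cd env helpers: a value such as "60" is a valid number of
// seconds but not a valid Go duration literal, so the *_TIMEOUT_SECONDS variables need
// an integer parser instead of a duration parser.
package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// secondsFromEnv reads an env var holding a plain number of seconds, e.g. "60".
func secondsFromEnv(key string, def int) int {
	if v := os.Getenv(key); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n >= 0 {
			return n
		}
	}
	return def
}

func main() {
	os.Setenv("ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS", "60")
	fmt.Println(secondsFromEnv("ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS", 60)) // 60

	// A duration parser rejects the same value, because "60" has no unit ("60s" would parse).
	_, err := time.ParseDuration("60")
	fmt.Println(err != nil) // true
}
```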

View File

@@ -172,7 +172,7 @@ func NewCommand() *cobra.Command {
command.AddCommand(cli.NewVersionCmd(cliName))
command.Flags().IntVar(&listenPort, "port", common.DefaultPortAPIServer, "Listen on given port")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDAPIServerMetrics, "Start metrics on given port")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", int(env.ParseDurationFromEnv("ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS", 60*time.Second, 0, math.MaxInt32).Seconds()), "Repo server RPC call timeout seconds.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
command.Flags().StringVar(&frameOptions, "x-frame-options", env.StringFromEnv("ARGOCD_SERVER_X_FRAME_OPTIONS", "sameorigin"), "Set X-Frame-Options header in HTTP responses to `value`. To disable, set to \"\".")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_SERVER_REPO_SERVER_PLAINTEXT", false), "Use a plaintext client (non-TLS) to connect to repository server")
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_SERVER_REPO_SERVER_STRICT_TLS", false), "Perform strict validation of TLS certificates when connecting to repo server")

View File

@@ -675,7 +675,7 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
}
for _, env := range pluginEnvs {
err = app.Spec.Source.Plugin.RemoveEnvEntry(env)
if err != nil {
if err == nil {
updated = true
}
}
@@ -1814,6 +1814,23 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra
return command
}
func findRevisionHistory(application *argoappv1.Application, historyId int64) (*argoappv1.RevisionHistory, error) {
// in case if history id not passed and need fetch previous history revision
if historyId == -1 {
l := len(application.Status.History)
if l < 2 {
return nil, fmt.Errorf("Application '%s' should have at least two successful deployments", application.ObjectMeta.Name)
}
return &application.Status.History[l-2], nil
}
for _, di := range application.Status.History {
if di.ID == historyId {
return &di, nil
}
}
return nil, fmt.Errorf("Application '%s' does not have deployment id '%d' in history\n", application.ObjectMeta.Name, historyId)
}
// NewApplicationRollbackCommand returns a new instance of an `argocd app rollback` command
func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
@@ -1821,36 +1838,33 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
timeout uint
)
var command = &cobra.Command{
Use: "rollback APPNAME ID",
Short: "Rollback application to a previous deployed version by History ID",
Use: "rollback APPNAME [ID]",
Short: "Rollback application to a previous deployed version by History ID, omitted will Rollback to the previous version",
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
if len(args) == 0 {
c.HelpFunc()(c, args)
os.Exit(1)
}
appName := args[0]
depID, err := strconv.Atoi(args[1])
errors.CheckError(err)
var err error
depID := -1
if len(args) > 1 {
depID, err = strconv.Atoi(args[1])
errors.CheckError(err)
}
acdClient := argocdclient.NewClientOrDie(clientOpts)
conn, appIf := acdClient.NewApplicationClientOrDie()
defer argoio.Close(conn)
ctx := context.Background()
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName})
errors.CheckError(err)
var depInfo *argoappv1.RevisionHistory
for _, di := range app.Status.History {
if di.ID == int64(depID) {
depInfo = &di
break
}
}
if depInfo == nil {
log.Fatalf("Application '%s' does not have deployment id '%d' in history\n", app.ObjectMeta.Name, depID)
}
depInfo, err := findRevisionHistory(app, int64(depID))
errors.CheckError(err)
_, err = appIf.Rollback(ctx, &applicationpkg.ApplicationRollbackRequest{
Name: &appName,
ID: int64(depID),
ID: depInfo.ID,
Prune: prune,
})
errors.CheckError(err)
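
The first hunk of this file is the `argocd app unset` fix from #6908: `updated` is now set when an env entry was actually removed (`err == nil`) instead of when removal failed. A small sketch of the corrected pattern, using a hypothetical `pluginSpec` type rather than the real Argo CD API:

```go
// Sketch of the corrected update-tracking pattern, with a hypothetical pluginSpec
// standing in for the real application spec type.
package main

import (
	"errors"
	"fmt"
)

type pluginSpec struct {
	Env []string
}

// removeEnvEntry deletes name from Env and returns an error if it was not present.
func (p *pluginSpec) removeEnvEntry(name string) error {
	for i, e := range p.Env {
		if e == name {
			p.Env = append(p.Env[:i], p.Env[i+1:]...)
			return nil
		}
	}
	return errors.New("entry not found")
}

func main() {
	spec := &pluginSpec{Env: []string{"FOO", "BAR"}}
	updated := false
	for _, name := range []string{"FOO", "MISSING"} {
		// Before the fix the condition was `err != nil`, so successful removals
		// never marked the spec as updated and were silently dropped.
		if err := spec.removeEnvEntry(name); err == nil {
			updated = true
		}
	}
	fmt.Println(updated, spec.Env) // true [BAR]
}
```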

View File

@@ -0,0 +1,163 @@
package commands
import (
"testing"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
func TestFindRevisionHistoryWithoutPassedId(t *testing.T) {
histories := v1alpha1.RevisionHistories{}
histories = append(histories, v1alpha1.RevisionHistory{ID: 1})
histories = append(histories, v1alpha1.RevisionHistory{ID: 2})
histories = append(histories, v1alpha1.RevisionHistory{ID: 3})
status := v1alpha1.ApplicationStatus{
Resources: nil,
Sync: v1alpha1.SyncStatus{},
Health: v1alpha1.HealthStatus{},
History: histories,
Conditions: nil,
ReconciledAt: nil,
OperationState: nil,
ObservedAt: nil,
SourceType: "",
Summary: v1alpha1.ApplicationSummary{},
}
application := v1alpha1.Application{
Status: status,
}
history, err := findRevisionHistory(&application, -1)
if err != nil {
t.Fatal("Find revision history should fail without errors")
}
if history == nil {
t.Fatal("History should be found")
}
}
func TestFindRevisionHistoryWithoutPassedIdAndEmptyHistoryList(t *testing.T) {
histories := v1alpha1.RevisionHistories{}
status := v1alpha1.ApplicationStatus{
Resources: nil,
Sync: v1alpha1.SyncStatus{},
Health: v1alpha1.HealthStatus{},
History: histories,
Conditions: nil,
ReconciledAt: nil,
OperationState: nil,
ObservedAt: nil,
SourceType: "",
Summary: v1alpha1.ApplicationSummary{},
}
application := v1alpha1.Application{
Status: status,
}
history, err := findRevisionHistory(&application, -1)
if err == nil {
t.Fatal("Find revision history should fail with errors")
}
if history != nil {
t.Fatal("History should be empty")
}
if err.Error() != "Application '' should have at least two successful deployments" {
t.Fatal("Find revision history should fail with correct error message")
}
}
func TestFindRevisionHistoryWithPassedId(t *testing.T) {
histories := v1alpha1.RevisionHistories{}
histories = append(histories, v1alpha1.RevisionHistory{ID: 1})
histories = append(histories, v1alpha1.RevisionHistory{ID: 2})
histories = append(histories, v1alpha1.RevisionHistory{ID: 3, Revision: "123"})
status := v1alpha1.ApplicationStatus{
Resources: nil,
Sync: v1alpha1.SyncStatus{},
Health: v1alpha1.HealthStatus{},
History: histories,
Conditions: nil,
ReconciledAt: nil,
OperationState: nil,
ObservedAt: nil,
SourceType: "",
Summary: v1alpha1.ApplicationSummary{},
}
application := v1alpha1.Application{
Status: status,
}
history, err := findRevisionHistory(&application, 3)
if err != nil {
t.Fatal("Find revision history should fail without errors")
}
if history == nil {
t.Fatal("History should be found")
}
if history.Revision != "123" {
t.Fatal("Failed to find correct history with correct revision")
}
}
func TestFindRevisionHistoryWithPassedIdThatNotExist(t *testing.T) {
histories := v1alpha1.RevisionHistories{}
histories = append(histories, v1alpha1.RevisionHistory{ID: 1})
histories = append(histories, v1alpha1.RevisionHistory{ID: 2})
histories = append(histories, v1alpha1.RevisionHistory{ID: 3, Revision: "123"})
status := v1alpha1.ApplicationStatus{
Resources: nil,
Sync: v1alpha1.SyncStatus{},
Health: v1alpha1.HealthStatus{},
History: histories,
Conditions: nil,
ReconciledAt: nil,
OperationState: nil,
ObservedAt: nil,
SourceType: "",
Summary: v1alpha1.ApplicationSummary{},
}
application := v1alpha1.Application{
Status: status,
}
history, err := findRevisionHistory(&application, 4)
if err == nil {
t.Fatal("Find revision history should fail with errors")
}
if history != nil {
t.Fatal("History should be not found")
}
if err.Error() != "Application '' does not have deployment id '4' in history\n" {
t.Fatal("Find revision history should fail with correct error message")
}
}

View File

@@ -60,6 +60,9 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
# Add credentials with GitHub App authentication to use for all repositories under https://ghe.example.com/repos
argocd repocreds add https://ghe.example.com/repos/ --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem --github-app-enterprise-base-url https://ghe.example.com/api/v3
# Add credentials with helm oci registry so that these oci registry urls do not need to be added as repos individually.
argocd repocreds add localhost:5000/myrepo --enable-oci --type helm
`
var command = &cobra.Command{

View File

@@ -146,6 +146,17 @@ func populateIngressInfo(un *unstructured.Unstructured, res *ResourceInfo) {
tlshost := tlsline["host"]
if tlshost == host {
stringPort = "https"
continue
}
if hosts := tlsline["hosts"]; hosts != nil {
tlshosts, ok := tlsline["hosts"].(map[string]interface{})
if ok {
for j := range tlshosts {
if tlshosts[j] == host {
stringPort = "https"
}
}
}
}
}
}
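
This hunk is the fix from #6901: the ingress info now also checks the `spec.tls[].hosts` list when deciding whether an external URL should use the `https` scheme. A simplified sketch of that decision, assuming plain typed structs instead of the unstructured maps handled above:

```go
// Simplified sketch, assuming plain []string host lists rather than the
// unstructured map handling in the real resource-info code.
package main

import "fmt"

type ingressTLS struct {
	Hosts []string
}

// schemeForHost returns "https" when host is covered by any TLS block, else "http".
func schemeForHost(host string, tls []ingressTLS) string {
	for _, t := range tls {
		for _, h := range t.Hosts {
			if h == host {
				return "https"
			}
		}
	}
	return "http"
}

func main() {
	tls := []ingressTLS{{Hosts: []string{"argocd.example.com"}}}
	fmt.Println(schemeForHost("argocd.example.com", tls)) // https
	fmt.Println(schemeForHost("other.example.com", tls))  // http
}
```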

View File

@@ -1 +1 @@
Please refer to [the Contribution Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/contributing/)
Please refer to [the Contribution Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/code-contributions/)

View File

@@ -0,0 +1,112 @@
# Submitting code contributions to Argo CD
## Preface
The Argo CD project continuously grows, both in terms of features and community size. It gets adopted by more and more organisations which entrust Argo CD to handle their critical production workloads. Thus, we need to take great care with any changes that affect compatibility, performance, scalability, stability and security of Argo CD. For this reason, every new feature or larger enhancement must be properly designed and discussed before it gets accepted into the code base.
We do welcome and encourage everyone to participate in the Argo CD project, but please understand that we can't accept each and every contribution from the community, for various reasons.
If you want to submit code for a great new feature or enhancement, we kindly ask you to take a look at the
enhancement process outlined below before you start to write code or submit a PR. This will ensure that your idea is well aligned with the project's strategy and technical requirements, and it will help greatly in getting your code merged into our code base.
Before submitting code for a new feature (and also, to some extent, for more complex bug fixes) please
[raise an Enhancement Proposal or Bug Issue](https://github.com/argoproj/argo-cd/issues/new/choose)
first.
Each enhancement proposal needs to go through our
[triage process](#triage-process)
before we accept code contributions. To facilitate triage and to provide transparency, we use
[this GitHub project](https://github.com/orgs/argoproj/projects/18) to keep track of this process' outcome.
_Please_ do not spend too much time on larger features or refactorings before the corresponding enhancement has been triaged. This may save everyone some amount of frustration and time, as the enhancement proposal might be rejected, and the code would never get merged. However, sometimes it's helpful to have some PoC code along with a proposal.
We will do our best to triage incoming enhancement proposals quickly, with one of the following outcomes:
* Accepted
* Rejected
* Proposal requires a design document to be further discussed
Depending on how many enhancement proposals we receive at given times, it may take some time until we can look at yours.
Also, please make sure you have read our
[Toolchain Guide](toolchain-guide.md)
to understand our toolchain and our continuous integration processes. It contains some invaluable information to get started with the complex code base that makes up Argo CD.
## Quick start
If you want a quick start contributing to Argo CD, take a look at issues that are labeled with
[help wanted](https://github.com/argoproj/argo-cd/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
or
[good first issue](https://github.com/argoproj/argo-cd/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22).
These are issues that were already triaged and accepted.
If the issue is already attached to the next
[version milestone](https://github.com/argoproj/argo-cd/milestones),
we have decided to also dedicate some of our time on reviews to PRs received for these issues.
We encourage our community to pick up issues that are labeled in this way *and* are attached to the next version's milestone, with a promise for them to get a proper review with the clear intention for the incoming PRs to get merged.
## Triage process
### Overview
Our triage process for enhancement proposals ensures that we take a look at all incoming enhancements to determine whether we will accept code submissions to implement them.
The process works as follows:
* New Enhancement Proposals raised on our GitHub issue tracker are moved to the _Incoming_ column of the project's board. These are the proposals that are in the queue for triage.
* The _Active_ column holds the issues that are currently being triaged, or will be triaged next.
* The _Accepted_ column holds the issues that have been triaged and are considered good to be implemented (e.g. the project agreed that the feature would be great to have)
* The _Declined_ column holds the issues that were rejected during triage. The issue will be updated with information about why the proposal has been rejected.
* The _Needs discussion_ column holds the issues that were found to require additional information, or even a design document, during triage.
### Triage cadence
Triage of enhancement proposals is performed transparently, offline using issue comments and online in our weekly contributor's meeting. _Everyone_ is invited to participate in triaging, the process is not limited to participation only by maintainers.
Usually, we will triage enhancement proposals in a First-In-First-Out order, which means that the oldest proposals will be triaged first.
We aim to triage at least 10 proposals a week. Depending on our available time, we may be triaging a higher or lower number of proposals in any given week.
## Proposal states
### Accepted proposals
When a proposal is considered _Accepted_, it was decided that this enhancement would be valuable to the community at large and fits into the overall strategic roadmap of the project.
Implementation of the issue may be started, either by the proposal's creator or another community member (including maintainers of the project).
The issue should be refined enough by now to contain any concerns and guidelines to be taken into consideration during implementation.
### Declined proposals
We don't decline proposals lightly, and we will do our best to give a proper reasoning why we think that the proposal does not fit with the future of the project. Reasons for declining proposals may be - amongst others - that the change would be breaking for many, or that it does not meet the strategic direction of the project. Usually, discussion will be facilitated with the enhancement's creator before declining a proposal.
Once a proposal is in _Declined_ state it's unlikely that we will accept code contributions for its implementation.
### Proposals that need discussion
Sometimes, we can't completely understand a proposal from its GitHub issue and require more information on the original intent or on more details about the implementation. If we are confronted with such an issue during the triage, we move this issue to the _Needs discussion_ column to indicate that we expect the issue's creator to supply more information on their idea. We may ask you to provide this information, either by adding that information to the issue itself or by joining one of our
[regular contributor's meeting](#regular-contributor-meeting)
to discuss the proposal with us.
Also, issues that we find to require a more formal design document will be moved to this column.
## Design documents
For some enhancement proposals (especially those that will change behavior of Argo CD substantially, are attached with some caveats or where upgrade/downgrade paths are not clear), a more formal design document will be required in order to fully discuss and understand the enhancement in the broader community. This requirement is usually determined during triage. If you submitted an enhancement proposal, we may ask you to provide this more formal write-up, along with some concerns or topics that need to be addressed.
Design documents are usually submitted as PR and use [this template](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/001-proposal-template.md) as a guide what kind of information we're looking for. Discussion will take place in the review process. When a design document gets merged, we consider it as approved and code can be written and submitted to implement this specific design.
## Regular contributor meeting
Our community regularly meets virtually to discuss issues, ideas and enhancements around Argo CD. We invite you to join these virtual meetings if you want to bring up certain things (including your enhancement proposals), participate in our triaging or just get to know other contributors.
The current cadence of our meetings is weekly, every Thursday at 4pm UTC (9am Pacific, 12pm Eastern, 6pm Central European, 9:30pm Indian). We use Zoom to conduct these meetings.
* [Agenda document (Google Docs, includes Zoom link)](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
If you want to discuss something, we kindly ask you to put your item on the
[agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
for one of the upcoming meetings so that we can plan in the time to discuss it.

View File

@@ -1,327 +1,2 @@
# Contribution guide
## Preface
We want to make contributing to ArgoCD as simple and smooth as possible.
This guide shall help you in setting up your build & test environment, so that you can start developing and testing bug fixes and feature enhancements without having to make too much effort in setting up a local toolchain.
If you want to submit a PR, please read this document carefully, as it contains important information guiding you through our PR quality gates.
As is the case with the development process, this document is under constant change. If you notice any error, or if you think this document is out-of-date, or if you think it is missing something: Feel free to submit a PR or submit a bug to our GitHub issue tracker.
If you need guidance with submitting a PR, or have any other questions regarding development of ArgoCD, do not hesitate to [join our Slack](https://argoproj.github.io/community/join-slack) and get in touch with us in the `#argo-dev` channel!
## Before you start
You will need at least the following things in your toolchain in order to develop and test ArgoCD locally:
* A Kubernetes cluster. You won't need a fully blown multi-master, multi-node cluster, but you will need something like K3S, Minikube or microk8s. You will also need a working Kubernetes client (`kubectl`) configuration in your development environment. The configuration must reside in `~/.kube/config` and the API server URL must point to the IP address of your local machine (or VM), and **not** to `localhost` or `127.0.0.1` if you are using the virtualized development toolchain (see below)
* You will also need a working Docker runtime environment, to be able to build and run images.
The Docker version must be fairly recent, and support multi-stage builds. You should not work as root. Make your local user a member of the `docker` group to be able to control the Docker service on your machine.
* Obviously, you will need a `git` client for pulling source code and pushing back your changes.
* Last but not least, you will need a Go SDK and related tools (such as GNU `make`) installed and working on your development environment. The minimum required Go version for building and testing ArgoCD is **v1.16**.
* We will assume that your Go workspace is at `~/go`.
!!! note
**Attention minikube users**: By default, minikube will create Kubernetes client configuration that uses authentication data from files. This is incompatible with the virtualized toolchain. So if you intend to use the virtualized toolchain, you have to embed this authentication data into the client configuration. To do so, start minikube using `minikube start --embed-certs`. Please also note that minikube using the Docker driver is currently not supported with the virtualized toolchain, because the Docker driver exposes the API server on 127.0.0.1 hard-coded. If in doubt, run `make verify-kube-connect` to find out.
## Submitting PRs
When you submit a PR against ArgoCD's GitHub repository, a couple of CI checks will be run automatically to ensure your changes will build fine and meet certain quality standards. Your contribution needs to pass those checks in order to be merged into the repository.
In general, it might be beneficial to only submit a PR for an existing issue. Especially for larger changes, an Enhancement Proposal should exist before.
!!!note
Please make sure that you always create PRs from a branch that is up-to-date with the latest changes from ArgoCD's master branch. Depending on how long it takes for the maintainers to review and merge your PR, it might be necessary to pull in latest changes into your branch again.
Please understand that we, as an Open Source project, have limited capacity for reviewing and merging PRs to ArgoCD. We will do our best to review your PR and give you feedback as soon as possible, but please bear with us if it takes a little longer than expected.
The following read will help you to submit a PR that meets the standards of our CI tests:
### Title of the PR
Please use a meaningful and concise title for your PR. This will help us to pick PRs for review quickly, and the PR title will also end up in the Changelog.
We use the [Semantic PR title checker](https://github.com/zeke/semantic-pull-requests) to categorize your PR into one of the following categories:
* `fix` - Your PR contains one or more code bug fixes
* `feat` - Your PR contains a new feature
* `docs` - Your PR improves the documentation
* `chore` - Your PR improves any internals of ArgoCD, such as the build process, unit tests, etc
Please prefix the title of your PR with one of the valid categories. For example, if you chose `Add documentation for GitHub SSO integration` as the title of your PR, please use `docs: Add documentation for GitHub SSO integration` instead.
### Contributor License Agreement
Every contributor to ArgoCD must have signed the current Contributor License Agreement (CLA). You only have to sign the CLA when you are a first time contributor, or when the agreement has changed since your last time signing it. The main purpose of the CLA is to ensure that you hold the required rights for your contribution. The CLA signing is an automated process.
You can read the current version of the CLA [here](https://cla-assistant.io/argoproj/argo-cd).
### PR template checklist
Upon opening a PR, the details will contain a checklist from a template. Please read the checklist, and tick those marks that apply to you.
### Automated builds & tests
After you have submitted your PR, and whenever you push new commits to that branch, GitHub will run a number of Continuous Integration checks against your code. It will execute the following actions, and each of them has to pass:
* Build the Go code (`make build`)
* Generate API glue code and manifests (`make codegen`)
* Run a Go linter on the code (`make lint`)
* Run the unit tests (`make test`)
* Run the End-to-End tests (`make test-e2e`)
* Build and lint the UI code (`make lint-ui`)
* Build the `argocd` CLI (`make cli`)
If any of these tests in the CI pipeline fail, it means that some of your contribution is considered faulty (or a test might be flaky, see below).
### Code test coverage
We use [CodeCov](https://codecov.io) in our CI pipeline to check for test coverage, and once you submit your PR, it will run and report on the coverage difference as a comment within your PR. If the difference is too high in the negative, i.e. your submission introduced a significant drop in code coverage, the CI check will fail.
Whenever you develop a new feature or submit a bug fix, please also write appropriate unit tests for it. If you write a completely new module, please aim for at least 80% of coverage.
If you want to see how much coverage just a specific module (i.e. your new one) has, you can set the `TEST_MODULE` to the (fully qualified) name of that module with `make test`, i.e.:
```bash
make test TEST_MODULE=github.com/argoproj/argo-cd/server/cache
...
ok github.com/argoproj/argo-cd/server/cache 0.029s coverage: 89.3% of statements
```
## Local vs Virtualized toolchain
ArgoCD provides a fully virtualized development and testing toolchain using Docker images. It is recommended to use those images, as they provide the same runtime environment as the final product and it is much easier to keep up-to-date with changes to the toolchain and dependencies. But as using Docker comes with a slight performance penalty, you might want to setup a local toolchain.
Most relevant targets for the build & test cycles in the `Makefile` provide two variants, one of them suffixed with `-local`. For example, `make test` will run unit tests in the Docker container, `make test-local` will run it natively on your local system.
If you are going to use the virtualized toolchain, please bear in mind the following things:
* Your Kubernetes API server must listen on the interface of your local machine or VM, and not on `127.0.0.1` only.
* Your Kubernetes client configuration (`~/.kube/config`) must not use an API URL that points to `localhost` or `127.0.0.1`.
You can test whether the virtualized toolchain has access to your Kubernetes cluster by running `make verify-kube-connect` (*after* you have setup your development environment, as described below), which will run `kubectl version` inside the Docker container used for running all tests.
The Docker container for the virtualized toolchain will use the following local mounts from your workstation, and possibly modify its contents:
* `~/go/src` - Your Go workspace's source directory (modifications expected)
* `~/.cache/go-build` - Your Go build cache (modifications expected)
* `~/.kube` - Your Kubernetes client configuration (no modifications)
* `/tmp` - Your system's temp directory (modifications expected)
## Setting up your development environment
The following steps are required no matter whether you chose to use a virtualized or a local toolchain.
### Clone the ArgoCD repository from your personal fork on GitHub
* `mkdir -p ~/go/src/github.com/argoproj`
* `cd ~/go/src/github.com/argoproj`
* `git clone https://github.com/yourghuser/argo-cd`
* `cd argo-cd`
### Optional: Setup an additional Git remote
While everyone has their own Git workflow, the author of this document recommends to create a remote called `upstream` in your local copy pointing to the original ArgoCD repository. This way, you can easily keep your local branches up-to-date by merging in latest changes from the ArgoCD repository, i.e. by doing a `git pull upstream master` in your locally checked out branch. To create the remote, run `git remote add upstream https://github.com/argoproj/argo-cd`
### Install the must-have requirements
Make sure you fulfill the pre-requisites above and run some preliminary tests. Neither of them should report an error.
* Run `kubectl version`
* Run `docker version`
* Run `go version`
### Build (or pull) the required Docker image
Build the required Docker image by running `make test-tools-image` or pull the latest version by issuing `docker pull argoproj/argocd-test-tools`.
The `Dockerfile` used to build these images can be found at `test/container/Dockerfile`.
### Test connection from build container to your K8s cluster
Run `make verify-kube-connect`, it should execute without error.
If you receive an error similar to the following:
```
The connection to the server 127.0.0.1:6443 was refused - did you specify the right host or port?
make: *** [Makefile:386: verify-kube-connect] Error 1
```
you should edit your `~/.kube/config` and modify the `server` option to point to your correct K8s API (as described above).
### Using k3d
[k3d](https://github.com/rancher/k3d) is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s), a minimal Kubernetes distribution, in docker. Because it's running in a docker container, you're dealing with docker's internal networking rules when using k3d. A typical Kubernetes cluster running on your local machine is part of the same network that you're on so you can access it using **kubectl**. However, a Kubernetes cluster running within a docker container (in this case, the one launched by make) cannot access 0.0.0.0 from inside the container itself, when 0.0.0.0 is a network resource outside the container itself (and/or the container's network). This is the cost of a fully self-contained, disposable Kubernetes cluster. The following steps should help with a successful `make verify-kube-connect` execution.
1. Find your host IP by executing `ifconfig` on Mac/Linux and `ipconfig` on Windows. For most users, the following command works to find the IP address.
* For Mac:
```
IP=`ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}'`
echo $IP
```
* For Linux:
```
IP=`ifconfig eth0 | grep inet | grep -v inet6 | awk '{print $2}'`
echo $IP
```
Keep in mind that this IP is dynamically assigned by the router so if your router restarts for any reason, your IP might change.
2. Edit your ~/.kube/config and replace 0.0.0.0 with the above IP address.
3. Execute a `kubectl version` to make sure you can still connect to the Kubernetes API server via this new IP. Run `make verify-kube-connect` and check if it works.
4. Finally, so that you don't have to keep updating your kube-config whenever you spin up a new k3d cluster, add `--api-port $IP:6550` to your **k3d cluster create** command, where $IP is the value from step 1. An example command is provided here:
```
k3d cluster create my-cluster --wait --k3s-server-arg '--disable=traefik' --api-port $IP:6550 -p 443:443@loadbalancer
```
## The development cycle
When you have developed and possibly manually tested the code you want to contribute, you should ensure that everything will build correctly. Commit your changes to the local copy of your Git branch and perform the following steps:
### Pull in all build dependencies
As build dependencies change over time, you have to synchronize your development environment with the current specification. In order to pull in all required dependencies, issue:
* `make dep-ui`
ArgoCD recently migrated to Go modules. Usually, dependencies will be downloaded on build time, but the Makefile provides two targets to download and vendor all dependencies:
* `make mod-download` will download all required Go modules and
* `make mod-vendor` will vendor those dependencies into the ArgoCD source tree
### Generate API glue code and other assets
ArgoCD relies on Google's [Protocol Buffers](https://developers.google.com/protocol-buffers) for its API, and this makes heavy use of auto-generated glue code and stubs. Whenever you touched parts of the API code, you must re-generate the auto generated code.
* Run `make codegen`, this might take a while
* Check if something has changed by running `git status` or `git diff`
* Commit any possible changes to your local Git branch, an appropriate commit message would be `Changes from codegen`, for example.
!!!note
There are a few non-obvious assets that are auto-generated. You should not change the autogenerated assets, as they will be overwritten by a subsequent run of `make codegen`. Instead, change their source files. Prominent examples of non-obvious auto-generated code are `swagger.json` or the installation manifest YAMLs.
### Build your code and run unit tests
After the code glue has been generated, your code should build and the unit tests should run without any errors. Execute the following statements:
* `make build`
* `make test`
These steps are non-modifying, so there's no need to check for changes afterwards.
### Lint your code base
In order to keep a consistent code style in our source tree, your code must be well-formed in accordance to some widely accepted rules, which are applied by a Linter.
The Linter might make some automatic changes to your code, such as indentation fixes. Some other errors reported by the Linter have to be fixed manually.
* Run `make lint` and observe any errors reported by the Linter
* Fix any of the errors reported and commit to your local branch
* Finally, after the Linter reports no errors anymore, run `git status` or `git diff` to check for any changes made automatically by Lint
* If there were automatic changes, commit them to your local branch
If you touched UI code, you should also run the Yarn linter on it:
* Run `make lint-ui`
* Fix any of the errors reported by it
## Contributing to Argo CD UI
Argo CD, along with Argo Workflows, uses shared React components from [Argo UI](https://github.com/argoproj/argo-ui). Examples of some of these components include buttons, containers, form controls,
and others. Although you can make changes to these files and run them locally, in order to have these changes added to the Argo CD repo, you will need to follow these steps.
1. Fork and clone the [Argo UI repository](https://github.com/argoproj/argo-ui).
2. `cd` into your `argo-ui` directory, and then run `yarn install`.
3. Make your file changes.
4. Run `yarn start` to start a [storybook](https://storybook.js.org/) dev server and view the components in your browser. Make sure all your changes work as expected.
5. Use [yarn link](https://classic.yarnpkg.com/en/docs/cli/link/) to link Argo UI package to your Argo CD repository. (Commands below assume that `argo-ui` and `argo-cd` are both located within the same parent folder)
* `cd argo-ui`
* `yarn link`
* `cd ../argo-cd/ui`
* `yarn link argo-ui`
Once `argo-ui` package has been successfully linked, test out changes in your local development environment.
6. Commit changes and open a PR to [Argo UI](https://github.com/argoproj/argo-ui).
7. Once your PR has been merged in Argo UI, `cd` into your `argo-cd` folder and run `yarn add https://github.com/argoproj/argo-ui.git`. This will update the commit SHA in the `ui/yarn.lock` file to use the latest master commit for argo-ui.
8. Submit the changes to `ui/yarn.lock` in a PR to Argo CD.
## Setting up a local toolchain
For development, you can either use the fully virtualized toolchain provided as Docker images, or you can set up the toolchain on your local development machine. Due to the dynamic nature of requirements, you might want to stay with the virtualized environment.
### Install required dependencies and build-tools
!!!note
The installation instructions are valid for Linux hosts only. Mac instructions will follow shortly.
For installing the tools required to build and test ArgoCD on your local system, we provide convenient installer scripts. By default, they will install binaries to `/usr/local/bin` on your system, which might require `root` privileges.
You can change the target location by setting the `BIN` environment variable before running the installer scripts. For example, you can install the binaries into `~/go/bin` (which should then be the first component in your `PATH` environment variable, i.e. `export PATH=~/go/bin:$PATH`):
```shell
make BIN=~/go/bin install-tools-local
```
Additionally, you have to install at least the following tools via your OS's package manager (this list might not be always up-to-date):
* Git LFS plugin
* GnuPG version 2
### Install Go dependencies
You need to pull in all required Go dependencies. To do so, run
* `make mod-download-local`
* `make mod-vendor-local`
### Test your build toolchain
The first way to check whether your build toolchain is set up correctly is to generate the glue code for the API and, after that, run a normal build:
* `make codegen-local`
* `make build-local`
This should return without any error.
### Run unit-tests
The next thing is to make sure that unit tests are running correctly on your system. These will require that all dependencies, such as Helm, Kustomize, Git, GnuPG, etc are correctly installed and fully functioning:
* `make test-local`
### Run end-to-end tests
The final step is running the End-to-End test suite, which makes sure that your Kubernetes dependencies are working properly. This will involve starting all of the ArgoCD components locally on your computer. The end-to-end tests consist of two parts: a server component and a client component.
* First, start the End-to-End server: `make start-e2e-local`. This will spawn a number of processes and services on your system.
* When all components have started, run `make test-e2e-local` to run the end-to-end tests against your local services.
For more information about End-to-End tests, refer to the [End-to-End test documentation](test-e2e.md).
## Enhancement proposals
If you are proposing a major feature, change in design or process refactor, please help define how it would look like with a new enhancement proposal as described in the enhancement proposal [template](/docs/proposals/001-proposal-template.md).
The contents of this document have been moved to the
[Toolchain guide](toolchain-guide.md)

View File

@@ -4,9 +4,9 @@
During development, it might be viable to run ArgoCD outside of a Kubernetes cluster. This will greatly speed up development, as you don't have to constantly build, push and install new ArgoCD Docker images with your latest changes.
You will still need a working Kubernetes cluster, as described in the [Contribution Guide](contributing.md), where ArgoCD will store all of its resources.
You will still need a working Kubernetes cluster, as described in the [Toolchain Guide](toolchain-guide.md), where ArgoCD will store all of its resources and configuration.
If you followed the [Contribution Guide](contributing.md) in setting up your toolchain, you can run ArgoCD locally with these simple steps:
If you followed the [Toolchain Guide](toolchain-guide.md) in setting up your toolchain, you can run ArgoCD locally with these simple steps:
### Install ArgoCD resources to your cluster

View File

@@ -0,0 +1,327 @@
# Development toolchain
## Preface
!!!note "Before you start"
The Argo CD project continuously grows, both in terms of features and community size. It gets adopted by more and more organisations which entrust Argo CD to handle their critical production workloads. Thus, we need to take great care with any changes that affect compatibility, performance, scalability, stability and security of Argo CD. For this reason, every new feature or larger enhancement must be properly designed and discussed before it gets accepted into the code base.
We do welcome and encourage everyone to participate in the Argo CD project, but please understand that we can't accept each and every contribution from the community, for various reasons. If you want to submit code for a great new feature or enhancement, we kindly ask you to take a look at the
[code contribution guide](code-contributions.md#) before you start to write code or submit a PR.
We want to make contributing to Argo CD as simple and smooth as possible.
This guide shall help you in setting up your build & test environment, so that you can start developing and testing bug fixes and feature enhancements without having to make too much effort in setting up a local toolchain.
If you want to submit a PR, please read this document carefully, as it contains important information guiding you through our PR quality gates.
As is the case with the development process, this document is under constant change. If you notice any error, or if you think this document is out-of-date, or if you think it is missing something: Feel free to submit a PR or submit a bug to our GitHub issue tracker.
If you need guidance with submitting a PR, or have any other questions regarding development of Argo CD, do not hesitate to [join our Slack](https://argoproj.github.io/community/join-slack) and get in touch with us in the `#argo-contributors` channel!
## Before you start
You will need at least the following things in your toolchain in order to develop and test Argo CD locally:
* A Kubernetes cluster. You won't need a fully blown multi-master, multi-node cluster, but you will need something like K3S, Minikube or microk8s. You will also need a working Kubernetes client (`kubectl`) configuration in your development environment. The configuration must reside in `~/.kube/config` and the API server URL must point to the IP address of your local machine (or VM), and **not** to `localhost` or `127.0.0.1` if you are using the virtualized development toolchain (see below)
* You will also need a working Docker runtime environment, to be able to build and run images.
The Docker version must be fairly recent, and support multi-stage builds. You should not work as root. Make your local user a member of the `docker` group to be able to control the Docker service on your machine.
* Obviously, you will need a `git` client for pulling source code and pushing back your changes.
* Last but not least, you will need a Go SDK and related tools (such as GNU `make`) installed and working on your development environment. The minimum required Go version for building and testing Argo CD is **v1.16**.
* We will assume that your Go workspace is at `~/go`.
!!! note
**Attention minikube users**: By default, minikube will create Kubernetes client configuration that uses authentication data from files. This is incompatible with the virtualized toolchain. So if you intend to use the virtualized toolchain, you have to embed this authentication data into the client configuration. To do so, start minikube using `minikube start --embed-certs`. Please also note that minikube using the Docker driver is currently not supported with the virtualized toolchain, because the Docker driver exposes the API server on 127.0.0.1 hard-coded. If in doubt, run `make verify-kube-connect` to find out.
## Submitting PRs
### Continuous Integration process
When you submit a PR against Argo CD's GitHub repository, a couple of CI checks will be run automatically to ensure your changes will build fine and meet certain quality standards. Your contribution needs to pass those checks in order to be merged into the repository.
!!!note
Please make sure that you always create PRs from a branch that is up-to-date with the latest changes from Argo CD's master branch. Depending on how long it takes for the maintainers to review and merge your PR, it might be necessary to pull in latest changes into your branch again.
Please understand that we, as an Open Source project, have limited capacity for reviewing and merging PRs to Argo CD. We will do our best to review your PR and give you feedback as soon as possible, but please bear with us if it takes a little longer than expected.
The following read will help you to submit a PR that meets the standards of our CI tests:
### Title of the PR
Please use a meaningful and concise title for your PR. This will help us to pick PRs for review quickly, and the PR title will also end up in the Changelog.
We use the [Semantic PR title checker](https://github.com/zeke/semantic-pull-requests) to categorize your PR into one of the following categories:
* `fix` - Your PR contains one or more code bug fixes
* `feat` - Your PR contains a new feature
* `docs` - Your PR improves the documentation
* `chore` - Your PR improves any internals of Argo CD, such as the build process, unit tests, etc
Please prefix the title of your PR with one of the valid categories. For example, if you chose `Add documentation for GitHub SSO integration` as the title of your PR, please use `docs: Add documentation for GitHub SSO integration` instead.
### Contributor License Agreement
Every contributor to Argo CD must have signed the current Contributor License Agreement (CLA). You only have to sign the CLA when you are a first time contributor, or when the agreement has changed since your last time signing it. The main purpose of the CLA is to ensure that you hold the required rights for your contribution. The CLA signing is an automated process.
You can read the current version of the CLA [here](https://cla-assistant.io/argoproj/argo-cd).
### PR template checklist
Upon opening a PR, the details will contain a checklist from a template. Please read the checklist, and tick those marks that apply to you.
### Automated builds & tests
After you have submitted your PR, and whenever you push new commits to that branch, GitHub will run a number of Continuous Integration checks against your code. It will execute the following actions, and each of them has to pass:
* Build the Go code (`make build`)
* Generate API glue code and manifests (`make codegen`)
* Run a Go linter on the code (`make lint`)
* Run the unit tests (`make test`)
* Run the End-to-End tests (`make test-e2e`)
* Build and lint the UI code (`make lint-ui`)
* Build the `argocd` CLI (`make cli`)
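If you want to catch most of these failures before CI does, you can run the corresponding Makefile targets yourself first; a minimal sketch using the targets listed above (most of them also have a `-local` variant, see further below):
```shell
make codegen   # regenerate API glue code and manifests
make build     # build the Go code
make lint      # run the Go linter
make test      # run the unit tests
```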
If any of these checks in the CI pipeline fail, it means that some part of your contribution is considered faulty (or a test might be flaky, see below).
### Code test coverage
We use [CodeCov](https://codecov.io) in our CI pipeline to check for test coverage, and once you submit your PR, it will run and report on the coverage difference as a comment within your PR. If the difference is too high in the negative, i.e. your submission introduced a significant drop in code coverage, the CI check will fail.
Whenever you develop a new feature or submit a bug fix, please also write appropriate unit tests for it. If you write a completely new module, please aim for at least 80% coverage.
If you want to see how much coverage just a specific module (e.g. your new one) has, you can set `TEST_MODULE` to the (fully qualified) name of that module when running `make test`, e.g.:
```bash
make test TEST_MODULE=github.com/argoproj/argo-cd/server/cache
...
ok github.com/argoproj/argo-cd/server/cache 0.029s coverage: 89.3% of statements
```
## Local vs Virtualized toolchain
Argo CD provides a fully virtualized development and testing toolchain using Docker images. It is recommended to use those images, as they provide the same runtime environment as the final product and it is much easier to keep up-to-date with changes to the toolchain and dependencies. But as using Docker comes with a slight performance penalty, you might want to set up a local toolchain.
Most relevant targets for the build & test cycles in the `Makefile` provide two variants, one of them suffixed with `-local`. For example, `make test` will run the unit tests in the Docker container, while `make test-local` will run them natively on your local system.
If you are going to use the virtualized toolchain, please bear in mind the following things:
* Your Kubernetes API server must listen on the interface of your local machine or VM, and not on `127.0.0.1` only.
* Your Kubernetes client configuration (`~/.kube/config`) must not use an API URL that points to `localhost` or `127.0.0.1`.
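A quick way to check the second point is to print the API server URL of your currently active context; this is a small sketch using standard `kubectl` functionality:
```shell
# Should print a routable address such as https://192.168.1.100:6443,
# not https://127.0.0.1:6443 or https://localhost:6443
kubectl config view --minify --output 'jsonpath={..server}'; echo
```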
You can test whether the virtualized toolchain has access to your Kubernetes cluster by running `make verify-kube-connect` (*after* you have set up your development environment, as described below), which will run `kubectl version` inside the Docker container used for running all tests.
The Docker container for the virtualized toolchain will use the following local mounts from your workstation, and possibly modify its contents:
* `~/go/src` - Your Go workspace's source directory (modifications expected)
* `~/.cache/go-build` - Your Go build cache (modifications expected)
* `~/.kube` - Your Kubernetes client configuration (no modifications)
* `/tmp` - Your system's temp directory (modifications expected)
## Setting up your development environment
The following steps are required no matter whether you choose to use a virtualized or a local toolchain.
### Clone the Argo CD repository from your personal fork on GitHub
* `mkdir -p ~/go/src/github.com/argoproj`
* `cd ~/go/src/github.com/argoproj`
* `git clone https://github.com/yourghuser/argo-cd`
* `cd argo-cd`
### Optional: Setup an additional Git remote
While everyone has their own Git workflow, the author of this document recommends creating a remote called `upstream` in your local copy that points to the original Argo CD repository. This way, you can easily keep your local branches up-to-date by merging in the latest changes from the Argo CD repository, i.e. by running `git pull upstream master` in your locally checked out branch. To create the remote, run `git remote add upstream https://github.com/argoproj/argo-cd`.
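Put together, a possible workflow for keeping a feature branch current could look like this (the branch name `my-feature` is just an example):
```shell
git remote add upstream https://github.com/argoproj/argo-cd
git fetch upstream                 # fetch the latest changes from upstream
git checkout my-feature            # switch to your local working branch
git pull upstream master          # merge upstream's master into it
```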
### Install the must-have requirements
Make sure you fulfill the prerequisites above by running some preliminary tests. None of them should report an error.
* Run `kubectl version`
* Run `docker version`
* Run `go version`
### Build (or pull) the required Docker image
Build the required Docker image by running `make test-tools-image` or pull the latest version by issuing `docker pull argoproj/argocd-test-tools`.
The `Dockerfile` used to build these images can be found at `test/container/Dockerfile`.
### Test connection from build container to your K8s cluster
Run `make verify-kube-connect`; it should execute without error.
If you receive an error similar to the following:
```
The connection to the server 127.0.0.1:6443 was refused - did you specify the right host or port?
make: *** [Makefile:386: verify-kube-connect] Error 1
```
you should edit your `~/.kube/config` and modify the `server` option to point to your correct K8s API (as described above).
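The relevant part of `~/.kube/config` might then look roughly like this (the IP address and cluster name below are only illustrations, use the values of your own machine or VM):
```yaml
clusters:
- cluster:
    certificate-authority-data: <...>
    server: https://192.168.1.100:6443   # instead of https://127.0.0.1:6443
  name: my-cluster
```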
### Using k3d
[k3d](https://github.com/rancher/k3d) is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s), a minimal Kubernetes distribution, in Docker. Because it runs inside a Docker container, you are dealing with Docker's internal networking rules when using k3d. A typical Kubernetes cluster running on your local machine is part of the same network that you are on, so you can access it using **kubectl**. However, a Kubernetes cluster running within a Docker container (in this case, the one launched by `make`) cannot access `0.0.0.0` from inside the container itself when `0.0.0.0` is a network resource outside the container itself (and/or the container's network). This is the cost of a fully self-contained, disposable Kubernetes cluster. The following steps should help with a successful `make verify-kube-connect` execution.
1. Find your host IP by executing `ifconfig` on Mac/Linux and `ipconfig` on Windows. For most users, the following commands work to find the IP address.
* For Mac:
```
IP=`ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}'`
echo $IP
```
* For Linux:
```
IP=`ifconfig eth0 | grep inet | grep -v inet6 | awk '{print $2}'`
echo $IP
```
Keep in mind that this IP is dynamically assigned by the router so if your router restarts for any reason, your IP might change.
2. Edit your `~/.kube/config` and replace `0.0.0.0` with the above IP address.
3. Execute a `kubectl version` to make sure you can still connect to the Kubernetes API server via this new IP. Run `make verify-kube-connect` and check if it works.
4. Finally, so that you don't have to keep updating your kube-config whenever you spin up a new k3d cluster, add `--api-port $IP:6550` to your **k3d cluster create** command, where $IP is the value from step 1. An example command is provided here:
```
k3d cluster create my-cluster --wait --k3s-server-arg '--disable=traefik' --api-port $IP:6550 -p 443:443@loadbalancer
```
## The development cycle
When you have developed and possibly manually tested the code you want to contribute, you should ensure that everything will build correctly. Commit your changes to the local copy of your Git branch and perform the following steps:
### Pull in all build dependencies
As build dependencies change over time, you have to synchronize your development environment with the current specification. In order to pull in all required dependencies, issue:
* `make dep-ui`
Argo CD recently migrated to Go modules. Usually, dependencies will be downloaded at build time, but the Makefile provides two targets to download and vendor all dependencies:
* `make mod-download` will download all required Go modules and
* `make mod-vendor` will vendor those dependencies into the Argo CD source tree
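In a fresh working copy, the whole dependency step therefore usually boils down to:
```shell
make dep-ui         # install the UI (yarn) dependencies
make mod-download   # download all required Go modules
make mod-vendor     # vendor them into the Argo CD source tree
```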
### Generate API glue code and other assets
Argo CD relies on Google's [Protocol Buffers](https://developers.google.com/protocol-buffers) for its API, and this makes heavy use of auto-generated glue code and stubs. Whenever you touch parts of the API code, you must re-generate the auto-generated code.
* Run `make codegen`, this might take a while
* Check if something has changed by running `git status` or `git diff`
* Commit any resulting changes to your local Git branch; an appropriate commit message would be, for example, `Changes from codegen`.
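A typical codegen cycle might therefore look like this:
```shell
make codegen                          # regenerate API glue code and other assets
git status                            # see which generated files changed
git add -A
git commit -m "Changes from codegen"  # only needed if something actually changed
```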
!!!note
There are a few non-obvious assets that are auto-generated. You should not change the autogenerated assets, as they will be overwritten by a subsequent run of `make codegen`. Instead, change their source files. Prominent examples of non-obvious auto-generated code are `swagger.json` or the installation manifest YAMLs.
### Build your code and run unit tests
After the code glue has been generated, your code should build and the unit tests should run without any errors. Execute the following statements:
* `make build`
* `make test`
These steps are non-modifying, so there's no need to check for changes afterwards.
### Lint your code base
In order to keep a consistent code style in our source tree, your code must be well-formed in accordance with some widely accepted rules, which are applied by a Linter.
The Linter might make some automatic changes to your code, such as indentation fixes. Some other errors reported by the Linter have to be fixed manually.
* Run `make lint` and observe any errors reported by the Linter
* Fix any of the errors reported and commit to your local branch
* Finally, after the Linter reports no more errors, run `git status` or `git diff` to check for any changes made automatically by the Linter
* If there were automatic changes, commit them to your local branch
If you touched UI code, you should also run the Yarn linter on it:
* Run `make lint-ui`
* Fix any of the errors reported by it
## Contributing to Argo CD UI
Argo CD, along with Argo Workflows, uses shared React components from [Argo UI](https://github.com/argoproj/argo-ui). Examples of some of these components include buttons, containers, form controls,
and others. Although you can make changes to these files and run them locally, in order to have these changes added to the Argo CD repo, you will need to follow these steps.
1. Fork and clone the [Argo UI repository](https://github.com/argoproj/argo-ui).
2. `cd` into your `argo-ui` directory, and then run `yarn install`.
3. Make your file changes.
4. Run `yarn start` to start a [storybook](https://storybook.js.org/) dev server and view the components in your browser. Make sure all your changes work as expected.
5. Use [yarn link](https://classic.yarnpkg.com/en/docs/cli/link/) to link the Argo UI package to your Argo CD repository. (The commands below assume that `argo-ui` and `argo-cd` are both located within the same parent folder.)
* `cd argo-ui`
* `yarn link`
* `cd ../argo-cd/ui`
* `yarn link argo-ui`
Once the `argo-ui` package has been successfully linked, test out the changes in your local development environment.
6. Commit changes and open a PR to [Argo UI](https://github.com/argoproj/argo-ui).
7. Once your PR has been merged in Argo UI, `cd` into your `argo-cd` folder and run `yarn add https://github.com/argoproj/argo-ui.git`. This will update the commit SHA in the `ui/yarn.lock` file to use the latest master commit for argo-ui.
8. Submit the changes to `ui/yarn.lock` in a PR to Argo CD.
## Setting up a local toolchain
For development, you can either use the fully virtualized toolchain provided as Docker images, or you can set up the toolchain on your local development machine. Due to the dynamic nature of requirements, you might want to stay with the virtualized environment.
### Install required dependencies and build-tools
!!!note
The installation instructions are valid for Linux hosts only. Mac instructions will follow shortly.
For installing the tools required to build and test Argo CD on your local system, we provide convenient installer scripts. By default, they will install binaries to `/usr/local/bin` on your system, which might require `root` privileges.
You can change the target location by setting the `BIN` environment variable before running the installer scripts. For example, you can install the binaries into `~/go/bin` (which should then be the first component in your `PATH` environment variable, i.e. `export PATH=~/go/bin:$PATH`):
```shell
make BIN=~/go/bin install-tools-local
```
Additionally, you have to install at least the following tools via your OS's package manager (this list might not always be up-to-date):
* Git LFS plugin
* GnuPG version 2
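How you install these depends on your distribution; on Debian or Ubuntu based systems, for example, something along these lines should work (package names may differ elsewhere):
```shell
sudo apt-get update
sudo apt-get install git-lfs gnupg2
```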
### Install Go dependencies
You need to pull in all required Go dependencies. To do so, run
* `make mod-download-local`
* `make mod-vendor-local`
### Test your build toolchain
The first check of whether your build toolchain is set up correctly is to generate the glue code for the API and, after that, run a normal build:
* `make codegen-local`
* `make build-local`
This should return without any error.
### Run unit-tests
The next step is to make sure that the unit tests run correctly on your system. They require that all dependencies, such as Helm, Kustomize, Git, GnuPG, etc., are correctly installed and fully functioning:
* `make test-local`
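If `make test-local` fails due to missing tools, a quick way to check that the external dependencies are available on your `PATH` is:
```shell
helm version
kustomize version
git version
gpg --version
```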
### Run end-to-end tests
The final step is running the End-to-End test suite, which makes sure that your Kubernetes dependencies are working properly. This will involve starting all of the Argo CD components locally on your computer. The end-to-end tests consist of two parts: a server component and a client component.
* First, start the End-to-End server: `make start-e2e-local`. This will spawn a number of processes and services on your system.
* When all components have started, run `make test-e2e-local` to run the end-to-end tests against your local services.
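In practice this usually means working with two terminals:
```shell
# Terminal 1: start the local end-to-end server (keeps running)
make start-e2e-local

# Terminal 2: once everything is up, run the test suite against it
make test-e2e-local
```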
For more information about End-to-End tests, refer to the [End-to-End test documentation](test-e2e.md).


@@ -28,7 +28,7 @@ kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/core-install.yaml
```
Use `argocd login --k8s-api` to [configure](./user-guide/commands/argocd_login.md) CLI access and skip steps 3-5.
Use `argocd login --core` to [configure](./user-guide/commands/argocd_login.md) CLI access and skip steps 3-5.
## 2. Download Argo CD CLI


@@ -69,5 +69,5 @@ RUN apt-get update && \
chmod +x /usr/local/bin/sops
# Switch back to non-root user
USER argocd
USER 999
```


@@ -1,13 +1,13 @@
# Installation
Argo CD has two type of installations: multi-tennant and core.
Argo CD has two type of installations: multi-tenant and core.
## Multi-Tenant
The multi-tenant installation is the most common way to install Argo CD. This type of installation is typically used to service multiple application developer teams
in the organization and maintained by a platform team.
The end-users can access Argo CD via API server using Web UI or `argocd` CLI. The `argocd` has to be configured using `argocd login <server-host>` command
The end-users can access Argo CD via the API server using the Web UI or `argocd` CLI. The `argocd` CLI has to be configured using `argocd login <server-host>` command
(learn more [here](../user-guide/commands/argocd_login.md)).
Two types of installation manifests are provided:
@@ -18,7 +18,7 @@ Not recommended for production use. This type of installation is typically used
* [install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/install.yaml) - Standard Argo CD installation with cluster-admin access. Use this
manifest set if you plan to use Argo CD to deploy applications in the same cluster that Argo CD runs
in (i.e. kubernetes.svc.default). Will still be able to deploy to external clusters with inputted
in (i.e. kubernetes.svc.default). It will still be able to deploy to external clusters with inputted
credentials.
* [namespace-install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/namespace-install.yaml) - Installation of Argo CD which requires only
@@ -26,11 +26,11 @@ Not recommended for production use. This type of installation is typically used
need Argo CD to deploy applications in the same cluster that Argo CD runs in, and will rely solely
on inputted cluster credentials. An example of using this set of manifests is if you run several
Argo CD instances for different teams, where each instance will be deploying applications to
external clusters. Will still be possible to deploy to the same cluster (kubernetes.svc.default)
external clusters. It will still be possible to deploy to the same cluster (kubernetes.svc.default)
with inputted credentials (i.e. `argocd cluster add <CONTEXT> --in-cluster --namespace <YOUR NAMESPACE>`).
> Note: Argo CD CRDs are not included into [namespace-install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/namespace-install.yaml).
> and have to be installed separately. The CRD manifests are located in [manifests/crds](https://github.com/argoproj/argo-cd/blob/master/manifests/crds) directory.
> and have to be installed separately. The CRD manifests are located in the [manifests/crds](https://github.com/argoproj/argo-cd/blob/master/manifests/crds) directory.
> Use the following command to install them:
> ```bash
> kubectl apply -k https://github.com/argoproj/argo-cd/manifests/crds\?ref\=stable
@@ -38,7 +38,7 @@ Not recommended for production use. This type of installation is typically used
### High Availability:
High Availability installation is recommended for production use. Bundle includes the same components but tunned for high availability and resiliency.
High Availability installation is recommended for production use. This bundle includes the same components but tuned for high availability and resiliency.
* [ha/install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/ha/install.yaml) - the same as install.yaml but with multiple replicas for
supported components.
@@ -49,10 +49,16 @@ High Availability installation is recommended for production use. Bundle include
## Core
The core installation is most suitable for cluster administrators who indepently use Argo CD and don't need multi-tenancy features. This installation
includes less components and easier to setup. The bundle does not include API server, UI as well as install non-HA light-weight version of each component.
includes fewer components and is easier to setup. The bundle does not include the API server or UI, and installs the lightweight (non-HA) version of each component.
The end-users need Kubernetes access to manage Argo CD. The `argocd` CLI has to be configured using `argocd login --k8s-api` command. The Web UI is also
available and can be started using `argocd admin dashboard` command.
The end-users need Kubernetes access to manage Argo CD. The `argocd` CLI has to be configured using the following commands:
```bash
kubectl config set-context --current --namespace=argocd # change current kube context to argocd namespace
argocd login --core
```
The Web UI is also available and can be started using the `argocd admin dashboard` command.
Installation manifests are available at [core-install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/core-install.yaml).
@@ -74,4 +80,4 @@ resources:
## Helm
The Argo CD can be installed using [Helm](https://helm.sh/). The Helm chart is currently community maintained and available at
[argo-helm/charts/argo-cd](https://github.com/argoproj/argo-helm/tree/master/charts/argo-cd).
[argo-helm/charts/argo-cd](https://github.com/argoproj/argo-helm/tree/master/charts/argo-cd).


@@ -2,7 +2,7 @@
## Upgraded Kustomize Version
Note that bundled Kustomize has been upgraded to v4.1.2. Some of the flags are changed in Kustomize V4.
Note that bundled Kustomize has been upgraded to v4.2.0. Some of the flags are changed in Kustomize V4.
For example flag name `load_restrictor` is changed in Kustomize v4+. It is changed from `--load_restrictor=none` to `--load-restrictor LoadRestrictionsNone`.
## Replacing `--app-resync` flag with `timeout.reconciliation` setting
@@ -23,4 +23,23 @@ and [argocd-repo-creds.yaml](../argocd-repo-creds.yaml).
## The `argocd-util` CLI commands merged into `argocd admin`
The `argocd-util` CLI commands are available under `argocd admin` and the `argocd-util` binary is no longer available.
The `argocd-util` CLI commands are available under `argocd admin` and the `argocd-util` binary is no longer available.
## Replace runtime system user while [BYOI](https://argoproj.github.io/argo-cd/operator-manual/custom_tools/#byoi-build-your-own-image)
Runtime system user should to be changed from `argocd` to `999`, as shown below.
```dockerfile
FROM argoproj/argocd:latest
# Switch to root for the ability to perform install
USER root
# Something custom here
RUN apt-get update
# Switch back to non-root user
# deprecated: USER argocd
USER 999
```


@@ -25,7 +25,7 @@ no fix yet.
|2020-04-08|[CVE-2020-11576](https://nvd.nist.gov/vuln/detail/CVE-2020-11576)|User Enumeration|Medium|v1.5.0|v1.5.1|
|2020-04-08|[CVE-2020-8826](https://nvd.nist.gov/vuln/detail/CVE-2020-8826)|Session-fixation|High|all|n/a|
|2020-04-08|[CVE-2020-8827](https://nvd.nist.gov/vuln/detail/CVE-2020-8827)|Insufficient anti-automation/anti-brute force|High|all <= 1.5.3|v1.5.3|
|2020-04-08|[CVE-2020-8828](https://nvd.nist.gov/vuln/detail/CVE-2020-8828)|Insecure default administrative password|High|all|n/a|
|2020-04-08|[CVE-2020-8828](https://nvd.nist.gov/vuln/detail/CVE-2020-8828)|Insecure default administrative password|High|all <= 1.8.0|1.8.0|
|2020-04-08|[CVE-2018-21034](https://nvd.nist.gov/vuln/detail/CVE-2018-21034)|Sensitive Information Disclosure|Medium|all <= v1.5.0|v1.5.0|
## Known Issues And Workarounds
@@ -105,7 +105,7 @@ Upgrade to ArgoCD v1.5.1 or higher. As a workaround, disable local users and use
|Risk|Reported by|Fix version|Workaround|
|----|-----------|-----------|----------|
|High|[Matt Hamilton](https://github.com/Eriner) of [https://soluble.ai](https://soluble.ai)|n/a|Yes|
|High|[Matt Hamilton](https://github.com/Eriner) of [https://soluble.ai](https://soluble.ai)|1.8.0|Yes|
**Details:**


@@ -79,7 +79,7 @@ argocd app [flags]
* [argocd app patch](argocd_app_patch.md) - Patch application
* [argocd app patch-resource](argocd_app_patch-resource.md) - Patch resource in an application
* [argocd app resources](argocd_app_resources.md) - List resource of application
* [argocd app rollback](argocd_app_rollback.md) - Rollback application to a previous deployed version by History ID
* [argocd app rollback](argocd_app_rollback.md) - Rollback application to a previous deployed version by History ID, omitted will Rollback to the previous version
* [argocd app set](argocd_app_set.md) - Set application parameters
* [argocd app sync](argocd_app_sync.md) - Sync an application to its target state
* [argocd app terminate-op](argocd_app_terminate-op.md) - Terminate running operation of an application


@@ -1,9 +1,9 @@
## argocd app rollback
Rollback application to a previous deployed version by History ID
Rollback application to a previous deployed version by History ID, omitted will Rollback to the previous version
```
argocd app rollback APPNAME ID [flags]
argocd app rollback APPNAME [ID] [flags]
```
### Options


@@ -21,6 +21,9 @@ argocd repocreds add REPOURL [flags]
# Add credentials with GitHub App authentication to use for all repositories under https://ghe.example.com/repos
argocd repocreds add https://ghe.example.com/repos/ --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem --github-app-enterprise-base-url https://ghe.example.com/api/v3
# Add credentials with helm oci registry so that these oci registry urls do not need to be added as repos individually.
argocd repocreds add localhost:5000/myrepo --enable-oci --type helm
```
### Options


@@ -126,3 +126,10 @@ syncOptions:
```
If the `Replace=true` sync option is set the Argo CD will use `kubectl replace` or `kubectl create` command to apply changes.
This can also be configured at individual resource level.
```yaml
metadata:
annotations:
argocd.argoproj.io/sync-options: Replace=true
```

2
go.mod

@@ -7,7 +7,7 @@ require (
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/alicebob/miniredis/v2 v2.14.2
github.com/argoproj/gitops-engine v0.3.1-0.20210709004906-a4c77d5c70fb
github.com/argoproj/gitops-engine v0.4.0
github.com/argoproj/pkg v0.9.1
github.com/bombsimon/logrusr v1.0.0
github.com/bradleyfalzon/ghinstallation v1.1.1

4
go.sum

@@ -96,8 +96,8 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/argoproj/gitops-engine v0.3.1-0.20210709004906-a4c77d5c70fb h1:zwnkwh45K57emwKikePwtK4BLc3IuplxYxqMzZ4XTNY=
github.com/argoproj/gitops-engine v0.3.1-0.20210709004906-a4c77d5c70fb/go.mod h1:EdFe8qIOqsmbyxRhtIydU4BUeyZ4VTsY6R3XVQhU9LA=
github.com/argoproj/gitops-engine v0.4.0 h1:h141jAgWhgp1iGAgfHM1Hg1POvszDNmqyEfTFHsG1CA=
github.com/argoproj/gitops-engine v0.4.0/go.mod h1:EdFe8qIOqsmbyxRhtIydU4BUeyZ4VTsY6R3XVQhU9LA=
github.com/argoproj/pkg v0.9.1 h1:osfOS3QkzfRf+W43lbCZb0o0bzrBweQhL+U3rgEg+5M=
github.com/argoproj/pkg v0.9.1/go.mod h1:ra+bQPmbVAoEL+gYSKesuigt4m49i3Qa3mE/xQcjCiA=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=


@@ -1 +0,0 @@
4efb7d0dadba7fab5191c680fcb342c2b6f252f230019cf9cffd5e4b0cad1d12 kustomize_4.1.2_linux_amd64.tar.gz


@@ -1 +0,0 @@
d4b0ababb06d7208b439c48c5dadf979433f062ee7131203f6a94ce1159c9d5e kustomize_4.1.2_linux_arm64.tar.gz


@@ -0,0 +1 @@
220dd03dcda8e45dc50e4e42b2d71882cbc4c05e0ed863513e67930ecad939eb kustomize_4.2.0_linux_amd64.tar.gz


@@ -0,0 +1 @@
33f2cf3b5db64c09560c187224e9d29452fde2b7f00f85941604fc75d9769e4a kustomize_4.2.0_linux_arm64.tar.gz


@@ -1,4 +1,4 @@
#!/bin/bash
set -eux -o pipefail
KUSTOMIZE_VERSION=4.1.2 "$(dirname $0)/../install.sh" helm2-linux jq-linux kustomize-linux protoc-linux swagger-linux
KUSTOMIZE_VERSION=4.2.0 "$(dirname $0)/../install.sh" helm2-linux jq-linux kustomize-linux protoc-linux swagger-linux


@@ -5,7 +5,7 @@ set -eux -o pipefail
export TARGET_FILE=helm-v${helm2_version}-linux-${ARCHITECTURE}.tar.gz
[ -e ${DOWNLOADS}/${TARGET_FILE} ] || curl -sLf --retry 3 -o ${DOWNLOADS}/${TARGET_FILE} https://storage.googleapis.com/kubernetes-helm/helm-v${helm2_version}-linux-$ARCHITECTURE.tar.gz
[ -e ${DOWNLOADS}/${TARGET_FILE} ] || curl -sLf --retry 3 -o ${DOWNLOADS}/${TARGET_FILE} https://get.helm.sh/helm-v${helm2_version}-linux-$ARCHITECTURE.tar.gz
$(dirname $0)/compare-chksum.sh
mkdir -p /tmp/helm2 && tar -C /tmp/helm2 -xf $DOWNLOADS/${TARGET_FILE}
sudo install -m 0755 /tmp/helm2/linux-$ARCHITECTURE/helm $BIN/helm2


@@ -14,6 +14,6 @@ jq_version=1.6
ksonnet_version=0.13.1
kubectl_version=1.17.8
kubectx_version=0.6.3
kustomize4_version=4.1.2
kustomize4_version=4.2.0
protoc_version=3.7.1
swagger_version=0.19.0


@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: latest
newTag: v2.1.0-rc3
resources:
- ./application-controller
- ./dex



@@ -2,6 +2,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../crds
- ../cluster-rbac
- ../base/config
- ../base/application-controller
- ../base/repo-server


@@ -11,7 +11,7 @@ patchesStrategicMerge:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: latest
newTag: v2.1.0-rc3
resources:
- ../../base/application-controller
- ../../base/dex


@@ -3684,7 +3684,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -3895,7 +3895,7 @@ spec:
key: reposerver.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -4138,7 +4138,7 @@ spec:
key: server.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -4334,7 +4334,7 @@ spec:
key: controller.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
httpGet:


@@ -1071,7 +1071,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -1282,7 +1282,7 @@ spec:
key: reposerver.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1525,7 +1525,7 @@ spec:
key: server.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -1721,7 +1721,7 @@ spec:
key: controller.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
httpGet:


@@ -3049,7 +3049,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -3224,7 +3224,7 @@ spec:
key: reposerver.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -3463,7 +3463,7 @@ spec:
key: server.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3653,7 +3653,7 @@ spec:
key: controller.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
httpGet:


@@ -436,7 +436,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
name: copyutil
volumeMounts:
@@ -611,7 +611,7 @@ spec:
key: reposerver.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -850,7 +850,7 @@ spec:
key: server.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -1040,7 +1040,7 @@ spec:
key: controller.default.cache.expiration
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.1.0-rc3
imagePullPolicy: Always
livenessProbe:
httpGet:


@@ -109,7 +109,8 @@ nav:
- Command Reference: user-guide/commands/argocd.md
- Developer Guide:
- developer-guide/index.md
- developer-guide/contributing.md
- Code Contribution Guide: developer-guide/code-contributions.md
- Toolchain Guide: developer-guide/toolchain-guide.md
- developer-guide/release-process-and-cadence.md
- developer-guide/running-locally.md
- developer-guide/debugging-remote-environment.md


@@ -47,6 +47,7 @@ import (
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/env"
grpc_util "github.com/argoproj/argo-cd/v2/util/grpc"
http_util "github.com/argoproj/argo-cd/v2/util/http"
argoio "github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/kube"
"github.com/argoproj/argo-cd/v2/util/localconfig"
@@ -354,16 +355,29 @@ func (c *client) HTTPClient() (*http.Client, error) {
if err != nil {
return nil, err
}
headers, err := parseHeaders(c.Headers)
if err != nil {
return nil, err
}
if c.UserAgent != "" {
headers.Set("User-Agent", c.UserAgent)
}
return &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
Transport: &http_util.TransportWithHeader{
RoundTripper: &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
Header: headers,
},
}, nil
}
@@ -508,11 +522,14 @@ func (c *client) newConn() (*grpc.ClientConn, io.Closer, error) {
ctx := context.Background()
for _, kv := range c.Headers {
if len(strings.Split(kv, ":"))%2 == 1 {
return nil, nil, fmt.Errorf("additional headers must be colon(:)-separated: %s", kv)
headers, err := parseHeaders(c.Headers)
if err != nil {
return nil, nil, err
}
for k, vs := range headers {
for _, v := range vs {
ctx = metadata.AppendToOutgoingContext(ctx, k, v)
}
ctx = metadata.AppendToOutgoingContext(ctx, strings.Split(kv, ":")[0], strings.Split(kv, ":")[1])
}
if c.UserAgent != "" {
@@ -796,3 +813,15 @@ func isCanceledContextErr(err error) bool {
}
return false
}
func parseHeaders(headerStrings []string) (http.Header, error) {
headers := http.Header{}
for _, kv := range headerStrings {
items := strings.Split(kv, ":")
if len(items)%2 == 1 {
return nil, fmt.Errorf("additional headers must be colon(:)-separated: %s", kv)
}
headers.Add(items[0], items[1])
}
return headers, nil
}


@@ -1607,7 +1607,7 @@ type OrphanedResourceKey struct {
// IsWarn returns true if warnings are enabled for orphan resources monitoring
func (s *OrphanedResourcesMonitorSettings) IsWarn() bool {
return s.Warn == nil || *s.Warn
return s.Warn != nil && *s.Warn
}
// SignatureKey is the specification of a key required to verify commit signatures with


@@ -2228,3 +2228,14 @@ func TestRemoveEnvEntry(t *testing.T) {
assert.EqualError(t, err, `unable to find env variable with key "key" for plugin "test"`)
})
}
func TestOrphanedResourcesMonitorSettings_IsWarn(t *testing.T) {
settings := OrphanedResourcesMonitorSettings{}
assert.False(t, settings.IsWarn())
settings.Warn = pointer.BoolPtr(false)
assert.False(t, settings.IsWarn())
settings.Warn = pointer.BoolPtr(true)
assert.True(t, settings.IsWarn())
}


@@ -46,8 +46,8 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client))
var repoCacheExpiration time.Duration
var revisionCacheExpiration time.Duration
cmd.Flags().DurationVar(&repoCacheExpiration, "repo-cache-expiration", env.ParseDurationFromEnv("ARGOCD_REPO_CACHE_EXPIRATION", 24*time.Hour, 0, math.MaxInt32), "Cache expiration for repo state, incl. app lists, app details, manifest generation, revision meta-data")
cmd.Flags().DurationVar(&revisionCacheExpiration, "revision-cache-expiration", env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", 3*time.Minute, 0, math.MaxInt32), "Cache expiration for cached revision")
cmd.Flags().DurationVar(&repoCacheExpiration, "repo-cache-expiration", env.ParseDurationFromEnv("ARGOCD_REPO_CACHE_EXPIRATION", 24*time.Hour, 0, math.MaxInt64), "Cache expiration for repo state, incl. app lists, app details, manifest generation, revision meta-data")
cmd.Flags().DurationVar(&revisionCacheExpiration, "revision-cache-expiration", env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", 3*time.Minute, 0, math.MaxInt64), "Cache expiration for cached revision")
repoFactory := cacheutil.AddCacheFlagsToCmd(cmd, opts...)
@@ -152,9 +152,9 @@ func (c *Cache) GetManifests(revision string, appSrc *appv1.ApplicationSource, c
return fmt.Errorf("Unable to generate hash value: %s", err)
}
// If the expected hash of the cache entry does not match the actual hash value...
if hash != res.CacheEntryHash {
log.Warnf("Manifest hash did not match expected value, treating as a cache miss: %s", appName)
// If cached result does not have manifests or the expected hash of the cache entry does not match the actual hash value...
if hash != res.CacheEntryHash || res.ManifestResponse == nil && res.MostRecentError == "" {
log.Warnf("Manifest hash did not match expected value or cached manifests response is empty, treating as a cache miss: %s", appName)
err = c.DeleteManifests(revision, appSrc, clusterInfo, namespace, appLabelKey, appName)
if err != nil {


@@ -168,6 +168,22 @@ func TestGenerateManifests_K8SAPIResetCache(t *testing.T) {
assert.True(t, len(res.Manifests) > 1)
}
func TestGenerateManifests_EmptyCache(t *testing.T) {
service := newService("../..")
src := argoappv1.ApplicationSource{Path: "manifests/base"}
q := apiclient.ManifestRequest{
Repo: &argoappv1.Repository{}, ApplicationSource: &src,
}
err := service.cache.SetManifests(mock.Anything, &src, &q, "", "", "", &cache.CachedManifestResponse{ManifestResponse: nil})
assert.NoError(t, err)
res, err := service.GenerateManifest(context.Background(), &q)
assert.NoError(t, err)
assert.True(t, len(res.Manifests) > 0)
}
// ensure we can use a semver constraint range (>= 1.0.0) and get back the correct chart (1.0.0)
func TestHelmManifestFromChartRepo(t *testing.T) {
service := newService(".")


@@ -41,9 +41,9 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client))
var oidcCacheExpiration time.Duration
var loginAttemptsExpiration time.Duration
cmd.Flags().DurationVar(&connectionStatusCacheExpiration, "connection-status-cache-expiration", env.ParseDurationFromEnv("ARGOCD_SERVER_CONNECTION_STATUS_CACHE_EXPIRATION", 1*time.Hour, 0, math.MaxInt32), "Cache expiration for cluster/repo connection status")
cmd.Flags().DurationVar(&oidcCacheExpiration, "oidc-cache-expiration", env.ParseDurationFromEnv("ARGOCD_SERVER_OIDC_CACHE_EXPIRATION", 3*time.Minute, 0, math.MaxInt32), "Cache expiration for OIDC state")
cmd.Flags().DurationVar(&loginAttemptsExpiration, "login-attempts-expiration", env.ParseDurationFromEnv("ARGOCD_SERVER_LOGIN_ATTEMPTS_EXPIRATION", 24*time.Hour, 0, math.MaxInt32), "Cache expiration for failed login attempts")
cmd.Flags().DurationVar(&connectionStatusCacheExpiration, "connection-status-cache-expiration", env.ParseDurationFromEnv("ARGOCD_SERVER_CONNECTION_STATUS_CACHE_EXPIRATION", 1*time.Hour, 0, math.MaxInt64), "Cache expiration for cluster/repo connection status")
cmd.Flags().DurationVar(&oidcCacheExpiration, "oidc-cache-expiration", env.ParseDurationFromEnv("ARGOCD_SERVER_OIDC_CACHE_EXPIRATION", 3*time.Minute, 0, math.MaxInt64), "Cache expiration for OIDC state")
cmd.Flags().DurationVar(&loginAttemptsExpiration, "login-attempts-expiration", env.ParseDurationFromEnv("ARGOCD_SERVER_LOGIN_ATTEMPTS_EXPIRATION", 24*time.Hour, 0, math.MaxInt64), "Cache expiration for failed login attempts")
fn := appstatecache.AddCacheFlagsToCmd(cmd, opts...)


@@ -67,7 +67,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if argoURL == "" {
// golang does not provide any easy way to determine scheme of current request
// so redirecting ot http which will auto-redirect too https if necessary
argoURL = fmt.Sprintf("http://%s", r.Host) + strings.TrimRight(strings.TrimLeft(h.rootPath, "/"), "/")
host := strings.TrimRight(r.Host, "/")
argoURL = fmt.Sprintf("http://%s", host) + "/" + strings.TrimRight(strings.TrimLeft(h.rootPath, "/"), "/")
}
logoutRedirectURL := strings.TrimRight(strings.TrimLeft(argoURL, "/"), "/")


@@ -35,7 +35,7 @@ var (
nonOidcToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE2MDU1NzQyMTIsImlzcyI6ImFyZ29jZCIsIm5iZiI6MTYwNTU3NDIxMiwic3ViIjoiYWRtaW4ifQ.zDJ4piwWnwsHON-oPusHMXWINlnrRDTQykYogT7afeE"
expectedNonOIDCLogoutURL = "http://localhost:4000"
expectedOIDCLogoutURL = "https://dev-5695098.okta.com/oauth2/v1/logout?id_token_hint=" + oidcToken + "&post_logout_redirect_uri=" + baseURL
expectedOIDCLogoutURLWithRootPath = "https://dev-5695098.okta.com/oauth2/v1/logout?id_token_hint=" + oidcToken + "&post_logout_redirect_uri=" + baseURL + rootPath
expectedOIDCLogoutURLWithRootPath = "https://dev-5695098.okta.com/oauth2/v1/logout?id_token_hint=" + oidcToken + "&post_logout_redirect_uri=" + baseURL + "/" + rootPath
)
func TestConstructLogoutURL(t *testing.T) {


@@ -66,7 +66,7 @@ func TestProjectCreation(t *testing.T) {
assert.Equal(t, "https://github.com/argoproj/argo-cd.git", proj.Spec.SourceRepos[0])
assert.NotNil(t, proj.Spec.OrphanedResources)
assert.True(t, proj.Spec.OrphanedResources.IsWarn())
assert.False(t, proj.Spec.OrphanedResources.IsWarn())
assertProjHasEvent(t, proj, "create", argo.EventReasonResourceCreated)


@@ -38,12 +38,13 @@ $header: 120px;
position: fixed;
top: $header + 100px;
right: 60px;
z-index: 1;
}
&__refreshing-label {
position: fixed;
top: $header + 100px;
left: 60px;
left: 310px;
background-color: $argo-color-gray-4;
border: 1px solid $argo-color-gray-5;
border-radius: 5px;
@@ -191,16 +192,12 @@ $header: 120px;
line-height: 1.5em;
}
@include breakpoint(xxlarge up) {
.filters-group {
position: fixed;
padding-right: 1.5em;
max-height: calc(100vh - 200px);
overflow: hidden;
}
.filters-group:hover {
overflow-y: auto;
.filters-group__panel {
top: 230px;
}
@include breakpoint(large down) {
.filters-group__panel {
top: 280px;
}
}
}


@@ -9,7 +9,6 @@ import {delay, filter, map, mergeMap, repeat, retryWhen} from 'rxjs/operators';
import {DataLoader, EmptyState, ErrorNotification, ObservableQuery, Page, Paginate, Revision, Timestamp} from '../../../shared/components';
import {AppContext, ContextApis} from '../../../shared/context';
import * as appModels from '../../../shared/models';
import {ApplicationTree} from '../../../shared/models';
import {AppDetailsPreferences, AppsDetailsViewType, services} from '../../../shared/services';
import {ApplicationConditions} from '../application-conditions/application-conditions';
@@ -36,8 +35,6 @@ interface FilterInput {
health: string[];
sync: string[];
namespace: string[];
createdWithin: number[]; // number of minutes the resource must be created within
ownership: string[];
}
export const NodeInfo = (node?: string): {key: string; container: number} => {
@@ -145,7 +142,7 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
const filteredRes = application.status.resources.concat(orphaned).filter(res => {
const resNode: ResourceTreeNode = {...res, root: null, info: null, parentRefs: [], resourceVersion: '', uid: ''};
resNode.root = resNode;
return this.filterTreeNode(tree, resNode, treeFilter);
return this.filterTreeNode(resNode, treeFilter);
});
return (
@@ -154,7 +151,7 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
title='Application Details'
toolbar={{
breadcrumbs: [{title: 'Applications', path: '/applications'}, {title: this.props.match.params.name}],
actionMenu: {items: this.getApplicationActionMenu(application)},
actionMenu: {items: this.getApplicationActionMenu(application, true)},
tools: (
<React.Fragment key='app-list-tools'>
<div className='application-details__view-type'>
@@ -219,28 +216,23 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
</div>
)}
{((pref.view === 'tree' || pref.view === 'network') && (
<div className='row'>
<div className='columns small-12 xxlarge-2'>
<Filters pref={pref} tree={tree} onSetFilter={setFilter} onClearFilter={clearFilter} />
</div>
<div className='columns small-12 xxlarge-10'>
<ApplicationResourceTree
nodeFilter={node => this.filterTreeNode(tree, node, treeFilter)}
selectedNodeFullName={this.selectedNodeKey}
onNodeClick={fullName => this.selectNode(fullName)}
nodeMenu={node =>
AppUtils.renderResourceMenu(node, application, tree, this.appContext, this.appChanged, () =>
this.getApplicationActionMenu(application)
)
}
tree={tree}
app={application}
showOrphanedResources={pref.orphanedResources}
useNetworkingHierarchy={pref.view === 'network'}
onClearFilter={clearFilter}
/>
</div>
</div>
<Filters pref={pref} tree={tree} onSetFilter={setFilter} onClearFilter={clearFilter}>
<ApplicationResourceTree
nodeFilter={node => this.filterTreeNode(node, treeFilter)}
selectedNodeFullName={this.selectedNodeKey}
onNodeClick={fullName => this.selectNode(fullName)}
nodeMenu={node =>
AppUtils.renderResourceMenu(node, application, tree, this.appContext, this.appChanged, () =>
this.getApplicationActionMenu(application, false)
)
}
tree={tree}
app={application}
showOrphanedResources={pref.orphanedResources}
useNetworkingHierarchy={pref.view === 'network'}
onClearFilter={clearFilter}
/>
</Filters>
)) ||
(pref.view === 'pods' && (
<PodView
@@ -249,7 +241,7 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
onItemClick={fullName => this.selectNode(fullName)}
nodeMenu={node =>
AppUtils.renderResourceMenu(node, application, tree, this.appContext, this.appChanged, () =>
this.getApplicationActionMenu(application)
this.getApplicationActionMenu(application, false)
)
}
/>
@@ -272,7 +264,7 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
tree,
this.appContext,
this.appChanged,
() => this.getApplicationActionMenu(application)
() => this.getApplicationActionMenu(application, false)
)
}
/>
@@ -376,48 +368,49 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
);
}
private getApplicationActionMenu(app: appModels.Application) {
private getApplicationActionMenu(app: appModels.Application, needOverlapLabelOnNarrowScreen: boolean) {
const refreshing = app.metadata.annotations && app.metadata.annotations[appModels.AnnotationRefreshKey];
const fullName = AppUtils.nodeKey({group: 'argoproj.io', kind: app.kind, name: app.metadata.name, namespace: app.metadata.namespace});
const ActionMenuItem = (prop: {actionLabel: string}) => <span className={needOverlapLabelOnNarrowScreen ? 'show-for-large' : ''}>{prop.actionLabel}</span>;
return [
{
iconClassName: 'fa fa-info-circle',
title: <span className='show-for-large'>App Details</span>,
title: <ActionMenuItem actionLabel='App Details' />,
action: () => this.selectNode(fullName)
},
{
iconClassName: 'fa fa-file-medical',
title: <span className='show-for-large'>App Diff</span>,
title: <ActionMenuItem actionLabel='App Diff' />,
action: () => this.selectNode(fullName, 0, 'diff'),
disabled: app.status.sync.status === appModels.SyncStatuses.Synced
},
{
iconClassName: 'fa fa-sync',
title: <span className='show-for-large'>Sync</span>,
title: <ActionMenuItem actionLabel='Sync' />,
action: () => AppUtils.showDeploy('all', this.appContext)
},
{
iconClassName: 'fa fa-info-circle',
title: <span className='show-for-large'>Sync Status</span>,
title: <ActionMenuItem actionLabel='Sync Status' />,
action: () => this.setOperationStatusVisible(true),
disabled: !app.status.operationState
},
{
iconClassName: 'fa fa-history',
title: <span className='show-for-large'>History and rollback</span>,
title: <ActionMenuItem actionLabel='History and rollback' />,
action: () => this.setRollbackPanelVisible(0),
disabled: !app.status.operationState
},
{
iconClassName: 'fa fa-times-circle',
title: <span className='show-for-large'>Delete</span>,
title: <ActionMenuItem actionLabel='Delete' />,
action: () => this.deleteApplication()
},
{
iconClassName: classNames('fa fa-redo', {'status-icon--spin': !!refreshing}),
title: (
<React.Fragment>
<span className='show-for-large'>Refresh</span>{' '}
<ActionMenuItem actionLabel='Refresh' />{' '}
<DropDownMenu
items={[
{
@@ -441,42 +434,20 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
];
}
private filterTreeNode(tree: ApplicationTree, node: ResourceTreeNode, filterInput: FilterInput, ownership?: string): boolean {
private filterTreeNode(node: ResourceTreeNode, filterInput: FilterInput): boolean {
const syncStatuses = filterInput.sync.map(item => (item === 'OutOfSync' ? ['OutOfSync', 'Unknown'] : [item])).reduce((first, second) => first.concat(second), []);
const minutesAgo = (m: number) => {
const d = new Date();
d.setTime(d.getTime() - m * 60000);
return d;
};
const createdAt = new Date(node.createdAt); // will be falsely if the node has not been created, and so will not appear
const createdWithin = (n: number) => createdAt.getTime() > minutesAgo(n).getTime();
const root = node.root || ({} as ResourceTreeNode);
const hook = root && root.hook;
if (
(filterInput.kind.length === 0 || filterInput.kind.indexOf(node.kind) > -1) &&
(syncStatuses.length === 0 || hook || (root.status && syncStatuses.indexOf(root.status) > -1)) &&
(filterInput.health.length === 0 || hook || (root.health && filterInput.health.indexOf(root.health.status) > -1)) &&
(filterInput.namespace.length === 0 || filterInput.namespace.includes(node.namespace)) &&
(filterInput.createdWithin.length === 0 || !!filterInput.createdWithin.find(v => createdWithin(v)))
(filterInput.namespace.length === 0 || filterInput.namespace.includes(node.namespace))
) {
return true;
}
if (filterInput.ownership.includes('Owned') && ownership !== 'Owners') {
const owned = tree.nodes.filter(n => (node.parentRefs || []).find(r => r.uid === n.uid));
if (owned.find(n => this.filterTreeNode(tree, n, filterInput, 'Owned'))) {
return true;
}
}
if (filterInput.ownership.includes('Owners') && ownership !== 'Owned') {
const owners = tree.nodes.filter(n => (n.parentRefs || []).find(r => r.uid === node.uid));
if (owners.find(n => this.filterTreeNode(tree, n, filterInput, 'Owners'))) {
return true;
}
}
return false;
}
@@ -551,8 +522,6 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
const health = new Array<string>();
const sync = new Array<string>();
const namespace = new Array<string>();
const createdWithin = new Array<number>();
const ownership = new Array<string>();
for (const item of filterInput || []) {
const [type, val] = item.split(':');
switch (type) {
@@ -568,15 +537,9 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{nam
case 'namespace':
namespace.push(val);
break;
case 'createdWithin':
createdWithin.push(parseInt(val, 10));
break;
case 'ownership':
ownership.push(val);
break;
}
}
return {kind, health, sync, namespace, createdWithin, ownership};
return {kind, health, sync, namespace};
}
private setOperationStatusVisible(isVisible: boolean) {


@@ -10,7 +10,13 @@ function toOption(label: string) {
return {label};
}
export const Filters = (props: {pref: AppDetailsPreferences; tree: ApplicationTree; onSetFilter: (items: string[]) => void; onClearFilter: () => void}) => {
export const Filters = (props: {
children?: React.ReactNode;
pref: AppDetailsPreferences;
tree: ApplicationTree;
onSetFilter: (items: string[]) => void;
onClearFilter: () => void;
}) => {
const {pref, tree, onSetFilter} = props;
const onClearFilter = () => {
@@ -51,19 +57,11 @@ export const Filters = (props: {pref: AppDetailsPreferences; tree: ApplicationTr
onSetFilter(strings);
};
const ResourceFilter = (p: {label: string; prefix: string; options: {label: string}[]; field?: boolean; radio?: boolean; wrap?: boolean}) => {
const ResourceFilter = (p: {label: string; prefix: string; options: {label: string}[]; field?: boolean; radio?: boolean}) => {
return loading ? (
<div>Loading...</div>
) : (
<Filter
label={p.label}
selected={selectedFor(p.prefix)}
setSelected={v => setFilters(p.prefix, v)}
options={p.options}
field={!!p.field}
radio={!!p.radio}
wrap={!!p.wrap}
/>
<Filter label={p.label} selected={selectedFor(p.prefix)} setSelected={v => setFilters(p.prefix, v)} options={p.options} field={!!p.field} radio={!!p.radio} />
);
};
@@ -78,6 +76,7 @@ export const Filters = (props: {pref: AppDetailsPreferences; tree: ApplicationTr
.sort();
const namespaces = tree.nodes
.map(x => x.namespace)
.filter(x => !!x)
.concat(alreadyFilteredOn('namespace'))
.filter(uniq)
.sort();
@@ -87,18 +86,16 @@ export const Filters = (props: {pref: AppDetailsPreferences; tree: ApplicationTr
};
return (
<FiltersGroup appliedFilter={pref.resourceFilter} onClearFilter={onClearFilter} setShown={setShown} shown={shown}>
<div className='filters-container__text-filters'>
{ResourceFilter({label: 'KINDS', prefix: 'kind', options: kinds.map(toOption), field: true})}
{ResourceFilter({
label: 'SYNC STATUS',
prefix: 'sync',
options: ['Synced', 'OutOfSync'].map(label => ({
label,
icon: <ComparisonStatusIcon status={label as SyncStatusCode} noSpin={true} />
}))
})}
</div>
<FiltersGroup content={props.children} appliedFilter={pref.resourceFilter} onClearFilter={onClearFilter} setShown={setShown} expanded={shown}>
{ResourceFilter({label: 'KINDS', prefix: 'kind', options: kinds.map(toOption), field: true})}
{ResourceFilter({
label: 'SYNC STATUS',
prefix: 'sync',
options: ['Synced', 'OutOfSync'].map(label => ({
label,
icon: <ComparisonStatusIcon status={label as SyncStatusCode} noSpin={true} />
}))
})}
{ResourceFilter({
label: 'HEALTH STATUS',
prefix: 'health',
@@ -107,12 +104,7 @@ export const Filters = (props: {pref: AppDetailsPreferences; tree: ApplicationTr
icon: <HealthStatusIcon state={{status: label as HealthStatusCode, message: ''}} noSpin={true} />
}))
})}
<div className='filters-container__subgroup'>
{namespaces.length > 1 &&
ResourceFilter({label: 'NAMESPACES', prefix: 'namespace', options: (namespaces || []).filter(l => l && l !== '').map(toOption), field: true})}
{ResourceFilter({label: 'OWNERSHIP', prefix: 'ownership', wrap: true, options: ['Owners', 'Owned'].map(toOption)})}
{ResourceFilter({label: 'AGE', prefix: 'createdWithin', options: ['1m', '3m', '5m', '15m', '60m'].map(toOption), radio: true, wrap: true})}
</div>
{namespaces.length > 1 && ResourceFilter({label: 'NAMESPACES', prefix: 'namespace', options: (namespaces || []).filter(l => l && l !== '').map(toOption), field: true})}
</FiltersGroup>
);
};


@@ -304,9 +304,7 @@ export class PodView extends React.Component<PodViewProps> {
}
const statusByKey = new Map<string, ResourceStatus>();
if (this.props.app) {
this.props.app.status.resources.forEach(res => statusByKey.set(nodeKey(res), res));
}
this.props.app.status?.resources?.forEach(res => statusByKey.set(nodeKey(res), res));
(tree.nodes || []).forEach((rnode: ResourceTreeNode) => {
// make sure each node has not null/undefined parentRefs field
rnode.parentRefs = rnode.parentRefs || [];


@@ -433,12 +433,15 @@ export const ApplicationResourceTree = (props: ApplicationResourceTreeProps) =>
} else {
// Tree view
const managedKeys = new Set(props.app.status.resources.map(nodeKey));
const orphanedKeys = new Set(props.tree.orphanedNodes?.map(nodeKey));
const orphans: ResourceTreeNode[] = [];
nodes.forEach(node => {
if ((node.parentRefs || []).length === 0 || managedKeys.has(nodeKey(node))) {
roots.push(node);
} else {
orphans.push(node);
if (orphanedKeys.has(nodeKey(node))) {
orphans.push(node);
}
node.parentRefs.forEach(parent => {
const children = childrenByParentKey.get(treeNodeKey(parent)) || [];
children.push(node);


@@ -23,7 +23,7 @@ export const ApplicationResourcesDiff = (props: ApplicationResourcesDiffProps) =
b: state.predictedLiveState ? jsYaml.safeDump(state.predictedLiveState, {indent: 2}) : '',
hook: state.hook,
// doubles as sort order
name: (state.group || '') + '/' + state.kind + '/' + state.namespace + '/' + state.name
name: (state.group || '') + '/' + state.kind + '/' + (state.namespace ? state.namespace + '/' : '') + state.name
};
})
.filter(i => !i.hook)


@@ -18,4 +18,15 @@
.fa-exclamation-triangle {
color: $argo-status-warning-color;
}
&__warning {
border-left: 5px solid $argo-failed-color-light;
border-top-left-radius: 4px;
border-bottom-left-radius: 4px;
margin-top: 1em;
padding-left: 1em;
background: white;
box-shadow: 1px 2px 3px $argo-color-gray-5;
color: $argo-color-gray-6;
}
}


@@ -5,7 +5,7 @@ import * as ReactForm from 'react-form';
require('./application-sync-options.scss');
const REPLACE_WARNING = `The resource will be synced using 'kubectl replace/create' command that is a potentially destructive action.`;
export const REPLACE_WARNING = `The resources will be synced using 'kubectl replace/create' command that is a potentially destructive action and might cause resources recreation.`;
export interface ApplicationSyncOptionProps {
options: string[];
@@ -59,9 +59,12 @@ function booleanOption(name: string, label: string, defaultVal: boolean, props:
/>
<label htmlFor={`sync-option-${name}`}>{label}</label>{' '}
{warning && (
<Tooltip content={warning}>
<i className='fa fa-exclamation-triangle' />
</Tooltip>
<>
<Tooltip content={warning}>
<i className='fa fa-exclamation-triangle' />
</Tooltip>
{checked && <div className='application-sync-options__warning'>{warning}</div>}
</>
)}
</React.Fragment>
);
@@ -86,7 +89,6 @@ const syncOptions: Array<(props: ApplicationSyncOptionProps) => React.ReactNode>
props => booleanOption('CreateNamespace', 'Auto-Create Namespace', false, props, false),
props => booleanOption('PruneLast', 'Prune Last', false, props, false),
props => booleanOption('ApplyOutOfSyncOnly', 'Apply Out of Sync Only', false, props, false),
props => booleanOption('Replace', 'Replace', false, props, false, REPLACE_WARNING),
props => selectOption('PrunePropagationPolicy', 'Prune Propagation Policy', 'foreground', ['foreground', 'background', 'orphan'], props)
];
@@ -104,6 +106,9 @@ export const ApplicationSyncOptions = (props: ApplicationSyncOptionProps) => (
{render(props)}
</div>
))}
<div className='small-12' style={optionStyle}>
{booleanOption('Replace', 'Replace', false, props, false, REPLACE_WARNING)}
</div>
</div>
);


@@ -2,11 +2,11 @@ import {ErrorNotification, FormField, NotificationType, SlidingPanel} from 'argo
import * as React from 'react';
import {Form, FormApi, Text} from 'react-form';
import {CheckboxField, Spinner} from '../../../shared/components';
import {ARGO_WARNING_COLOR, CheckboxField, Spinner} from '../../../shared/components';
import {Consumer} from '../../../shared/context';
import * as models from '../../../shared/models';
import {services} from '../../../shared/services';
import {ApplicationManualSyncFlags, ApplicationSyncOptions, SyncFlags} from '../application-sync-options/application-sync-options';
import {ApplicationManualSyncFlags, ApplicationSyncOptions, SyncFlags, REPLACE_WARNING} from '../application-sync-options/application-sync-options';
import {ComparisonStatusIcon, nodeKey} from '../utils';
require('./application-sync-panel.scss');
@@ -48,7 +48,7 @@ export const ApplicationSyncPanel = ({application, selectedResource, hide}: {app
defaultValues={{
revision: application.spec.source.targetRevision || 'HEAD',
resources: appResources.map((_, i) => i === syncResIndex || syncResIndex === -1),
syncOptions: application.spec.syncPolicy ? application.spec.syncPolicy.syncOptions : ''
syncOptions: application.spec.syncPolicy ? application.spec.syncPolicy.syncOptions : []
}}
validateError={values => ({
resources: values.resources.every((item: boolean) => !item) && 'Select at least one resource'
@@ -60,6 +60,19 @@ export const ApplicationSyncPanel = ({application, selectedResource, hide}: {app
resources = null;
}
const replace = params.syncOptions?.findIndex((opt: string) => opt === 'Replace=true') > -1;
if (replace) {
const confirmed = await ctx.popup.confirm('Synchronize using replace?', () => (
<div>
<i className='fa fa-exclamation-triangle' style={{color: ARGO_WARNING_COLOR}} /> {REPLACE_WARNING} Are you sure you want to continue?
</div>
));
if (!confirmed) {
setPending(false);
return;
}
}
const syncFlags = {...params.syncFlags} as SyncFlags;
if (syncFlags.ApplyOnly) {


@@ -32,11 +32,16 @@ export function getFilterResults(applications: Application[], pref: AppsListPref
namespaces: pref.namespacesFilter.length === 0 || pref.namespacesFilter.some(ns => app.spec.destination.namespace && minimatch(app.spec.destination.namespace, ns)),
clusters:
pref.clustersFilter.length === 0 ||
pref.clustersFilter.some(
selector =>
(app.spec.destination.server && selector.includes(app.spec.destination.server)) ||
(app.spec.destination.name && selector.includes(app.spec.destination.name))
),
pref.clustersFilter.some(filterString => {
const match = filterString.match('^(.*) [(](http.*)[)]$');
if (match?.length === 3) {
const [, name, url] = match;
return url === app.spec.destination.server || name === app.spec.destination.name;
} else {
const inputMatch = filterString.match('^http.*$');
return (inputMatch && inputMatch[0] === app.spec.destination.server) || (app.spec.destination.name && minimatch(app.spec.destination.name, filterString));
}
}),
labels: pref.labelsFilter.length === 0 || pref.labelsFilter.every(selector => LabelSelector.match(selector, app.metadata.labels))
}
}));
@@ -54,6 +59,7 @@ interface AppFilterProps {
apps: FilteredApp[];
pref: AppsListPreferences;
onChange: (newPrefs: AppsListPreferences) => void;
children?: React.ReactNode;
}
const getCounts = (apps: FilteredApp[], filterType: keyof FilterResult, filter: (app: Application) => string, init?: string[]) => {
@@ -213,15 +219,13 @@ export const ApplicationsFilter = (props: AppFilterProps) => {
};
return (
<FiltersGroup setShown={setShown} shown={!props.pref.hideFilters}>
<FiltersGroup setShown={setShown} expanded={!props.pref.hideFilters} content={props.children}>
<SyncFilter {...props} />
<HealthFilter {...props} />
<div className='filters-container__subgroup'>
<LabelsFilter {...props} />
<ProjectFilter {...props} />
<ClusterFilter {...props} />
<NamespaceFilter {...props} />
</div>
<LabelsFilter {...props} />
<ProjectFilter {...props} />
<ClusterFilter {...props} />
<NamespaceFilter {...props} />
</FiltersGroup>
);
};


@@ -169,4 +169,13 @@
display: inline-block;
width: 28px;
}
.filters-group__panel {
top: 120px;
}
@include breakpoint(medium down) {
.filters-group__panel {
top: 200px;
}
}
}


@@ -378,31 +378,20 @@ export const ApplicationsList = (props: RouteComponentProps<{}>) => {
<ViewPref>
{pref => {
const {filteredApps, filterResults} = filterApps(applications, pref, pref.search);
return applications.length === 0 && (pref.labelsFilter || []).length === 0 ? (
<EmptyState icon='argo-icon-application'>
<h4>No applications yet</h4>
<h5>Create new application to start managing resources in your cluster</h5>
<button
qe-id='applications-list-button-create-application'
className='argo-button argo-button--base'
onClick={() => ctx.navigation.goto('.', {new: JSON.stringify({})})}>
Create application
</button>
</EmptyState>
) : (
<div className='row'>
<div className='columns small-12 xxlarge-2'>
<ApplicationsFilter apps={filterResults} onChange={newPrefs => onFilterPrefChanged(ctx, newPrefs)} pref={pref} />
{syncAppsInput && (
<ApplicationsSyncPanel
key='syncsPanel'
show={syncAppsInput}
hide={() => ctx.navigation.goto('.', {syncApps: null})}
apps={filteredApps}
/>
)}
</div>
<div className='columns small-12 xxlarge-10'>
const appsView =
applications.length === 0 && (pref.labelsFilter || []).length === 0 ? (
<EmptyState icon='argo-icon-application'>
<h4>No applications yet</h4>
<h5>Create new application to start managing resources in your cluster</h5>
<button
qe-id='applications-list-button-create-application'
className='argo-button argo-button--base'
onClick={() => ctx.navigation.goto('.', {new: JSON.stringify({})})}>
Create application
</button>
</EmptyState>
) : (
<ApplicationsFilter apps={filterResults} onChange={newPrefs => onFilterPrefChanged(ctx, newPrefs)} pref={pref}>
{(pref.view === 'summary' && <ApplicationsSummary applications={filteredApps} />) || (
<Paginate
header={filteredApps.length > 1 && <ApplicationsStatusBar applications={filteredApps} />}
@@ -444,8 +433,18 @@ export const ApplicationsList = (props: RouteComponentProps<{}>) => {
}
</Paginate>
)}
</div>
</div>
</ApplicationsFilter>
);
return (
<>
{appsView}
<ApplicationsSyncPanel
key='syncsPanel'
show={syncAppsInput}
hide={() => ctx.navigation.goto('.', {syncApps: null})}
apps={filteredApps}
/>
</>
);
}}
</ViewPref>


@@ -4,7 +4,7 @@
$height: 16px;
$border-width: 2px;
margin: 0px;
width: 80%;
width: 100%;
height: $height;
display: flex;
border-radius: 25px;


@@ -217,7 +217,7 @@ export const ApplicationTiles = ({applications, syncApplication, refreshApplicat
refreshApplication(app.metadata.name);
}}>
<i className={classNames('fa fa-redo', {'status-icon--spin': AppUtils.isAppRefreshing(app)})} />{' '}
<span className='show-for-xlarge'>Refresh</span>
<span className='show-for-xxlarge'>Refresh</span>
</a>
&nbsp;
<a
@@ -227,7 +227,7 @@ export const ApplicationTiles = ({applications, syncApplication, refreshApplicat
e.stopPropagation();
deleteApplication(app.metadata.name);
}}>
<i className='fa fa-times-circle' /> Delete
<i className='fa fa-times-circle' /> <span className='show-for-xxlarge'>Delete</span>
</a>
</div>
</div>


@@ -9,17 +9,17 @@
margin-bottom: 1em;
&__header {
display: flex;
align-items: center;
flex-wrap: wrap;
font-size: 13px;
color: $argo-color-gray-5;
font-weight: 500;
margin-bottom: 0.5em;
height: 28px;
position: relative;
}
&__collapse {
position: absolute;
right: 0;
cursor: pointer;
margin-left: auto;
color: $argo-color-gray-7;
@@ -43,70 +43,95 @@
&__loading {
text-align: center;
}
&--wrap {
overflow: auto;
.checkbox__item {
min-width: 4.5em;
float: left;
}
}
}
.filters-group {
&__container {
display: flex;
&__toggle {
display: none;
cursor: pointer;
position: absolute;
left: 16em;
color: $argo-color-gray-6;
}
.fa-filter {
float: left;
margin-right: 1em;
}
&__panel {
transition: width 0.2s ease-in-out;
background-color: $argo-color-gray-3;
box-shadow: 1px 1px 2px 0px rgba(0, 0, 0, 0.1);
padding: 10px;
overflow-y: hidden;
overflow-x: hidden;
z-index: 1;
position: fixed;
width: 240px;
margin-top: -18px;
left: 60px;
bottom: 0;
&__title {
margin-bottom: 1em;
font-size: 13px;
color: $argo-color-gray-6;
display: flex;
align-items: center;
width: 200px;
}
.filter {
margin-right: 15px;
height: max-content;
min-width: 200px;
max-width: 200px;
&:hover {
overflow-y: auto;
}
&__subgroup {
align-self: start;
display: flex;
flex-wrap: wrap;
.filter {
width: 205px;
transition: visibility 0.2s ease-in-out;
}
}
&__container--hidden {
display: none;
&__content {
padding-left: 230px;
}
}
@include breakpoint(xxlarge up) {
.filter {
width: 100%;
margin-right: 0;
@include breakpoint(xlarge down) {
.filters-group__toggle {
display: inline;
}
.filters-group {
&__container {
width: 260px;
flex-wrap: wrap;
padding-bottom: 6em;
.filters-group--expanded {
.filters-group__toggle {
color: $argo-color-gray-8;
}
}
&__title .action-button {
.filters-group__toggle:hover {
color: $argo-color-gray-7;
}
.filters-group:not(.filters-group--expanded) {
.filters-group__panel:not(.filters-group__panel:hover) {
width: 30px;
.filters-group__toggle {
display: none;
}
&__text-filters {
width: 100%;
&__title {
display: none;
}
.filter {
visibility: hidden;
}
}
&__container--hidden {
display: flex;
.filters-group__content {
padding-left: 30px;
}
}
}


@@ -1,5 +1,5 @@
import {ActionButton, Autocomplete, CheckboxOption, CheckboxRow} from 'argo-ui/v2';
import classNames from 'classnames';
import {Autocomplete, CheckboxOption, CheckboxRow} from 'argo-ui/v2';
import * as classNames from 'classnames';
import * as React from 'react';
import './filter.scss';
@@ -15,24 +15,32 @@ interface FilterProps {
retry?: () => void;
loading?: boolean;
radio?: boolean;
wrap?: boolean;
}
export const FiltersGroup = (props: {children?: React.ReactNode; appliedFilter?: string[]; shown: boolean; setShown: (val: boolean) => void; onClearFilter?: () => void}) => {
export const FiltersGroup = (props: {
children?: React.ReactNode;
content: React.ReactNode;
appliedFilter?: string[];
expanded: boolean;
setShown: (val: boolean) => void;
onClearFilter?: () => void;
}) => {
return (
<div className='filters-group'>
<div className='filters-group__container__title'>
FILTERS <i className='fa fa-filter' />
{props.appliedFilter?.length > 0 && props.onClearFilter && (
<ActionButton label={'CLEAR ALL'} action={() => props.onClearFilter()} style={{marginLeft: 'auto', fontSize: '12px', lineHeight: '5px', display: 'block'}} />
)}
<ActionButton
label={!props.shown ? 'SHOW' : 'HIDE'}
action={() => props.setShown(!props.shown)}
style={{marginLeft: props.appliedFilter?.length > 0 ? '5px' : 'auto', fontSize: '12px', lineHeight: '5px'}}
/>
<div className={classNames('filters-group', {'filters-group--expanded': props.expanded})}>
<div className='filters-group__panel'>
<i className='fa fa-filter' />
<div className='filters-group__panel__title'>
FILTERS{' '}
{props.appliedFilter?.length > 0 && props.onClearFilter && (
<button onClick={() => props.onClearFilter()} className='argo-button argo-button--base argo-button--sm'>
CLEAR ALL
</button>
)}
<i className='fa fa-thumbtack filters-group__toggle' onClick={() => props.setShown(!props.expanded)} />
</div>
<>{props.children}</>
</div>
<div className={classNames('filters-group__container', {'filters-group__container--hidden': !props.shown})}>{props.children}</div>
<div className='filters-group__content'>{props.content}</div>
</div>
);
};
@@ -60,12 +68,19 @@ export const Filter = (props: FilterProps) => {
}
}, [values]);
React.useEffect(() => {
if (props.selected.length === 0) {
setValues({} as {[label: string]: boolean});
setInput('');
}
}, [props.selected.length]);
return (
<div className={classNames('filter', {'filter--wrap': props.wrap})}>
<div className='filter'>
<div className='filter__header'>
{props.label || 'FILTER'}
{(props.selected || []).length > 0 || (props.field && Object.keys(values).length > 0) ? (
<div
<button
className='argo-button argo-button--base argo-button--sm'
style={{marginLeft: 'auto'}}
onClick={() => {
@@ -73,7 +88,7 @@ export const Filter = (props: FilterProps) => {
setInput('');
}}>
<i className='fa fa-times-circle' /> CLEAR
</div>
</button>
) : (
<i className={`fa fa-caret-${collapsed ? 'down' : 'up'} filter__collapse`} onClick={() => setCollapsed(!collapsed)} />
)}


@@ -324,6 +324,10 @@ export function renderResourceMenu(
} else {
const isRoot = resource.root && nodeKey(resource.root) === nodeKey(resource);
const items: MenuItem[] = [
{
title: 'Details',
action: () => appContext.apis.navigation.goto('.', {node: nodeKey(resource)})
},
...((isRoot && [
{
title: 'Sync',


@@ -48,7 +48,7 @@ export const AccountDetails = (props: RouteComponentProps<{name: string}>) => {
<Form
onSubmit={async (params, event, api) => {
const expiresIn = convertExpiresInToSeconds(params.expiresIn);
const confirmed = await ctx.popup.confirm('Generate new token?', 'Are you sure you want to generate new token?');
const confirmed = await ctx.popup.confirm('Generate new token', 'Are you sure you want to generate a new token?');
if (!confirmed) {
return;
}


@@ -1,9 +1,9 @@
const ARGO_SUCCESS_COLOR = '#18BE94';
const ARGO_WARNING_COLOR = '#f4c030';
const ARGO_FAILED_COLOR = '#E96D76';
const ARGO_RUNNING_COLOR = '#0DADEA';
const ARGO_GRAY4_COLOR = '#CCD6DD';
const ARGO_TERMINATING_COLOR = '#DE303D';
export const ARGO_SUCCESS_COLOR = '#18BE94';
export const ARGO_WARNING_COLOR = '#f4c030';
export const ARGO_FAILED_COLOR = '#E96D76';
export const ARGO_RUNNING_COLOR = '#0DADEA';
export const ARGO_GRAY4_COLOR = '#CCD6DD';
export const ARGO_TERMINATING_COLOR = '#DE303D';
export const COLORS = {
connection_status: {


@@ -2,8 +2,6 @@
.paginate {
position: relative;
height: 25px;
@media (max-width: map-get($breakpoints, medium)) {
& {
@@ -23,11 +21,9 @@
}
&__paginator {
ul {
display: inline-block;
margin: 0;
padding: 0;
}
display: inline-block;
padding: 0;
margin-bottom: 0;
li {
display: inline-block;
@@ -50,15 +46,7 @@
}
&__size-menu {
display: flex;
width: 100%;
justify-content: space-between;
align-items: center;
position: absolute;
z-index: 1;
right: 0;
top: 50%;
transform: translateY(-50%);
margin-left: auto;
li {
display: block;
}


@@ -1,6 +1,5 @@
import {DataLoader, DropDownMenu} from 'argo-ui';
// import {ApplicationStatusBar} from '../../../applications/components/applications-list/applications-status-bar';
import * as React from 'react';
import ReactPaginate from 'react-paginate';
import {services} from '../../services';
@@ -30,35 +29,37 @@ export function Paginate<T>({page, onPageChange, children, data, emptyState, pre
function paginator() {
return (
<React.Fragment>
{pageCount > 1 && (
<ReactPaginate
containerClassName='paginate__paginator'
forcePage={page}
pageCount={pageCount}
pageRangeDisplayed={5}
marginPagesDisplayed={2}
onPageChange={item => onPageChange(item.selected)}
/>
)}
<div className='paginate__size-menu'>
{header || <div />}
<DropDownMenu
anchor={() => (
<a>
Items per page: {pageSize === -1 ? 'all' : pageSize} <i className='fa fa-caret-down' />
</a>
)}
items={[5, 10, 15, 20, -1].map(count => ({
title: count === -1 ? 'all' : count.toString(),
action: () => {
pref.pageSizes[preferencesKey] = count;
services.viewPreferences.updatePreferences(pref);
}
}))}
/>
<div style={{marginBottom: '0.5em'}}>
<div style={{display: 'flex', alignItems: 'start', marginBottom: '0.5em'}}>
{pageCount > 1 && (
<ReactPaginate
containerClassName='paginate__paginator'
forcePage={page}
pageCount={pageCount}
pageRangeDisplayed={5}
marginPagesDisplayed={2}
onPageChange={item => onPageChange(item.selected)}
/>
)}
<div className='paginate__size-menu'>
<DropDownMenu
anchor={() => (
<a>
Items per page: {pageSize === -1 ? 'all' : pageSize} <i className='fa fa-caret-down' />
</a>
)}
items={[5, 10, 15, 20, -1].map(count => ({
title: count === -1 ? 'all' : count.toString(),
action: () => {
pref.pageSizes[preferencesKey] = count;
services.viewPreferences.updatePreferences(pref);
}
}))}
/>
</div>
</div>
</React.Fragment>
{header}
</div>
);
}


@@ -1669,7 +1669,7 @@ are-we-there-yet@~1.1.2:
"argo-ui@git+https://github.com/argoproj/argo-ui.git":
version "1.0.0"
resolved "git+https://github.com/argoproj/argo-ui.git#e72b3655abe21c16a12ca82369d90209fa1f05d9"
resolved "git+https://github.com/argoproj/argo-ui.git#e81faeb971180151796ab5aeca8e8740ed4a3578"
dependencies:
"@fortawesome/fontawesome-free" "^5.8.1"
"@tippy.js/react" "^2.1.2"
@@ -8804,9 +8804,9 @@ tapable@^1.0.0, tapable@^1.1.3:
integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==
tar@^6.0.2:
version "6.1.0"
resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.0.tgz#d1724e9bcc04b977b18d5c573b333a2207229a83"
integrity sha512-DUCttfhsnLCjwoDoFcI+B2iJgYa93vBnDUATYEeRx6sntCTdN01VnqsIuTlALXla/LWooNg0yEGeB+Y8WdFxGA==
version "6.1.3"
resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.3.tgz#e44b97ee7d6cc7a4c574e8b01174614538291825"
integrity sha512-3rUqwucgVZXTeyJyL2jqtUau8/8r54SioM1xj3AmTX3HnWQdj2AydfJ2qYYayPyIIznSplcvU9mhBb7dR2XF3w==
dependencies:
chownr "^2.0.0"
fs-minipass "^2.0.0"


@@ -586,7 +586,7 @@ func GetGlobalProjects(proj *argoappv1.AppProject, projLister applicationsv1.App
}
}
if !matchMe {
break
continue
}
//If proj is a match for this global project setting, then it is its global project
globalProj, err := projLister.AppProjects(proj.Namespace).Get(gp.ProjectName)
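
For readers skimming the hunk above: with break, the scan over the globalProjects settings stopped at the first setting whose selector did not match the project, so any later setting was never evaluated; continue skips only that one setting. A simplified, self-contained sketch of the two behaviors follows (the setting type and Matches helper are stand-ins for illustration, not the real Argo CD types):

package main

import "fmt"

type globalProjectSetting struct {
	ProjectName string
	Matches     func(labels map[string]string) bool
}

func matchingGlobalProjects(labels map[string]string, settings []globalProjectSetting) []string {
	var out []string
	for _, gp := range settings {
		if !gp.Matches(labels) {
			// With `break` here instead, every setting after the first
			// non-match was skipped entirely.
			continue
		}
		out = append(out, gp.ProjectName)
	}
	return out
}

func main() {
	settings := []globalProjectSetting{
		{ProjectName: "default-x", Matches: func(l map[string]string) bool { _, ok := l["is-x"]; return ok }},
		{ProjectName: "default-non-x", Matches: func(l map[string]string) bool { _, ok := l["is-x"]; return !ok }},
	}
	// A project without the "is-x" label only matches the second setting.
	fmt.Println(matchingGlobalProjects(map[string]string{}, settings)) // [default-non-x]; with break this would be []
}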


@@ -758,3 +758,89 @@ func TestFilterByName(t *testing.T) {
assert.Len(t, res, 0)
})
}
func TestGetGlobalProjects(t *testing.T) {
t.Run("Multiple global projects", func(t *testing.T) {
namespace := "default"
cm := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-cm",
Namespace: test.FakeArgoCDNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string]string{
"globalProjects": `
- projectName: default-x
labelSelector:
matchExpressions:
- key: is-x
operator: Exists
- projectName: default-non-x
labelSelector:
matchExpressions:
- key: is-x
operator: DoesNotExist
`,
},
}
defaultX := &argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default-x", Namespace: namespace},
Spec: argoappv1.AppProjectSpec{
ClusterResourceWhitelist: []metav1.GroupKind{
{Group: "*", Kind: "*"},
},
ClusterResourceBlacklist: []metav1.GroupKind{
{Kind: "Volume"},
},
},
}
defaultNonX := &argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default-non-x", Namespace: namespace},
Spec: argoappv1.AppProjectSpec{
ClusterResourceBlacklist: []metav1.GroupKind{
{Group: "*", Kind: "*"},
},
},
}
isX := &argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "is-x",
Namespace: namespace,
Labels: map[string]string{
"is-x": "yep",
},
},
}
isNoX := &argoappv1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "is-no-x", Namespace: namespace},
}
projClientset := appclientset.NewSimpleClientset(defaultX, defaultNonX, isX, isNoX)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
informer := v1alpha1.NewAppProjectInformer(projClientset, namespace, 0, indexers)
go informer.Run(ctx.Done())
cache.WaitForCacheSync(ctx.Done(), informer.HasSynced)
kubeClient := fake.NewSimpleClientset(&cm)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
projLister := applisters.NewAppProjectLister(informer.GetIndexer())
xGlobalProjects := GetGlobalProjects(isX, projLister, settingsMgr)
assert.Len(t, xGlobalProjects, 1)
assert.Equal(t, xGlobalProjects[0].Name, "default-x")
nonXGlobalProjects := GetGlobalProjects(isNoX, projLister, settingsMgr)
assert.Len(t, nonXGlobalProjects, 1)
assert.Equal(t, nonXGlobalProjects[0].Name, "default-non-x")
})
}

util/cache/cache.go

@@ -48,7 +48,7 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client))
cmd.Flags().IntVar(&redisDB, "redisdb", env.ParseNumFromEnv("REDISDB", 0, 0, math.MaxInt32), "Redis database.")
cmd.Flags().StringArrayVar(&sentinelAddresses, "sentinel", []string{}, "Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). ")
cmd.Flags().StringVar(&sentinelMaster, "sentinelmaster", "master", "Redis sentinel master group name.")
cmd.Flags().DurationVar(&defaultCacheExpiration, "default-cache-expiration", env.ParseDurationFromEnv("ARGOCD_DEFAULT_CACHE_EXPIRATION", 24*time.Hour, 0, math.MaxInt32), "Cache expiration default")
cmd.Flags().DurationVar(&defaultCacheExpiration, "default-cache-expiration", env.ParseDurationFromEnv("ARGOCD_DEFAULT_CACHE_EXPIRATION", 24*time.Hour, 0, math.MaxInt64), "Cache expiration default")
cmd.Flags().BoolVar(&redisUseTLS, "redis-use-tls", false, "Use TLS when connecting to Redis. ")
cmd.Flags().StringVar(&redisClientCertificate, "redis-client-certificate", "", "Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).")
cmd.Flags().StringVar(&redisClientKey, "redis-client-key", "", "Path to Redis client key (e.g. /etc/certs/redis/client.crt).")
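
For context on the math.MaxInt32 to math.MaxInt64 bound above: time.Duration counts nanoseconds in an int64, so if ParseDurationFromEnv applies its max argument as a duration (an assumption; the helper's body is not part of this diff), an int32-sized ceiling is only about 2.1 seconds and the 24-hour default would already exceed it. A minimal sketch of the arithmetic:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// time.Duration stores nanoseconds in an int64, so an int32-sized ceiling
	// is only about 2.1 seconds when read as a duration.
	fmt.Println(time.Duration(math.MaxInt32)) // 2.147483647s
	fmt.Println(24 * time.Hour)               // 24h0m0s, well above an int32 ceiling
	fmt.Println(time.Duration(math.MaxInt64)) // 2562047h47m16.854775807s (~292 years)
}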


@@ -162,6 +162,7 @@ func SetLogLevel(logLevel string) {
level, err := log.ParseLevel(text.FirstNonEmpty(logLevel, log.InfoLevel.String()))
errors.CheckError(err)
os.Setenv(common.EnvLogLevel, level.String())
log.SetLevel(level)
}
// SetGLogLevel set the glog level for the k8s go-client


@@ -43,6 +43,17 @@ func loadFlags() error {
if key != "" {
flags[key] = "true"
}
// pkg shellquote doesn't recognize `=`, so opts in the format `foo=bar` would not work.
// issue ref: https://github.com/argoproj/argo-cd/issues/6822
for k, v := range flags {
if strings.Contains(k, "=") && strings.Count(k, "=") == 1 && v == "true" {
kv := strings.Split(k, "=")
actualKey, actualValue := kv[0], kv[1]
if _, ok := flags[actualKey]; !ok {
flags[actualKey] = actualValue
}
}
}
return nil
}
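
A minimal, self-contained sketch of just the normalization loop added above, with invented flag names for illustration (the surrounding code that tokenizes the options string and fills the flags map is omitted):

package main

import (
	"fmt"
	"strings"
)

// normalizeFlags mirrors the loop above: a token such as "--server=example.com"
// ends up in the map as flags["server=example.com"]="true" and is rewritten to
// flags["server"]="example.com", unless that key was already set explicitly.
func normalizeFlags(flags map[string]string) {
	for k, v := range flags {
		if strings.Count(k, "=") == 1 && v == "true" {
			kv := strings.Split(k, "=")
			if _, ok := flags[kv[0]]; !ok {
				flags[kv[0]] = kv[1]
			}
		}
	}
}

func main() {
	flags := map[string]string{
		"grpc-web":           "true",
		"server=example.com": "true",
	}
	normalizeFlags(flags)
	fmt.Println(flags["server"], flags["grpc-web"]) // example.com true
}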


@@ -81,3 +81,9 @@ func TestFlagWithDoubleQuotes(t *testing.T) {
assert.Equal(t, "bar baz", GetFlag("foo", ""))
}
func TestFlagWithEqualSign(t *testing.T) {
loadOpts(t, "--foo=bar")
assert.Equal(t, "bar", GetFlag("foo", ""))
}


@@ -136,3 +136,22 @@ func (d DebugTransport) RoundTrip(req *http.Request) (*http.Response, error) {
log.Printf("%s", respDump)
return resp, nil
}
// TransportWithHeader is a HTTP Client Transport with default headers.
type TransportWithHeader struct {
RoundTripper http.RoundTripper
Header http.Header
}
func (rt *TransportWithHeader) RoundTrip(r *http.Request) (*http.Response, error) {
if rt.Header != nil {
headers := rt.Header.Clone()
for k, vs := range r.Header {
for _, v := range vs {
headers.Add(k, v)
}
}
r.Header = headers
}
return rt.RoundTripper.RoundTrip(r)
}
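
A hedged usage sketch for the wrapper above: how a caller might install it on an http.Client so that every outgoing request carries a default header. The header name and URL are illustrative assumptions; the type is copied from the hunk above so the sketch compiles on its own.

package main

import (
	"fmt"
	"net/http"
)

// TransportWithHeader is copied from the hunk above for self-containment.
type TransportWithHeader struct {
	RoundTripper http.RoundTripper
	Header       http.Header
}

func (rt *TransportWithHeader) RoundTrip(r *http.Request) (*http.Response, error) {
	if rt.Header != nil {
		headers := rt.Header.Clone()
		for k, vs := range r.Header {
			for _, v := range vs {
				headers.Add(k, v)
			}
		}
		r.Header = headers
	}
	return rt.RoundTripper.RoundTrip(r)
}

func main() {
	// The defaults are cloned first and the request's own headers are appended
	// after them, so a per-request value is added alongside a default with the
	// same name rather than replacing it.
	client := &http.Client{
		Transport: &TransportWithHeader{
			RoundTripper: http.DefaultTransport,
			Header:       http.Header{"X-Example-Header": []string{"example-value"}},
		},
	}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}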


@@ -39,3 +39,49 @@ func TestSplitCookie(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, cookieValue, token)
}
// TestRoundTripper just copies the request headers to the response.
type TestRoundTripper struct{}
func (rt TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
resp := http.Response{}
resp.Header = http.Header{}
for k, vs := range req.Header {
for _, v := range vs {
resp.Header.Add(k, v)
}
}
return &resp, nil
}
func TestTransportWithHeader(t *testing.T) {
client := &http.Client{}
req, _ := http.NewRequest("GET", "/foo", nil)
req.Header.Set("Bar", "req_1")
req.Header.Set("Foo", "req_1")
// No default headers.
client.Transport = &TransportWithHeader{
RoundTripper: &TestRoundTripper{},
}
resp, err := client.Do(req)
assert.NoError(t, err)
assert.Equal(t, resp.Header, http.Header{
"Bar": []string{"req_1"},
"Foo": []string{"req_1"},
})
// with default headers.
client.Transport = &TransportWithHeader{
RoundTripper: &TestRoundTripper{},
Header: http.Header{
"Foo": []string{"default_1", "default_2"},
},
}
resp, err = client.Do(req)
assert.NoError(t, err)
assert.Equal(t, resp.Header, http.Header{
"Bar": []string{"req_1"},
"Foo": []string{"default_1", "default_2", "req_1"},
})
}


@@ -2,11 +2,13 @@ package settings
import (
"context"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"math/big"
"net/url"
"path"
"reflect"
@@ -35,7 +37,6 @@ import (
"github.com/argoproj/argo-cd/v2/util"
"github.com/argoproj/argo-cd/v2/util/kube"
"github.com/argoproj/argo-cd/v2/util/password"
argorand "github.com/argoproj/argo-cd/v2/util/rand"
tlsutil "github.com/argoproj/argo-cd/v2/util/tls"
)
@@ -1466,6 +1467,8 @@ func isIncompleteSettingsError(err error) bool {
// InitializeSettings is used to initialize empty admin password, signature, certificate etc if missing
func (mgr *SettingsManager) InitializeSettings(insecureModeEnabled bool) (*ArgoCDSettings, error) {
const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
cdSettings, err := mgr.GetSettings()
if err != nil && !isIncompleteSettingsError(err) {
return nil, err
@@ -1486,7 +1489,16 @@ func (mgr *SettingsManager) InitializeSettings(insecureModeEnabled bool) (*ArgoC
if adminAccount.Enabled {
now := time.Now().UTC()
if adminAccount.PasswordHash == "" {
initialPassword := argorand.RandString(initialPasswordLength)
randBytes := make([]byte, initialPasswordLength)
for i := 0; i < initialPasswordLength; i++ {
num, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
if err != nil {
return err
}
randBytes[i] = letters[num.Int64()]
}
initialPassword := string(randBytes)
hashedPassword, err := password.HashPassword(initialPassword)
if err != nil {
return err