Compare commits

...

37 Commits

Author SHA1 Message Date
Justin Marquis
685040cf82 fix: upgrade slsa-github-generator (#15173)
Signed-off-by: Justin Marquis <34fathombelow@protonmail.com>
2023-08-22 18:17:50 -04:00
gcp-cherry-pick-bot[bot]
2333b048f9 fix(ui): code lint (#15150) (#15161)
Signed-off-by: ebuildy <ebuildy@gmail.com>
Co-authored-by: Thomas Decaux <ebuildy@gmail.com>
2023-08-22 14:20:53 -04:00
gcp-cherry-pick-bot[bot]
7a353a55c8 fix: windows build (#15154) (#15157)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-08-22 13:08:32 -04:00
github-actions[bot]
521c99e05f Bump version to 2.7.12 (#15140)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: crenshaw-dev <crenshaw-dev@users.noreply.github.com>
2023-08-21 16:29:04 -04:00
pasha-codefresh
003d6d1da5 Merge pull request from GHSA-c8xw-vjgf-94hr
Signed-off-by: pashakostohrys <pavel@codefresh.io>
2023-08-21 16:15:09 -04:00
gcp-cherry-pick-bot[bot]
7e3572f373 docs(progressive syncs): specify which ConfigMap to use (#15119) (#15134)
Signed-off-by: Gaël Jourdan-Weil <gjourdanweil@gmail.com>
Co-authored-by: Gaël Jourdan-Weil <gjourdanweil@gmail.com>
2023-08-21 16:13:48 -04:00
gcp-cherry-pick-bot[bot]
580966ce85 docs: ✏️ fix typo on configmap name for private certs (#9596) (#15132)
Signed-off-by: Gaël Jourdan-Weil <gael.jourdan-weil@kelkoogroup.com>
Co-authored-by: Gaël Jourdan-Weil <gael.jourdan-weil@kelkoogroup.com>
2023-08-21 16:13:19 -04:00
gcp-cherry-pick-bot[bot]
693ea664ff docs: add docs for various annotations and labels (#14020) (#15113)
* docs: add docs for various annotations



* more info



* more docs



---------

Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-08-18 11:22:49 -04:00
gcp-cherry-pick-bot[bot]
2ff6e5178e docs: kubectl to synchronize argocd apps (#14881) (#15086)
We can use kubectl to synchronize argocd applications the same way we can use
the argocd cli or ui; however, there's no documentation.

This PR adds documentation for kubectl.

Signed-off-by: Jordi Grant Esteve <jgrant.esteve@gmail.com>
Co-authored-by: selaci <selaci@users.noreply.github.com>
2023-08-18 11:19:53 -04:00
gcp-cherry-pick-bot[bot]
6e6d6e01e2 docs: fix typo (#15083) (#15089)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-08-18 11:18:44 -04:00
gcp-cherry-pick-bot[bot]
af202bc7ef docs: document permitOnlyProjectScopedClusters field (#15076) (#15081)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-08-16 15:44:13 -04:00
gcp-cherry-pick-bot[bot]
849b5a6344 fix: bump ubuntu base image (#15020) (#15021) (#15024)
Latest version of the ubuntu image addresses CVE-2023-38408.

https://ubuntu.com/security/notices/USN-6242-1
https://github.com/docker-library/repo-info/blob/master/repos/ubuntu/remote/22.04.md

resolves #15020

Signed-off-by: Mason Cole <macole@beyondtrust.com>
Co-authored-by: Mason Cole <117116981+bt-macole@users.noreply.github.com>
2023-08-11 15:47:35 -04:00
gcp-cherry-pick-bot[bot]
2d483a4c2f fix(cmp): send sigterm to cmp commands before sigkill to allow for potential cleanup (#9180) (#14955) (#14959)
* fix: send sigterm to cmp commands before sigkill to allow for potential cleanup



* fix: unit test for runCommand in cmpserver to test cleanup modified



* fix: change unit test for plugin/runCommand to avoid bad trap along with lint fix



---------

Signed-off-by: Ashin Sabu <ashin.sabu@harness.io>
Co-authored-by: Ashin Sabu <139749674+ashinsabu3@users.noreply.github.com>
2023-08-08 11:38:11 -04:00
gcp-cherry-pick-bot[bot]
fcdaf9b4b6 fix(ui): COPY JSON for ArgoCD version should include trailing newline (#5117) (#14917) (#14939)
Signed-off-by: Vipin M S <vipinachar2016@gmail.com>
Co-authored-by: Vipin M S <40431065+vipinachar@users.noreply.github.com>
2023-08-07 11:02:59 -04:00
gcp-cherry-pick-bot[bot]
990ab0ef81 docs: Update Generators-Git.md (#14921) (#14935)
Remove a misleading symbol from the pattern for the path.Match function. The pipe symbol doesn't have any special meaning.
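A quick Go sketch of the point (hypothetical paths, standard library only): inside a `path.Match` character class, `|` is a literal character, not alternation, so `[f|g]` quietly matches an extra path.

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// "[fg]" is a character class matching a single "f" or "g".
	for _, name := range []string{"/d/f", "/d/g", "/d/h"} {
		ok, _ := path.Match("/d/[fg]", name)
		fmt.Println(name, ok) // /d/f true, /d/g true, /d/h false
	}
	// Inside a class, "|" is a literal character, not alternation,
	// so "[f|g]" also matches the unintended path "/d/|".
	ok, _ := path.Match("/d/[f|g]", "/d/|")
	fmt.Println("/d/|", ok) // true
}
```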

Signed-off-by: German Lashevich <german.lashevich@gmail.com>
Co-authored-by: German Lashevich <design.ber@gmail.com>
2023-08-07 10:48:47 -04:00
github-actions[bot]
ec195adad8 Bump version to 2.7.11 (#14933)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: crenshaw-dev <crenshaw-dev@users.noreply.github.com>
2023-08-07 10:01:25 -04:00
gcp-cherry-pick-bot[bot]
743334ec1d docs: add ignoreDifferences name and namespace fields (#14741) (#14806)
* Update application.yaml



* Update docs/operator-manual/application.yaml



---------

Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-08-03 10:28:35 -04:00
gcp-cherry-pick-bot[bot]
97ddc4b245 docs: Update application.yaml (#14742) (#14810)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-08-03 10:27:10 -04:00
gcp-cherry-pick-bot[bot]
89c30d8d34 docs: Change Generator docs for List Generator to note any key/value pairs can be used (#14825) (#14833)
This is no longer limited to cluster/url value pairs.

Signed-off-by: JesseBot <jessebot@linux.com>
Co-authored-by: JesseBot <jessebot@linux.com>
2023-08-01 13:56:11 -04:00
gcp-cherry-pick-bot[bot]
c56d7e7bc6 fix: ManagedResources API should not return diff for hooks (#14816) (#14829)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
Co-authored-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2023-08-01 09:07:37 -07:00
gcp-cherry-pick-bot[bot]
9b6892c467 fix: Correct broken forever option in pod logs viewer. Fixes #14762 (#14763) (#14804)
Signed-off-by: Alex Collins <alex_collins@intuit.com>
Co-authored-by: Alex Collins <alexec@users.noreply.github.com>
2023-07-31 19:27:11 -04:00
github-actions[bot]
469f25753b Bump version to 2.7.10 (#14801)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: crenshaw-dev <crenshaw-dev@users.noreply.github.com>
2023-07-31 17:38:18 -04:00
gcp-cherry-pick-bot[bot]
057a39d962 docs: Clarify that security policy covers last 3 versions (#14786) (#14792)
* docs: Clarify that security policy covers last 3 versions



* Update SECURITY.md



---------

Signed-off-by: Kostis Kapelonis <kostis@codefresh.io>
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Kostis (Codefresh) <39800303+kostis-codefresh@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-07-31 16:38:39 -04:00
gcp-cherry-pick-bot[bot]
ad006440f3 fix(controller): cache deadlock on delete and re-add cluster (#14780) (#14794)
Signed-off-by: Nathan Romriell <nateromriell@gmail.com>
Co-authored-by: Nathan Romriell <nathan@modsy.com>
2023-07-31 16:37:58 -04:00
gcp-cherry-pick-bot[bot]
1960da7e8f docs: Add missing value (#14538) (#14775)
Signed-off-by: felix <felix@psy-coding.com>
Co-authored-by: Felix <github@felixglaeske.de>
2023-07-28 15:18:50 -04:00
gcp-cherry-pick-bot[bot]
284c16f838 fix: OCI dependency url can't contain part of repository (#14699) (#14757)
Signed-off-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
Co-authored-by: Alexander Matyushentsev <AMatyushentsev@gmail.com>
2023-07-27 16:19:10 -04:00
gcp-cherry-pick-bot[bot]
3e65ad2893 fix(sso): Set redirectURI for gitea, google, oauth Dex connectors (#11237) (#14737)
Signed-off-by: ylxianzhe <ylxianzhe@outlook.com>
Co-authored-by: XianzheTM <ylxianzhe@outlook.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-07-27 10:16:09 -04:00
gcp-cherry-pick-bot[bot]
84e2f77520 fix(server): handle PATCH in http/s server (#2677) (#14530) (#14732)
Signed-off-by: mmerrill3 <jjpaacks@gmail.com>
Co-authored-by: Michael Merrill <jjpaacks@gmail.com>
2023-07-27 10:14:56 -04:00
gcp-cherry-pick-bot[bot]
316be4eb27 fix(controller): log failed attempts to update operation state (#14273) (#14729)
* fix(controller): log failed attempts to update operation state



* new package name



* Update controller/appcontroller_test.go



---------

Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-07-27 10:13:41 -04:00
Yuan Tang
0157f414f3 chore: Print in-cluster svr addr disabled warning when server starts (#14685)
* chore: Update log level to warn when in-cluster svr addr is disabled but internal addr is used (#14520)

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

* chore: Print in-cluster svr addr disabled warning during ArgoDB initialization (#14539)

* chore: Print in-cluster svr addr disabled warning during ArgoDB initialization

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

* fix: undo a change

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

* chore: move to a function

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

* chore: rename

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

---------

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

* chore: Print in-cluster svr addr disabled warning when server starts (#14553)

* chore: Print in-cluster svr addr disabled warning when server starts

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

* fix: mock

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

* no interface change

Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>

---------

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>

---------

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-07-24 17:25:48 -04:00
schakrad
fbe7f8f8d7 fix: ApplicationSet Controller crashes when tag is not closed; panic: Cannot find end tag="}}"(#14227) ( #14227) (#14689)
* appSet fix

Signed-off-by: schakrad <58915923+schakrad@users.noreply.github.com>

* Update applicationset/utils/utils_test.go

Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>

---------

Signed-off-by: schakrad <58915923+schakrad@users.noreply.github.com>
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2023-07-24 17:24:32 -04:00
github-actions[bot]
0ee33e52dd Bump version to 2.7.9 (#14674)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: crenshaw-dev <crenshaw-dev@users.noreply.github.com>
2023-07-24 13:08:24 -04:00
gcp-cherry-pick-bot[bot]
ba44ddb9a1 fix: webhook handler fails to refresh when alternate application namespaces are configured (#13976) (#14653)
* fix: Add failing test for webhooks in all namespaces

This adds a failing test that properly exercises this functionality over
all namespaces. The issue with the code that is under test is that it
does not pass the namespace correctly to the patch of the application,
resulting in the patch not taking place in the correct namespace



* fix: queue webhook refresh for apps in all namespaces

This passes the test in the previous commit, to ensure that webhooks
correctly refresh applications across all namespaces.



* fix: Use existing NamespacedName type

Use the existing type instead of a custom type



---------

Signed-off-by: Nikolas Skoufis <nskoufis@seek.com.au>
Co-authored-by: Nik Skoufis <n.skoufis@gmail.com>
2023-07-21 14:31:07 -04:00
gcp-cherry-pick-bot[bot]
8249eddf75 docs(deep-links): Fix link to pkg.go.dev to not return 404 (#14595) (#14640)
Signed-off-by: Håkon Solbjørg <hakon@solbj.org>
Co-authored-by: Håkon Solbjørg <hakon@solbj.org>
2023-07-21 10:20:39 -04:00
gcp-cherry-pick-bot[bot]
0d24330298 docs: Skip export keyword in notification docs (#14633) (#14643)
This change does three things:

1. It removes the `export` keyword. It's not required since the example
   executes a script where the variables are evaluated as an inline
   string. One could even argue that there is a slight security issue
   with using `export` here, since that will expose the credentials to
   all applications started in the current context.
2. It adds a space (` `) before the `PASSWORD` variable. This will keep
   it out of the user's Bash history by default. See [HISTIGNORE][bash].
3. It adds a newline for clarity.

[bash]: https://www.gnu.org/software/bash/manual/bash.html#index-HISTIGNORE

Signed-off-by: Andreas Lindhé <andreas@lindhe.io>
Co-authored-by: Andreas Lindhé <lindhe@users.noreply.github.com>
2023-07-21 10:17:29 -04:00
Zadkiel Aharonian
53eeed06b0 fix(ui): correctly align status column in application resource list (#14618)
Signed-off-by: Zadkiel Aharonian <hello@zadkiel.fr>
2023-07-21 09:51:23 -04:00
gcp-cherry-pick-bot[bot]
f1bfa8c655 fix(ui): Fix Destination Cluster URL/Name Drop down not updating destination field (#13813) (#14216) (#14627)
* fix(ui): Fix Destination Cluster URL/Name Drop down not updating destination field (fixes #13813)



* Address linting errors



---------

Signed-off-by: Kyle Purkiss <kyle.purkiss@procore.com>
Co-authored-by: Kyle Purkiss <kyle.purkiss@procore.com>
2023-07-20 14:54:28 -04:00
57 changed files with 778 additions and 172 deletions

View File

@@ -85,7 +85,7 @@ jobs:
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.5.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.8.0
with:
image: quay.io/argoproj/argocd
digest: ${{ needs.build-and-publish.outputs.image-digest }}

View File

@@ -38,7 +38,7 @@ jobs:
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
if: github.repository == 'argoproj/argo-cd'
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.5.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.8.0
with:
image: quay.io/argoproj/argocd
digest: ${{ needs.argocd-image.outputs.image-digest }}
@@ -120,7 +120,7 @@ jobs:
contents: write # Needed for release uploads
if: github.repository == 'argoproj/argo-cd'
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.5.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.8.0
with:
base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
provenance-name: "argocd-cli.intoto.jsonl"

View File

@@ -1,4 +1,4 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:ac58ff7fe25edc58bdf0067ca99df00014dbd032e2246d30a722fa348fd799a5
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:0bced47fffa3361afa981854fcabcd4577cd43cebbb808cea2b1f33a3dd7f508
####################################################################################################
# Builder image
# Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image

View File

@@ -35,9 +35,7 @@ impact on Argo CD before opening an issue at least roughly.
## Supported Versions
We currently support the most recent release (`N`, e.g. `1.8`) and the release
previous to the most recent one (`N-1`, e.g. `1.7`). With the release of
`N+1`, `N-1` drops out of support and `N` becomes `N-1`.
We currently support the last 3 minor versions of Argo CD with security and bug fixes.
We regularly perform patch releases (e.g. `1.8.5` and `1.7.12`) for the
supported versions, which will contain fixes for security vulnerabilities and

View File

@@ -1 +1 @@
2.7.8
2.7.12

View File

@@ -267,7 +267,10 @@ func (r *Render) Replace(tmpl string, replaceMap map[string]interface{}, useGoTe
return tmpl, nil
}
fstTmpl := fasttemplate.New(tmpl, "{{", "}}")
fstTmpl, err := fasttemplate.NewTemplate(tmpl, "{{", "}}")
if err != nil {
return "", fmt.Errorf("invalid template: %w", err)
}
replacedTmpl := fstTmpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {
trimmedTag := strings.TrimSpace(tag)
replacement, ok := replaceMap[trimmedTag].(string)
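For context, `fasttemplate.New` panics on a malformed template (that is the `panic: Cannot find end tag="}}"` named in the commit title), while `NewTemplate` reports the same condition as an ordinary error the caller can handle. A standalone sketch:

```go
package main

import (
	"fmt"

	"github.com/valyala/fasttemplate"
)

func main() {
	// fasttemplate.New panics when a "{{" start tag has no matching "}}",
	// which is what crashed the ApplicationSet controller. NewTemplate
	// surfaces the same malformed template as an error instead.
	_, err := fasttemplate.NewTemplate("{{properly.closed}} {{improperly.closed", "{{", "}}")
	if err != nil {
		fmt.Println("invalid template:", err)
	}
}
```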

View File

@@ -464,6 +464,14 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
}
}
func Test_Render_Replace_no_panic_on_missing_closing_brace(t *testing.T) {
r := &Render{}
assert.NotPanics(t, func() {
_, err := r.Replace("{{properly.closed}} {{improperly.closed}", nil, false)
assert.Error(t, err)
})
}
func TestRenderTemplateKeys(t *testing.T) {
t.Run("fasttemplate", func(t *testing.T) {
application := &argoappsv1.Application{

View File

@@ -97,6 +97,14 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
<-ctx.Done()
// Kill by group ID to make sure child processes are killed. The - tells `kill` that it's a group ID.
// Since we didn't set Pgid in SysProcAttr, the group ID is the same as the process ID. https://pkg.go.dev/syscall#SysProcAttr
// Sending a TERM signal first to allow any potential cleanup if needed, and then sending a KILL signal
_ = sysCallTerm(-cmd.Process.Pid)
// modify cleanup timeout to allow process to cleanup
cleanupTimeout := 5 * time.Second
time.Sleep(cleanupTimeout)
_ = sysCallKill(-cmd.Process.Pid)
}()

View File

@@ -369,6 +369,28 @@ func TestRunCommandEmptyCommand(t *testing.T) {
assert.ErrorContains(t, err, "Command is empty")
}
// TestRunCommandContextTimeoutWithGracefulTermination makes sure that the process is given enough time to cleanup before sending SIGKILL.
func TestRunCommandContextTimeoutWithCleanup(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond)
defer cancel()
// Use a subshell so there's a child command.
// This command sleeps for 4 seconds which is currently less than the 5 second delay between SIGTERM and SIGKILL signal and then exits successfully.
command := Command{
Command: []string{"sh", "-c"},
Args: []string{`(trap 'echo "cleanup completed"; exit' TERM; sleep 4)`},
}
before := time.Now()
output, err := runCommand(ctx, command, "", []string{})
after := time.Now()
assert.Error(t, err) // The command should time out, causing an error.
assert.Less(t, after.Sub(before), 1*time.Second)
// The command should still have completed the cleanup after termination.
assert.Contains(t, output, "cleanup completed")
}
func Test_getParametersAnnouncement_empty_command(t *testing.T) {
staticYAML := `
- name: static-a

View File

@@ -14,3 +14,7 @@ func newSysProcAttr(setpgid bool) *syscall.SysProcAttr {
func sysCallKill(pid int) error {
return syscall.Kill(pid, syscall.SIGKILL)
}
func sysCallTerm(pid int) error {
return syscall.Kill(pid, syscall.SIGTERM)
}

View File

@@ -14,3 +14,7 @@ func newSysProcAttr(setpgid bool) *syscall.SysProcAttr {
func sysCallKill(pid int) error {
return nil
}
func sysCallTerm(pid int) error {
return nil
}

View File

@@ -1240,40 +1240,44 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
}
func (ctrl *ApplicationController) setOperationState(app *appv1.Application, state *appv1.OperationState) {
kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error {
if state.Phase == "" {
// expose any bugs where we neglect to set phase
panic("no phase was set")
}
if state.Phase.Completed() {
now := metav1.Now()
state.FinishedAt = &now
}
patch := map[string]interface{}{
"status": map[string]interface{}{
"operationState": state,
},
}
if state.Phase.Completed() {
// If operation is completed, clear the operation field to indicate no operation is
// in progress.
patch["operation"] = nil
}
if reflect.DeepEqual(app.Status.OperationState, state) {
log.Infof("No operation updates necessary to '%s'. Skipping patch", app.QualifiedName())
return nil
}
patchJSON, err := json.Marshal(patch)
if err != nil {
return fmt.Errorf("error marshaling json: %w", err)
}
if app.Status.OperationState != nil && app.Status.OperationState.FinishedAt != nil && state.FinishedAt == nil {
patchJSON, err = jsonpatch.MergeMergePatches(patchJSON, []byte(`{"status": {"operationState": {"finishedAt": null}}}`))
if err != nil {
return fmt.Errorf("error merging operation state patch: %w", err)
}
}
logCtx := log.WithFields(log.Fields{"application": app.Name, "appNamespace": app.Namespace, "project": app.Spec.Project})
if state.Phase == "" {
// expose any bugs where we neglect to set phase
panic("no phase was set")
}
if state.Phase.Completed() {
now := metav1.Now()
state.FinishedAt = &now
}
patch := map[string]interface{}{
"status": map[string]interface{}{
"operationState": state,
},
}
if state.Phase.Completed() {
// If operation is completed, clear the operation field to indicate no operation is
// in progress.
patch["operation"] = nil
}
if reflect.DeepEqual(app.Status.OperationState, state) {
logCtx.Infof("No operation updates necessary to '%s'. Skipping patch", app.QualifiedName())
return
}
patchJSON, err := json.Marshal(patch)
if err != nil {
logCtx.Errorf("error marshaling json: %v", err)
return
}
if app.Status.OperationState != nil && app.Status.OperationState.FinishedAt != nil && state.FinishedAt == nil {
patchJSON, err = jsonpatch.MergeMergePatches(patchJSON, []byte(`{"status": {"operationState": {"finishedAt": null}}}`))
if err != nil {
logCtx.Errorf("error merging operation state patch: %v", err)
return
}
}
kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error {
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
_, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{})
if err != nil {
@@ -1281,32 +1285,36 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
if apierr.IsNotFound(err) {
return nil
}
// kube.RetryUntilSucceed logs failed attempts at "debug" level, but we want to know if this fails. Log a
// warning.
logCtx.Warnf("error patching application with operation state: %v", err)
return fmt.Errorf("error patching application with operation state: %w", err)
}
log.Infof("updated '%s' operation (phase: %s)", app.QualifiedName(), state.Phase)
if state.Phase.Completed() {
eventInfo := argo.EventInfo{Reason: argo.EventReasonOperationCompleted}
var messages []string
if state.Operation.Sync != nil && len(state.Operation.Sync.Resources) > 0 {
messages = []string{"Partial sync operation"}
} else {
messages = []string{"Sync operation"}
}
if state.SyncResult != nil {
messages = append(messages, "to", state.SyncResult.Revision)
}
if state.Phase.Successful() {
eventInfo.Type = v1.EventTypeNormal
messages = append(messages, "succeeded")
} else {
eventInfo.Type = v1.EventTypeWarning
messages = append(messages, "failed:", state.Message)
}
ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " "), "")
ctrl.metricsServer.IncSync(app, state)
}
return nil
})
logCtx.Infof("updated '%s' operation (phase: %s)", app.QualifiedName(), state.Phase)
if state.Phase.Completed() {
eventInfo := argo.EventInfo{Reason: argo.EventReasonOperationCompleted}
var messages []string
if state.Operation.Sync != nil && len(state.Operation.Sync.Resources) > 0 {
messages = []string{"Partial sync operation"}
} else {
messages = []string{"Sync operation"}
}
if state.SyncResult != nil {
messages = append(messages, "to", state.SyncResult.Revision)
}
if state.Phase.Successful() {
eventInfo.Type = v1.EventTypeNormal
messages = append(messages, "succeeded")
} else {
eventInfo.Type = v1.EventTypeWarning
messages = append(messages, "failed:", state.Message)
}
ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " "), "")
ctrl.metricsServer.IncSync(app, state)
}
}
func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {

View File

@@ -3,9 +3,11 @@ package controller
import (
"context"
"encoding/json"
"errors"
"testing"
"time"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/resource"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
@@ -927,6 +929,41 @@ func TestSetOperationStateOnDeletedApp(t *testing.T) {
assert.True(t, patched)
}
type logHook struct {
entries []logrus.Entry
}
func (h *logHook) Levels() []logrus.Level {
return []logrus.Level{logrus.WarnLevel}
}
func (h *logHook) Fire(entry *logrus.Entry) error {
h.entries = append(h.entries, *entry)
return nil
}
func TestSetOperationStateLogRetries(t *testing.T) {
hook := logHook{}
logrus.AddHook(&hook)
t.Cleanup(func() {
logrus.StandardLogger().ReplaceHooks(logrus.LevelHooks{})
})
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
patched := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if !patched {
patched = true
return true, nil, errors.New("fake error")
}
return true, nil, nil
})
ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded})
assert.True(t, patched)
assert.Contains(t, hook.entries[0].Message, "fake error")
}
func TestNeedRefreshAppStatus(t *testing.T) {
testCases := []struct {
name string

View File

@@ -702,12 +702,14 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a
}
func (c *liveStateCache) handleDeleteEvent(clusterServer string) {
c.lock.Lock()
defer c.lock.Unlock()
c.lock.RLock()
cluster, ok := c.clusters[clusterServer]
c.lock.RUnlock()
if ok {
cluster.Invalidate()
c.lock.Lock()
delete(c.clusters, clusterServer)
c.lock.Unlock()
}
}

View File

@@ -1,13 +1,16 @@
package cache
import (
"context"
"errors"
"net"
"net/url"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -15,8 +18,10 @@ import (
"github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/gitops-engine/pkg/cache/mocks"
"github.com/stretchr/testify/mock"
"k8s.io/client-go/kubernetes/fake"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
)
type netError string
@@ -107,6 +112,98 @@ func TestHandleAddEvent_ClusterExcluded(t *testing.T) {
assert.Len(t, clustersCache.clusters, 0)
}
func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
testCluster := &appv1.Cluster{
Server: "https://mycluster",
Config: appv1.ClusterConfig{Username: "bar"},
}
fakeClient := fake.NewSimpleClientset()
settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd")
externalLockRef := sync.RWMutex{}
gitopsEngineClusterCache := &mocks.ClusterCache{}
clustersCache := liveStateCache{
clusters: map[string]cache.ClusterCache{
testCluster.Server: gitopsEngineClusterCache,
},
clusterFilter: func(cluster *appv1.Cluster) bool {
return true
},
settingsMgr: settingsMgr,
// Set the lock here so we can reference it later
// nolint We need to overwrite here to have access to the lock
lock: externalLockRef,
}
channel := make(chan string)
// Mocked lock held by the gitops-engine cluster cache
mockMutex := sync.RWMutex{}
// Locks to force trigger condition during test
// Condition order:
// EnsuredSynced -> Locks gitops-engine
// handleDeleteEvent -> Locks liveStateCache
// EnsureSynced via sync, newResource, populateResourceInfoHandler -> attempts to Lock liveStateCache
// handleDeleteEvent via cluster.Invalidate -> attempts to Lock gitops-engine
handleDeleteWasCalled := sync.Mutex{}
engineHoldsLock := sync.Mutex{}
handleDeleteWasCalled.Lock()
engineHoldsLock.Lock()
gitopsEngineClusterCache.On("EnsureSynced").Run(func(args mock.Arguments) {
// Held by EnsureSync calling into sync and watchEvents
mockMutex.Lock()
defer mockMutex.Unlock()
// Continue Execution of timer func
engineHoldsLock.Unlock()
// Wait for handleDeleteEvent to be called triggering the lock
// on the liveStateCache
handleDeleteWasCalled.Lock()
t.Logf("handleDelete was called, EnsureSynced continuing...")
handleDeleteWasCalled.Unlock()
// Try and obtain the lock on the liveStateCache
alreadyFailed := !externalLockRef.TryLock()
if alreadyFailed {
channel <- "DEADLOCKED -- EnsureSynced could not obtain lock on liveStateCache"
return
}
// TryLock above already acquired the lock; release it here.
t.Logf("EnsureSynced was able to lock liveStateCache")
externalLockRef.Unlock()
}).Return(nil).Once()
gitopsEngineClusterCache.On("Invalidate").Run(func(args mock.Arguments) {
// If deadlock is fixed should be able to acquire lock here
alreadyFailed := !mockMutex.TryLock()
if alreadyFailed {
channel <- "DEADLOCKED -- Invalidate could not obtain lock on gitops-engine"
return
}
// TryLock above already acquired the lock; release it here.
t.Logf("Invalidate was able to lock gitops-engine cache")
mockMutex.Unlock()
}).Return()
go func() {
// Start the gitops-engine lock holds
go func() {
err := gitopsEngineClusterCache.EnsureSynced()
if err != nil {
assert.Fail(t, err.Error())
}
}()
// Wait for EnsureSynced to grab the lock for gitops-engine
engineHoldsLock.Lock()
t.Log("EnsureSynced has obtained lock on gitops-engine")
engineHoldsLock.Unlock()
// Run in background
go clustersCache.handleDeleteEvent(testCluster.Server)
// Allow execution to continue on clusters cache call to trigger lock
handleDeleteWasCalled.Unlock()
channel <- "PASSED"
}()
select {
case str := <-channel:
assert.Equal(t, "PASSED", str, str)
case <-time.After(5 * time.Second):
assert.Fail(t, "Ended up in deadlock")
}
}
func TestIsRetryableError(t *testing.T) {
var (
tlsHandshakeTimeoutErr net.Error = netError("net/http: TLS handshake timeout")

View File

@@ -172,6 +172,7 @@ spec:
- CreateNamespace=true # Namespace Auto-Creation ensures that namespace specified as the application destination exists in the destination cluster.
- PrunePropagationPolicy=foreground # Supported policies are background, foreground and orphan.
- PruneLast=true # Allow the ability for resource pruning to happen as a final, implicit wave of a sync operation
- RespectIgnoreDifferences=true # When syncing changes, respect fields ignored by the ignoreDifferences configuration
managedNamespaceMetadata: # Sets the metadata for the application namespace. Only valid if CreateNamespace=true (see above), otherwise it's a no-op.
labels: # The labels to set on the application namespace
any: label
@@ -190,7 +191,7 @@ spec:
maxDuration: 3m # the maximum amount of time allowed for the backoff strategy
# Will ignore differences between live and desired states during the diff. Note that these configurations are not
# used during the sync process.
# used during the sync process unless the `RespectIgnoreDifferences=true` sync option is enabled.
ignoreDifferences:
# for the specified json pointers
- group: apps
@@ -202,6 +203,9 @@ spec:
kind: "*"
managedFieldsManagers:
- kube-controller-manager
# Name and namespace are optional. If specified, they must match exactly; these are not glob patterns.
name: my-deployment
namespace: my-namespace
# RevisionHistoryLimit limits the number of items kept in the application's revision history, which is used for
# informational purposes as well as for rollbacks to previous versions. This should only be changed in exceptional

View File

@@ -153,7 +153,7 @@ Or, a shorter way (using [path.Match](https://golang.org/pkg/path/#Match) syntax
```yaml
- path: /d/*
- path: /d/[f|g]
- path: /d/[fg]
exclude: true
```

View File

@@ -6,7 +6,7 @@ Generators are primarily based on the data source that they use to generate the
As of this writing there are eight generators:
- [List generator](Generators-List.md): The List generator allows you to target Argo CD Applications to clusters based on a fixed list of cluster name/URL values.
- [List generator](Generators-List.md): The List generator allows you to target Argo CD Applications to clusters based on a fixed list of any chosen key/value element pairs.
- [Cluster generator](Generators-Cluster.md): The Cluster generator allows you to target Argo CD Applications to clusters, based on the list of clusters defined within (and managed by) Argo CD (which includes automatically responding to cluster addition/removal events from Argo CD).
- [Git generator](Generators-Git.md): The Git generator allows you to create Applications based on files within a Git repository, or based on the directory structure of a Git repository.
- [Matrix generator](Generators-Matrix.md): The Matrix generator may be used to combine the generated parameters of two separate generators.

View File

@@ -15,7 +15,7 @@ As an experimental feature, progressive syncs must be explicitly enabled, in one
1. Pass `--enable-progressive-syncs` to the ApplicationSet controller args.
1. Set `ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS=true` in the ApplicationSet controller environment variables.
1. Set `applicationsetcontroller.enable.progressive.syncs: true` in the Argo CD ConfigMap.
1. Set `applicationsetcontroller.enable.progressive.syncs: true` in the Argo CD `argocd-cmd-params-cm` ConfigMap.
## Strategies

View File

@@ -118,7 +118,7 @@ spec:
# static parameter announcements list.
command: [echo, '[{"name": "example-param", "string": "default-string-value"}]']
# If set to then the plugin receives repository files with original file mode. Dangerous since the repository
# If set to `true` then the plugin receives repository files with original file mode. Dangerous since the repository
# might have executable files. Set to true only if you trust the CMP plugin authors.
preserveFileMode: false
```

View File

@@ -21,7 +21,7 @@ Each link in the list has five subfields:
1. `title`: title/tag that will be displayed in the UI corresponding to that link
2. `url`: the actual URL where the deep link will redirect to, this field can be templated to use data from the
corresponding application, project or resource objects (depending on where it is located). This uses [text/template](pkg.go.dev/text/template) pkg for templating
corresponding application, project or resource objects (depending on where it is located). This uses [text/template](https://pkg.go.dev/text/template) pkg for templating
3. `description` (optional): a description for what the deep link is about
4. `icon.class` (optional): a font-awesome icon class to be used when displaying the links in dropdown menus
5. `if` (optional): a conditional statement that results in either `true` or `false`, it also has access to the same
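A minimal sketch of the templating in point 2 above, using Go's text/template; the `app` struct and its fields are hypothetical stand-ins for the application, project, or resource object the real template receives:

```go
package main

import (
	"os"
	"text/template"
)

// app is a hypothetical stand-in for the object fields a deep link
// template can reference.
type app struct{ Name, Namespace string }

func main() {
	// The url field is rendered with text/template, so {{.Name}} and
	// {{.Namespace}} are substituted from the object's data.
	tmpl := template.Must(template.New("url").Parse(
		"https://logs.example.com/?app={{.Name}}&ns={{.Namespace}}\n"))
	_ = tmpl.Execute(os.Stdout, app{Name: "guestbook", Namespace: "argocd"})
}
```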

View File

@@ -17,8 +17,9 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/st
* Add Email username and password token to `argocd-notifications-secret` secret
```bash
export EMAIL_USER=<your-username>
export PASSWORD=<your-password>
EMAIL_USER=<your-username>
PASSWORD=<your-password>
kubectl apply -n argocd -f - << EOF
apiVersion: v1
kind: Secret

View File

@@ -86,3 +86,8 @@ spec:
clusters:
- in-cluster
- cluster1
# By default, apps may sync to any cluster specified under the `destinations` field, even if they are not
# scoped to this project. Set the following field to `true` to restrict apps in this project to only clusters
# scoped to this project.
permitOnlyProjectScopedClusters: false

View File

@@ -0,0 +1,26 @@
# Annotations and Labels used by Argo CD
## Annotations
| Annotation key | Target resource(s) | Possible values | Description |
|--------------------------------------------|---------------------|---------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| argocd.argoproj.io/application-set-refresh | ApplicationSet | `"true"` | Added when an ApplicationSet is requested to be refreshed by a webhook. The ApplicationSet controller will remove this annotation at the end of reconciliation. |
| argocd.argoproj.io/compare-options | any | [see compare options docs](compare-options.md) | Configures how an app's current state is compared to its desired state. |
| argocd.argoproj.io/hook | any | [see resource hooks docs](resource_hooks.md) | Used to configure [resource hooks](resource_hooks.md). |
| argocd.argoproj.io/hook-delete-policy | any | [see resource hooks docs](resource_hooks.md#hook-deletion-policies) | Used to set a [resource hook's deletion policy](resource_hooks.md#hook-deletion-policies). |
| argocd.argoproj.io/manifest-generate-paths | Application | [see scaling docs](../operator-manual/high_availability.md#webhook-and-manifest-paths-annotation) | Used to avoid unnecessary Application refreshes, especially in mono-repos. |
| argocd.argoproj.io/refresh | Application | `normal`, `hard` | Indicates that app needs to be refreshed. Removed by application controller after app is refreshed. Value `"hard"` means manifest cache and target cluster state cache should be invalidated before refresh. |
| argocd.argoproj.io/skip-reconcile | Application | `"true"` | Indicates to the Argo CD application controller that the Application should not be reconciled. See the [skip reconcile documentation](skip_reconcile.md) for use cases. |
| argocd.argoproj.io/sync-options | any | [see sync options docs](sync-options.md) | Provides a variety of settings to determine how an Application's resources are synced. |
| argocd.argoproj.io/sync-wave | any | [see sync waves docs](sync-waves.md) | |
| argocd.argoproj.io/tracking-id | any | any | Used by Argo CD to track resources it manages. See [resource tracking docs](resource_tracking.md) for details. |
| link.argocd.argoproj.io/{some link name} | any | An http(s) URL | Adds a link to the Argo CD UI for the resource. See [external URL docs](external-url.md) for details. |
| pref.argocd.argoproj.io/default-pod-sort | Application | [see UI customization docs](../operator-manual/ui-customization.md) | Sets the Application's default grouping mechanism. |
| pref.argocd.argoproj.io/default-view | Application | [see UI customization docs](../operator-manual/ui-customization.md) | Sets the Application's default view mode (e.g. "tree" or "list") |
## Labels
| Label key | Target resource(s) | Possible values | Description |
|--------------------------------|---------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| argocd.argoproj.io/instance | Application | any | Recommended tracking label to [avoid conflicts with other tools which use `app.kubernetes.io/instance`](../faq.md#why-is-my-app-out-of-sync-even-after-syncing). |
| argocd.argoproj.io/secret-type | Secret | `cluster`, `repository`, `repo-creds` | Identifies certain types of Secrets used by Argo CD. See the [Declarative Setup docs](../operator-manual/declarative-setup.md) for details. |

View File

@@ -279,7 +279,7 @@ It is possible to add and remove TLS certificates using the ArgoCD web UI:
### Managing TLS certificates using declarative configuration
You can also manage TLS certificates in a declarative, self-managed ArgoCD setup. All TLS certificates are stored in the ConfigMap object `argocd-tls-cert-cm`.
You can also manage TLS certificates in a declarative, self-managed ArgoCD setup. All TLS certificates are stored in the ConfigMap object `argocd-tls-certs-cm`.
Please refer to the [Operator Manual](../../operator-manual/declarative-setup/#repositories-using-self-signed-tls-certificates-or-are-signed-by-custom-ca) for more information.
## Unknown SSH Hosts

View File

@@ -0,0 +1,144 @@
# Sync Applications with Kubectl
You can use "kubectl" to ask Argo CD to synchronize applications the same way you can use the CLI or UI. Many configurations like "force", "prune", "apply" and even synchronize a specific list of resources are equally supported. This is done by applying or patching the Argo CD application with a document that defines an "operation".
This "operation" defines how a synchronization should be done and for what resources these synchronization is to be done.
There are many configuration options that can be added to the "operation". Next, a few of them are explained. For more details, you can have a look at the CRD [applications.argoproj.io](https://github.com/argoproj/argo-cd/blob/master/manifests/crds/application-crd.yaml). Some of them are required, whereas others are optional.
To ask Argo CD to synchronize all resources of a given application, we can do:
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: <app-name>
namespace: <namespace>
spec:
...
operation:
initiatedBy:
username: <username>
sync:
syncStrategy:
hook: {}
```
```bash
$ kubectl apply -f <apply-file>
```
The most important part is the "sync" definition in the "operation" field. You can pass optional information like "info" or "initiatedBy". "info" allows you to add information about the operation in the form of a list. "initiatedBy" contains information about who initiated the operation request.
Or if you prefer, you also can patch:
```yaml
operation:
initiatedBy:
username: <username>
sync:
syncStrategy:
hook: {}
```
```bash
$ kubectl patch -n <namespace> app <app-name> --patch-file <patch-file> --type merge
```
Be aware that patches, especially with merge strategies, may not work the way you expect, particularly if you change sync strategies or options.
In these cases, "kubectl apply" gives better results.
Whether you use "kubectl patch" or "kubectl apply", the state of the synchronization is reported in the "operationState" field of the application object.
```bash
$ kubectl get -n <namespace> app <app-name> -o yaml
...
status:
operationState:
finishedAt: "2023-08-03T11:16:17Z"
message: successfully synced (all tasks run)
phase: Succeeded
```
# Apply and Hook synchronization strategies
There are two types of synchronization strategies: "hook", which is the default value, and "apply".
An "apply" sync strategy tells Argo CD to "kubectl apply", whereas a "hook" sync strategy informs Argo CD to submit any resource that's referenced in the operation. This way the synchronization of these resources will take into consideration any hook the resource has been annotated with.
```yaml
operation:
sync:
syncStrategy:
apply: {}
```
```yaml
operation:
sync:
syncStrategy:
hook: {}
```
Both strategies support "force". However, be aware that a force operation deletes the resource when the patch encounters a conflict after having retried 5 times.
```yaml
operation:
sync:
syncStrategy:
apply:
force: true
```
```yaml
operation:
sync:
syncStrategy:
hook:
force: true
```
# Prune
If you want to prune your resources before applying, you can instruct Argo CD to do so:
```yaml
operation:
sync:
prune: true
```
# List of resources
You can always pass a list of resources. This list can be all the resources the application manages or only a subset, for example resources that remained out of sync for some reason.
Only "kind" and "name" are required fields when referencing resources, but the fields "group" and "namespace" can also be defined:
```yaml
operation:
sync:
resources:
- kind: Namespace
name: namespace-name
- kind: ServiceAccount
name: service-account-name
namespace: namespace-name
- group: networking.k8s.io
kind: NetworkPolicy
name: network-policy-name
namespace: namespace-name
```
# Sync Options
In an operation, you can also pass sync options. Each option is passed as a "name=value" pair. For example:
```yaml
operation:
sync:
syncOptions:
- Validate=false
- Prune=false
```
For more information about sync options, please refer to [sync-options](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/)
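For automation beyond kubectl, the same merge patch can be submitted programmatically. A hedged sketch using client-go's dynamic client; the kubeconfig location, app name "my-app", and namespace "argocd" are placeholders:

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load kubeconfig from the default location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	gvr := schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applications"}
	// The same merge patch shown above for `kubectl patch`.
	patch := []byte(`{"operation":{"initiatedBy":{"username":"example"},"sync":{"syncStrategy":{"hook":{}}}}}`)
	if _, err := client.Resource(gvr).Namespace("argocd").Patch(
		context.TODO(), "my-app", types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
		panic(err)
	}
}
```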

View File

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v2.7.8
newTag: v2.7.12
resources:
- ./application-controller
- ./dex

View File

@@ -16706,7 +16706,7 @@ spec:
key: applicationsetcontroller.enable.progressive.syncs
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -16968,7 +16968,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -17020,7 +17020,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -17233,7 +17233,7 @@ spec:
key: controller.kubectl.parallelism.limit
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v2.7.8
newTag: v2.7.12

View File

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v2.7.8
newTag: v2.7.12
resources:
- ../../base/application-controller
- ../../base/applicationset-controller

View File

@@ -17927,7 +17927,7 @@ spec:
key: applicationsetcontroller.enable.progressive.syncs
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -18037,7 +18037,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -18094,7 +18094,7 @@ spec:
containers:
- args:
- /usr/local/bin/argocd-notifications
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -18399,7 +18399,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -18451,7 +18451,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -18733,7 +18733,7 @@ spec:
key: server.enable.proxy.extension
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -18978,7 +18978,7 @@ spec:
key: controller.kubectl.parallelism.limit
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1587,7 +1587,7 @@ spec:
key: applicationsetcontroller.enable.progressive.syncs
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1697,7 +1697,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1754,7 +1754,7 @@ spec:
containers:
- args:
- /usr/local/bin/argocd-notifications
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2059,7 +2059,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2111,7 +2111,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2393,7 +2393,7 @@ spec:
key: server.enable.proxy.extension
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2638,7 +2638,7 @@ spec:
key: controller.kubectl.parallelism.limit
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -17044,7 +17044,7 @@ spec:
key: applicationsetcontroller.enable.progressive.syncs
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -17154,7 +17154,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -17211,7 +17211,7 @@ spec:
containers:
- args:
- /usr/local/bin/argocd-notifications
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -17468,7 +17468,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -17520,7 +17520,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -17795,7 +17795,7 @@ spec:
key: server.enable.proxy.extension
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -18035,7 +18035,7 @@ spec:
key: controller.kubectl.parallelism.limit
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -704,7 +704,7 @@ spec:
key: applicationsetcontroller.enable.progressive.syncs
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -814,7 +814,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -871,7 +871,7 @@ spec:
containers:
- args:
- /usr/local/bin/argocd-notifications
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1128,7 +1128,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1180,7 +1180,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -1455,7 +1455,7 @@ spec:
key: server.enable.proxy.extension
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -1695,7 +1695,7 @@ spec:
key: controller.kubectl.parallelism.limit
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.7.8
image: quay.io/argoproj/argocd:v2.7.12
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -157,6 +157,7 @@ nav:
- user-guide/selective_sync.md
- user-guide/sync-waves.md
- user-guide/sync_windows.md
- user-guide/sync-kubectl.md
- user-guide/skip_reconcile.md
- Generating Applications with ApplicationSet: user-guide/application-set.md
- user-guide/ci_automation.md
@@ -166,6 +167,7 @@ nav:
- user-guide/external-url.md
- user-guide/extra_info.md
- Notification subscriptions: user-guide/subscriptions.md
- user-guide/annotations-and-labels.md
- Command Reference: user-guide/commands/argocd.md
- Developer Guide:
- developer-guide/index.md

View File

@@ -945,11 +945,13 @@ func getHelmRepos(appPath string, repositories []*v1alpha1.Repository, helmRepoC
repos := make([]helm.HelmRepository, 0)
for _, dep := range dependencies {
// find matching repo credentials by URL or name
repo, ok := reposByUrl[dep.Repo]
if !ok && dep.Name != "" {
repo, ok = reposByName[dep.Name]
}
if !ok {
// if no matching repo credentials found, use the repo creds from the credential list
repo = &v1alpha1.Repository{Repo: dep.Repo, Name: dep.Name, EnableOCI: dep.EnableOCI}
if repositoryCredential := getRepoCredential(helmRepoCreds, dep.Repo); repositoryCredential != nil {
repo.EnableOCI = repositoryCredential.EnableOCI
@@ -958,6 +960,16 @@ func getHelmRepos(appPath string, repositories []*v1alpha1.Repository, helmRepoC
repo.SSHPrivateKey = repositoryCredential.SSHPrivateKey
repo.TLSClientCertData = repositoryCredential.TLSClientCertData
repo.TLSClientCertKey = repositoryCredential.TLSClientCertKey
} else if repo.EnableOCI {
// finally if repo is OCI and no credentials found, use the first OCI credential matching by hostname
// see https://github.com/argoproj/argo-cd/issues/14636
for _, cred := range repositories {
if depURL, err := url.Parse("oci://" + dep.Repo); err == nil && cred.EnableOCI && depURL.Host == cred.Repo {
repo.Username = cred.Username
repo.Password = cred.Password
break
}
}
}
}
repos = append(repos, helm.HelmRepository{Name: repo.Name, Repo: repo.Repo, Creds: repo.GetHelmCreds(), EnableOci: repo.EnableOCI})
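The hostname comparison above works because the scheme-prefixed dependency URL parses cleanly; a small sketch of just that step, using only the standard library:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A Helm OCI dependency repo such as "example.com/myrepo" carries no
	// scheme, so one is prefixed before parsing. The host part can then be
	// compared against credentials stored per registry hostname.
	dep := "example.com/myrepo"
	u, err := url.Parse("oci://" + dep)
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host) // example.com
	fmt.Println(u.Path) // /myrepo
}
```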

View File

@@ -2616,7 +2616,7 @@ func TestGetHelmRepos_OCIDependencies(t *testing.T) {
assert.Equal(t, len(helmRepos), 1)
assert.Equal(t, helmRepos[0].Username, "test")
assert.Equal(t, helmRepos[0].EnableOci, true)
assert.Equal(t, helmRepos[0].Repo, "example.com")
assert.Equal(t, helmRepos[0].Repo, "example.com/myrepo")
}
func TestGetHelmRepo_NamedRepos(t *testing.T) {

View File

@@ -2,5 +2,5 @@ name: my-chart
version: 1.1.0
dependencies:
- name: my-dependency
repository: oci://example.com
repository: oci://example.com/myrepo
version: '*'

View File

@@ -1389,7 +1389,7 @@ func (s *Server) ManagedResources(ctx context.Context, q *application.ResourcesQ
res := &application.ManagedResourcesResponse{}
for i := range items {
item := items[i]
if isMatchingResource(q, kube.ResourceKey{Name: item.Name, Namespace: item.Namespace, Kind: item.Kind, Group: item.Group}) {
if !item.Hook && isMatchingResource(q, kube.ResourceKey{Name: item.Name, Namespace: item.Namespace, Kind: item.Kind, Group: item.Group}) {
res.Items = append(res.Items, item)
}
}

View File

@@ -5,6 +5,7 @@ import (
"io"
"net/http"
util_session "github.com/argoproj/argo-cd/v2/util/session"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
@@ -36,11 +37,12 @@ type terminalHandler struct {
allowedShells []string
namespace string
enabledNamespaces []string
sessionManager util_session.SessionManager
}
// NewHandler returns a new terminal handler.
func NewHandler(appLister applisters.ApplicationLister, namespace string, enabledNamespaces []string, db db.ArgoDB, enf *rbac.Enforcer, cache *servercache.Cache,
-appResourceTree AppResourceTreeFn, allowedShells []string) *terminalHandler {
+appResourceTree AppResourceTreeFn, allowedShells []string, sessionManager util_session.SessionManager) *terminalHandler {
return &terminalHandler{
appLister: appLister,
db: db,
@@ -50,6 +52,7 @@ func NewHandler(appLister applisters.ApplicationLister, namespace string, enable
allowedShells: allowedShells,
namespace: namespace,
enabledNamespaces: enabledNamespaces,
sessionManager: sessionManager,
}
}
@@ -221,7 +224,7 @@ func (s *terminalHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fieldLog.Info("terminal session starting")
-session, err := newTerminalSession(w, r, nil)
+session, err := newTerminalSession(w, r, nil, s.sessionManager)
if err != nil {
http.Error(w, "Failed to start terminal session", http.StatusBadRequest)
return
@@ -277,6 +280,11 @@ type TerminalMessage struct {
Cols uint16 `json:"cols"`
}
// TerminalCommand is the struct for websocket commands, e.g. asking the client to reconnect
type TerminalCommand struct {
Code int
}
// startProcess executes specified commands in the container and connects it up with the ptyHandler (a session)
func startProcess(k8sClient kubernetes.Interface, cfg *rest.Config, namespace, podName, containerName string, cmd []string, ptyHandler PtyHandler) error {
req := k8sClient.CoreV1().RESTClient().Post().


@@ -3,6 +3,9 @@ package application
import (
"encoding/json"
"fmt"
"github.com/argoproj/argo-cd/v2/common"
httputil "github.com/argoproj/argo-cd/v2/util/http"
util_session "github.com/argoproj/argo-cd/v2/util/session"
"net/http"
"sync"
"time"
@@ -12,6 +15,11 @@ import (
"k8s.io/client-go/tools/remotecommand"
)
const (
ReconnectCode = 1
ReconnectMessage = "\nReconnect because the token was refreshed...\n"
)
var upgrader = func() websocket.Upgrader {
upgrader := websocket.Upgrader{}
upgrader.HandshakeTimeout = time.Second * 2
@@ -23,25 +31,40 @@ var upgrader = func() websocket.Upgrader {
// terminalSession implements PtyHandler
type terminalSession struct {
-wsConn *websocket.Conn
-sizeChan chan remotecommand.TerminalSize
-doneChan chan struct{}
-tty bool
-readLock sync.Mutex
-writeLock sync.Mutex
+wsConn *websocket.Conn
+sizeChan chan remotecommand.TerminalSize
+doneChan chan struct{}
+tty bool
+readLock sync.Mutex
+writeLock sync.Mutex
+sessionManager util_session.SessionManager
+token *string
}
// getToken gets the auth token from the websocket request
func getToken(r *http.Request) (string, error) {
cookies := r.Cookies()
return httputil.JoinCookies(common.AuthCookieName, cookies)
}
// newTerminalSession creates a terminalSession
-func newTerminalSession(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*terminalSession, error) {
+func newTerminalSession(w http.ResponseWriter, r *http.Request, responseHeader http.Header, sessionManager util_session.SessionManager) (*terminalSession, error) {
token, err := getToken(r)
if err != nil {
return nil, err
}
conn, err := upgrader.Upgrade(w, r, responseHeader)
if err != nil {
return nil, err
}
session := &terminalSession{
-wsConn: conn,
-tty: true,
-sizeChan: make(chan remotecommand.TerminalSize),
-doneChan: make(chan struct{}),
+wsConn: conn,
+tty: true,
+sizeChan: make(chan remotecommand.TerminalSize),
+doneChan: make(chan struct{}),
+sessionManager: sessionManager,
+token: &token,
}
return session, nil
}
@@ -61,8 +84,40 @@ func (t *terminalSession) Next() *remotecommand.TerminalSize {
}
}
// reconnect sends the reconnect code to the client and asks it to initiate a new ws session
func (t *terminalSession) reconnect() (int, error) {
reconnectCommand, _ := json.Marshal(TerminalCommand{
Code: ReconnectCode,
})
reconnectMessage, _ := json.Marshal(TerminalMessage{
Operation: "stdout",
Data: ReconnectMessage,
})
t.writeLock.Lock()
err := t.wsConn.WriteMessage(websocket.TextMessage, reconnectMessage)
if err != nil {
log.Errorf("write message err: %v", err)
return 0, err
}
err = t.wsConn.WriteMessage(websocket.TextMessage, reconnectCommand)
if err != nil {
log.Errorf("write message err: %v", err)
return 0, err
}
t.writeLock.Unlock()
return 0, nil
}
// Read is called in a loop from remotecommand as long as the process is running
func (t *terminalSession) Read(p []byte) (int, error) {
// check if the token is still valid
_, newToken, err := t.sessionManager.VerifyToken(*t.token)
// err is set if the token was revoked; newToken is set if a refresh happened
if err != nil || newToken != "" {
// send the reconnect code if the token was refreshed
return t.reconnect()
}
t.readLock.Lock()
_, message, err := t.wsConn.ReadMessage()
t.readLock.Unlock()
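
For context on the small wire protocol these changes introduce: when VerifyToken returns an error or a refreshed token, the server writes a stdout TerminalMessage carrying ReconnectMessage, then a TerminalCommand carrying ReconnectCode, and expects the client to tear down the socket and reconnect (the PodTerminalViewer change further below does exactly that). Here is a hedged Go sketch of a client-side read loop that distinguishes the two frame types; the connection setup and the reconnect callback are assumptions of the sketch.

package client

import (
	"encoding/json"
	"fmt"

	"github.com/gorilla/websocket"
)

// handleFrames reads frames until an error occurs or a reconnect command
// arrives. conn is an established *websocket.Conn and reconnect is supplied
// by the caller; both are assumptions of this sketch.
func handleFrames(conn *websocket.Conn, reconnect func()) error {
	for {
		_, p, err := conn.ReadMessage()
		if err != nil {
			return err
		}
		// Command frames carry a non-zero Code (ReconnectCode is 1 above);
		// data frames unmarshal with Code left at zero.
		var cmd struct{ Code int }
		if json.Unmarshal(p, &cmd) == nil && cmd.Code == 1 {
			reconnect()
			return nil
		}
		// Everything else is ordinary terminal traffic.
		var msg struct{ Operation, Data string }
		if json.Unmarshal(p, &msg) == nil && msg.Operation == "stdout" {
			fmt.Print(msg.Data)
		}
	}
}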


@@ -0,0 +1,46 @@
package application
import (
"encoding/json"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
func reconnect(w http.ResponseWriter, r *http.Request) {
var upgrader = websocket.Upgrader{}
c, err := upgrader.Upgrade(w, r, nil)
if err != nil {
return
}
ts := terminalSession{wsConn: c}
_, _ = ts.reconnect()
}
func TestReconnect(t *testing.T) {
s := httptest.NewServer(http.HandlerFunc(reconnect))
defer s.Close()
u := "ws" + strings.TrimPrefix(s.URL, "http")
// Connect to the server
ws, _, err := websocket.DefaultDialer.Dial(u, nil)
assert.NoError(t, err)
defer ws.Close()
_, p, _ := ws.ReadMessage()
var message TerminalMessage
err = json.Unmarshal(p, &message)
assert.NoError(t, err)
assert.Equal(t, message.Data, ReconnectMessage)
}


@@ -25,6 +25,8 @@ import (
// nolint:staticcheck
golang_proto "github.com/golang/protobuf/proto"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"github.com/argoproj/notifications-engine/pkg/api"
"github.com/argoproj/pkg/sync"
@@ -290,7 +292,9 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts) *ArgoCDServer {
apiFactory := api.NewFactory(settings_notif.GetFactorySettings(argocdService, "argocd-notifications-secret", "argocd-notifications-cm"), opts.Namespace, secretInformer, configMapInformer)
-return &ArgoCDServer{
+dbInstance := db.NewDB(opts.Namespace, settingsMgr, opts.KubeClientset)
+a := &ArgoCDServer{
ArgoCDServerOpts: opts,
log: log.NewEntry(log.StandardLogger()),
settings: settings,
@@ -306,11 +310,19 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts) *ArgoCDServer {
policyEnforcer: policyEnf,
userStateStorage: userStateStorage,
staticAssets: http.FS(staticFS),
-db: db.NewDB(opts.Namespace, settingsMgr, opts.KubeClientset),
+db: dbInstance,
apiFactory: apiFactory,
secretInformer: secretInformer,
configMapInformer: configMapInformer,
}
err = a.logInClusterWarnings()
if err != nil {
// Just log. It's not critical.
log.Warnf("Failed to log in-cluster warnings: %v", err)
}
return a
}
const (
@@ -357,6 +369,47 @@ func (l *Listeners) Close() error {
return nil
}
// logInClusterWarnings checks the in-cluster configuration and prints out any warnings.
func (a *ArgoCDServer) logInClusterWarnings() error {
labelSelector := labels.NewSelector()
req, err := labels.NewRequirement(common.LabelKeySecretType, selection.Equals, []string{common.LabelValueSecretTypeCluster})
if err != nil {
return fmt.Errorf("failed to construct cluster-type label selector: %w", err)
}
labelSelector = labelSelector.Add(*req)
secretsLister, err := a.settingsMgr.GetSecretsLister()
if err != nil {
return fmt.Errorf("failed to get secrets lister: %w", err)
}
clusterSecrets, err := secretsLister.Secrets(a.ArgoCDServerOpts.Namespace).List(labelSelector)
if err != nil {
return fmt.Errorf("failed to list cluster secrets: %w", err)
}
var inClusterSecrets []string
for _, clusterSecret := range clusterSecrets {
cluster, err := db.SecretToCluster(clusterSecret)
if err != nil {
return fmt.Errorf("could not unmarshal cluster secret %q: %w", clusterSecret.Name, err)
}
if cluster.Server == v1alpha1.KubernetesInternalAPIServerAddr {
inClusterSecrets = append(inClusterSecrets, clusterSecret.Name)
}
}
if len(inClusterSecrets) > 0 {
// Don't make this call unless we actually have in-cluster secrets, to save time.
dbSettings, err := a.settingsMgr.GetSettings()
if err != nil {
return fmt.Errorf("could not get DB settings: %w", err)
}
if !dbSettings.InClusterEnabled {
for _, clusterName := range inClusterSecrets {
log.Warnf("cluster %q uses in-cluster server address but it's disabled in Argo CD settings", clusterName)
}
}
}
return nil
}
func startListener(host string, port int) (net.Listener, error) {
var conn net.Listener
var realErr error
@@ -459,12 +512,12 @@ func (a *ArgoCDServer) Run(ctx context.Context, listeners *Listeners) {
var httpL net.Listener
var httpsL net.Listener
if !a.useTLS() {
-httpL = tcpm.Match(cmux.HTTP1Fast())
+httpL = tcpm.Match(cmux.HTTP1Fast("PATCH"))
grpcL = tcpm.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
} else {
// We first match on HTTP 1.1 methods.
-httpL = tcpm.Match(cmux.HTTP1Fast())
+httpL = tcpm.Match(cmux.HTTP1Fast("PATCH"))
// If not matched, we assume that it's TLS.
tlsl := tcpm.Match(cmux.Any())
@@ -479,7 +532,7 @@ func (a *ArgoCDServer) Run(ctx context.Context, listeners *Listeners) {
// Now, we build another mux recursively to match HTTPS and gRPC.
tlsm = cmux.New(tlsl)
-httpsL = tlsm.Match(cmux.HTTP1Fast())
+httpsL = tlsm.Match(cmux.HTTP1Fast("PATCH"))
grpcL = tlsm.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
}
@@ -909,7 +962,7 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWebHandl
}
mux.Handle("/api/", handler)
-terminal := application.NewHandler(a.appLister, a.Namespace, a.ApplicationNamespaces, a.db, a.enf, a.Cache, appResourceTreeFn, a.settings.ExecShells).
+terminal := application.NewHandler(a.appLister, a.Namespace, a.ApplicationNamespaces, a.db, a.enf, a.Cache, appResourceTreeFn, a.settings.ExecShells, *a.sessionMgr).
WithFeatureFlagMiddleware(a.settingsMgr.GetSettings)
th := util_session.WithAuthMiddleware(a.DisableAuth, a.sessionMgr, terminal)
mux.Handle("/terminal", th)
@@ -922,6 +975,7 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWebHandl
// will be added in mux.
registerExtensions(mux, a)
}
mustRegisterGWHandler(versionpkg.RegisterVersionServiceHandler, ctx, gwmux, conn)
mustRegisterGWHandler(clusterpkg.RegisterClusterServiceHandler, ctx, gwmux, conn)
mustRegisterGWHandler(applicationpkg.RegisterApplicationServiceHandler, ctx, gwmux, conn)
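
One change in this file deserves a note: cmux's HTTP1Fast matcher recognizes only a fixed set of common HTTP/1 methods, and PATCH is not among them, so PATCH requests were not routed to the HTTP listener. HTTP1Fast takes additional method names as variadic arguments, which is all the fix needs; the new TestPatchHttp e2e test below exercises it. A minimal sketch of the pattern, assuming a pre-existing listener, handler, and gRPC acceptor:

package main

import (
	"net"
	"net/http"

	"github.com/soheilhy/cmux"
)

// serve splits one listener between HTTP/1 (now including PATCH) and gRPC.
func serve(ln net.Listener, handler http.Handler, acceptGRPC func(net.Listener)) error {
	tcpm := cmux.New(ln)
	// Without the extra argument, PATCH is not in HTTP1Fast's method set
	// and such requests would never reach httpL.
	httpL := tcpm.Match(cmux.HTTP1Fast("PATCH"))
	grpcL := tcpm.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
	go func() { _ = http.Serve(httpL, handler) }()
	go acceptGRPC(grpcL) // stand-in for the real gRPC server
	return tcpm.Serve()
}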


@@ -14,7 +14,7 @@ FROM docker.io/library/registry:2.8@sha256:41f413c22d6156587e2a51f3e80c09808b8c7
FROM docker.io/bitnami/kubectl:1.26@sha256:90d54ce960bf00b6d06cf1c69075a120d88e9f3237096b237c0a5efcacd5ed0b as kubectl
-FROM docker.io/library/ubuntu:22.04@sha256:ac58ff7fe25edc58bdf0067ca99df00014dbd032e2246d30a722fa348fd799a5
+FROM docker.io/library/ubuntu:22.04@sha256:0bced47fffa3361afa981854fcabcd4577cd43cebbb808cea2b1f33a3dd7f508
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install --fix-missing -y \


@@ -475,6 +475,24 @@ func TestDeleteAppResource(t *testing.T) {
Expect(HealthIs(health.HealthStatusMissing))
}
// Fix for issue #2677, support PATCH in HTTP service
func TestPatchHttp(t *testing.T) {
ctx := Given(t)
ctx.
Path(guestbookPath).
When().
CreateApp().
Sync().
PatchAppHttp(`{"metadata": {"labels": { "test": "patch" }, "annotations": { "test": "patch" }}}`).
Then().
And(func(app *Application) {
assert.Equal(t, "patch", app.Labels["test"])
assert.Equal(t, "patch", app.Annotations["test"])
})
}
// demonstrate that we cannot use a standard sync when an immutable field is changed; we must use "force"
func TestImmutableChange(t *testing.T) {
SkipOnEnv(t, "OPENSHIFT")


@@ -1,12 +1,14 @@
package app
import (
"encoding/json"
"fmt"
"os"
log "github.com/sirupsen/logrus"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
client "github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
. "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/test/e2e/fixture"
"github.com/argoproj/argo-cd/v2/util/errors"
@@ -289,6 +291,28 @@ func (a *Actions) PatchApp(patch string) *Actions {
return a
}
func (a *Actions) PatchAppHttp(patch string) *Actions {
a.context.t.Helper()
var application Application
var patchType = "merge"
var appName = a.context.AppQualifiedName()
var appNamespace = a.context.AppNamespace()
patchRequest := &client.ApplicationPatchRequest{
Name: &appName,
PatchType: &patchType,
Patch: &patch,
AppNamespace: &appNamespace,
}
jsonBytes, err := json.MarshalIndent(patchRequest, "", " ")
errors.CheckError(err)
err = fixture.DoHttpJsonRequest("PATCH",
fmt.Sprintf("/api/v1/applications/%v", appName),
&application,
jsonBytes...)
errors.CheckError(err)
return a
}
func (a *Actions) AppSet(flags ...string) *Actions {
a.context.t.Helper()
args := []string{"app", "set", a.context.AppQualifiedName()}
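
The fixture above goes through the e2e framework's DoHttpJsonRequest helper, but outside the harness the new capability is an ordinary HTTP PATCH against the applications API. A standard-library sketch follows; the base URL, the bearer-token auth header, and the exact JSON field casing are assumptions that mirror the ApplicationPatchRequest fields above.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// patchApp sends a merge patch for the named application.
func patchApp(baseURL, token, name, patch string) error {
	body, err := json.Marshal(map[string]string{
		"name":      name,
		"patchType": "merge",
		"patch":     patch,
	})
	if err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPatch,
		fmt.Sprintf("%s/api/v1/applications/%s", baseURL, name), bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+token) // assumed auth scheme
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("patch failed: %s", resp.Status)
	}
	return nil
}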


@@ -108,15 +108,24 @@ export const ApplicationCreatePanel = (props: {
const [explicitPathType, setExplicitPathType] = React.useState<{path: string; type: models.AppSourceType}>(null);
const [destFormat, setDestFormat] = React.useState('URL');
const [retry, setRetry] = React.useState(false);
+const app = deepMerge(DEFAULT_APP, props.app || {});
+React.useEffect(() => {
+if (app?.spec?.destination?.name && app.spec.destination.name !== '') {
+setDestFormat('NAME');
+} else {
+setDestFormat('URL');
+}
+}, []);
function normalizeTypeFields(formApi: FormApi, type: models.AppSourceType) {
-const app = formApi.getFormState().values;
+const appToNormalize = formApi.getFormState().values;
for (const item of appTypes) {
if (item.type !== type) {
-delete app.spec.source[item.field];
+delete appToNormalize.spec.source[item.field];
}
}
-formApi.setAllValues(app);
+formApi.setAllValues(appToNormalize);
}
return (
@@ -132,16 +141,10 @@ export const ApplicationCreatePanel = (props: {
}>
{({projects, clusters, reposInfo}) => {
const repos = reposInfo.map(info => info.repo).sort();
-const app = deepMerge(DEFAULT_APP, props.app || {});
const repoInfo = reposInfo.find(info => info.repo === app.spec.source.repoURL);
if (repoInfo) {
normalizeAppSource(app, repoInfo.type || 'git');
}
-if (app?.spec?.destination?.name && app.spec.destination.name !== '') {
-setDestFormat('NAME');
-} else {
-setDestFormat('URL');
-}
return (
<div className='application-create-panel'>
{(yamlMode && (


@@ -24,7 +24,7 @@ export const ApplicationResourceList = ({
<div className='columns small-1 xxxlarge-1' />
<div className='columns small-2 xxxlarge-2'>NAME</div>
<div className='columns small-2 xxxlarge-2'>GROUP/KIND</div>
-<div className='columns small-1 xxxlarge-2'>SYNC ORDER</div>
+<div className='columns small-1 xxxlarge-1'>SYNC ORDER</div>
<div className='columns small-2 xxxlarge-2'>NAMESPACE</div>
<div className='columns small-2 xxxlarge-2'>CREATED AT</div>
<div className='columns small-2 xxxlarge-2'>STATUS</div>
@@ -62,7 +62,7 @@ export const ApplicationResourceList = ({
)}
</div>
<div className='columns small-2 xxxlarge-2'>{[res.group, res.kind].filter(item => !!item).join('/')}</div>
-<div className='columns small-1 xxxlarge-2'>{res.syncWave || '-'}</div>
+<div className='columns small-1 xxxlarge-1'>{res.syncWave || '-'}</div>
<div className='columns small-2 xxxlarge-2'>{res.namespace}</div>
<div className='columns small-2 xxxlarge-2'>{res.createdAt}</div>
<div className='columns small-2 xxxlarge-2'>


@@ -4,7 +4,14 @@ import {Tooltip} from 'argo-ui';
// SinceSecondsSelector is a component that renders a dropdown menu of time ranges
export const SinceSecondsSelector = ({sinceSeconds, setSinceSeconds}: {sinceSeconds: number; setSinceSeconds: (value: number) => void}) => (
<Tooltip content='Show logs since a given time'>
-<select className='argo-field' style={{marginRight: '1em'}} value={sinceSeconds} onChange={e => setSinceSeconds(parseInt(e.target.value, 10))}>
+<select
+className='argo-field'
+style={{marginRight: '1em'}}
+value={sinceSeconds}
+onChange={e => {
+const v = parseInt(e.target.value, 10);
+setSinceSeconds(!isNaN(v) ? v : null);
+}}>
<option value='60'>1m ago</option>
<option value='300'>5m ago</option>
<option value='600'>10m ago</option>


@@ -72,7 +72,13 @@ export const PodTerminalViewer: React.FC<PodTerminalViewerProps> = ({
const onConnectionMessage = (e: MessageEvent) => {
const msg = JSON.parse(e.data);
-connSubject.next(msg);
+if (!msg?.Code) {
+connSubject.next(msg);
+} else {
+// Reconnect due to a token refresh event
+onConnectionClose();
+setupConnection();
+}
};
const onConnectionOpen = () => {


@@ -105,7 +105,7 @@ export class VersionPanel extends React.Component<VersionPanelProps, {copyState:
}
private async onCopy(version: VersionMessage): Promise<void> {
-const stringifiedVersion = JSON.stringify(version, undefined, 4);
+const stringifiedVersion = JSON.stringify(version, undefined, 4) + '\n';
try {
await navigator.clipboard.writeText(stringifiedVersion);
this.setState({copyState: 'success'});


@@ -68,7 +68,7 @@ func (db *db) ListClusters(ctx context.Context) (*appv1.ClusterList, error) {
inClusterEnabled := settings.InClusterEnabled
hasInClusterCredentials := false
for _, clusterSecret := range clusterSecrets {
-cluster, err := secretToCluster(clusterSecret)
+cluster, err := SecretToCluster(clusterSecret)
if err != nil {
log.Errorf("could not unmarshal cluster secret %s", clusterSecret.Name)
continue
@@ -77,8 +77,6 @@ func (db *db) ListClusters(ctx context.Context) (*appv1.ClusterList, error) {
if inClusterEnabled {
hasInClusterCredentials = true
clusterList.Items = append(clusterList.Items, *cluster)
-} else {
-log.Errorf("failed to add cluster %q to cluster list: in-cluster server address is disabled in Argo CD settings", cluster.Name)
}
} else {
clusterList.Items = append(clusterList.Items, *cluster)
@@ -122,7 +120,7 @@ func (db *db) CreateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Clust
return nil, err
}
-cluster, err := secretToCluster(clusterSecret)
+cluster, err := SecretToCluster(clusterSecret)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "could not unmarshal cluster secret %s", clusterSecret.Name)
}
@@ -150,7 +148,7 @@ func (db *db) WatchClusters(ctx context.Context,
common.LabelValueSecretTypeCluster,
func(secret *apiv1.Secret) {
-cluster, err := secretToCluster(secret)
+cluster, err := SecretToCluster(secret)
if err != nil {
log.Errorf("could not unmarshal cluster secret %s", secret.Name)
return
@@ -165,12 +163,12 @@ func (db *db) WatchClusters(ctx context.Context,
},
func(oldSecret *apiv1.Secret, newSecret *apiv1.Secret) {
-oldCluster, err := secretToCluster(oldSecret)
+oldCluster, err := SecretToCluster(oldSecret)
if err != nil {
log.Errorf("could not unmarshal cluster secret %s", oldSecret.Name)
return
}
-newCluster, err := secretToCluster(newSecret)
+newCluster, err := SecretToCluster(newSecret)
if err != nil {
log.Errorf("could not unmarshal cluster secret %s", newSecret.Name)
return
@@ -220,7 +218,7 @@ func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, erro
return nil, err
}
if len(res) > 0 {
-return secretToCluster(res[0].(*apiv1.Secret))
+return SecretToCluster(res[0].(*apiv1.Secret))
}
if server == appv1.KubernetesInternalAPIServerAddr {
return db.getLocalCluster(), nil
@@ -241,7 +239,7 @@ func (db *db) GetProjectClusters(ctx context.Context, project string) ([]*appv1.
}
var res []*appv1.Cluster
for i := range secrets {
-cluster, err := secretToCluster(secrets[i].(*apiv1.Secret))
+cluster, err := SecretToCluster(secrets[i].(*apiv1.Secret))
if err != nil {
return nil, fmt.Errorf("failed to convert secret to cluster: %w", err)
}
@@ -295,7 +293,7 @@ func (db *db) UpdateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Clust
if err != nil {
return nil, err
}
-cluster, err := secretToCluster(clusterSecret)
+cluster, err := SecretToCluster(clusterSecret)
if err != nil {
log.Errorf("could not unmarshal cluster secret %s", clusterSecret.Name)
return nil, err
@@ -362,8 +360,8 @@ func clusterToSecret(c *appv1.Cluster, secret *apiv1.Secret) error {
return nil
}
-// secretToCluster converts a secret into a Cluster object
-func secretToCluster(s *apiv1.Secret) (*appv1.Cluster, error) {
+// SecretToCluster converts a secret into a Cluster object
+func SecretToCluster(s *apiv1.Secret) (*appv1.Cluster, error) {
var config appv1.ClusterConfig
if len(s.Data["config"]) > 0 {
err := json.Unmarshal(s.Data["config"], &config)
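
The only behavioral change in this file is in ListClusters, where the removed else branch stops logging an error for each disabled in-cluster secret; the remaining edits are the rename that exports SecretToCluster so other packages, such as the server's logInClusterWarnings above, can convert cluster Secrets themselves. A hedged sketch of that usage from outside the db package; the client-go listing and the literal label selector (mirroring common.LabelKeySecretType) are assumptions.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/util/db"
)

// inClusterSecretNames lists cluster Secrets and returns the names of those
// pointing at the in-cluster API server address.
func inClusterSecretNames(ctx context.Context, kubeClient kubernetes.Interface, ns string) ([]string, error) {
	secrets, err := kubeClient.CoreV1().Secrets(ns).List(ctx, metav1.ListOptions{
		LabelSelector: "argocd.argoproj.io/secret-type=cluster", // assumed value of common.LabelKeySecretType
	})
	if err != nil {
		return nil, err
	}
	var names []string
	for i := range secrets.Items {
		cluster, err := db.SecretToCluster(&secrets.Items[i])
		if err != nil {
			continue // the real code logs and skips unparseable secrets
		}
		if cluster.Server == v1alpha1.KubernetesInternalAPIServerAddr {
			names = append(names, secrets.Items[i].Name)
		}
	}
	return names, nil
}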


@@ -43,7 +43,7 @@ func Test_secretToCluster(t *testing.T) {
"config": []byte("{\"username\":\"foo\"}"),
},
}
-cluster, err := secretToCluster(secret)
+cluster, err := SecretToCluster(secret)
require.NoError(t, err)
assert.Equal(t, *cluster, v1alpha1.Cluster{
Name: "test",
@@ -89,7 +89,7 @@ func Test_secretToCluster_NoConfig(t *testing.T) {
"server": []byte("http://mycluster"),
},
}
-cluster, err := secretToCluster(secret)
+cluster, err := SecretToCluster(secret)
assert.NoError(t, err)
assert.Equal(t, *cluster, v1alpha1.Cluster{
Name: "test",
@@ -111,7 +111,7 @@ func Test_secretToCluster_InvalidConfig(t *testing.T) {
"config": []byte("{'tlsClientConfig':{'insecure':false}}"),
},
}
-cluster, err := secretToCluster(secret)
+cluster, err := SecretToCluster(secret)
require.Error(t, err)
assert.Nil(t, cluster)
}


@@ -151,7 +151,7 @@ func replaceListSecrets(obj []interface{}, secretValues map[string]string) []int
// https://dexidp.io/docs/connectors/
func needsRedirectURI(connectorType string) bool {
switch connectorType {
case "oidc", "saml", "microsoft", "linkedin", "gitlab", "github", "bitbucket-cloud", "openshift":
case "oidc", "saml", "microsoft", "linkedin", "gitlab", "github", "bitbucket-cloud", "openshift", "gitea", "google", "oauth":
return true
}
return false
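
Worth noting: the openshift connector was already handled by this switch; the fix adds gitea, google, and oauth, and the updated test below now covers openshift as well. For illustration, here is a sketch of how such a predicate is typically applied when rendering Dex connector configuration, assuming it sits in the same package as needsRedirectURI; the connectors slice and the redirectURI value are hypothetical.

// setRedirectURIs injects a redirectURI into every connector config that
// needs one. connectors is a hypothetical []map[string]interface{} parsed
// from the dex.config YAML.
func setRedirectURIs(connectors []map[string]interface{}, redirectURI string) {
	for _, connector := range connectors {
		connectorType, _ := connector["type"].(string)
		if !needsRedirectURI(connectorType) {
			continue
		}
		cfg, ok := connector["config"].(map[string]interface{})
		if !ok {
			cfg = map[string]interface{}{}
			connector["config"] = cfg
		}
		cfg["redirectURI"] = redirectURI // e.g. derived from the Argo CD external URL
	}
}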


@@ -270,7 +270,7 @@ func Test_GenerateDexConfig(t *testing.T) {
})
t.Run("Redirect config", func(t *testing.T) {
types := []string{"oidc", "saml", "microsoft", "linkedin", "gitlab", "github", "bitbucket-cloud"}
types := []string{"oidc", "saml", "microsoft", "linkedin", "gitlab", "github", "bitbucket-cloud", "openshift", "gitea", "google", "oauth"}
for _, c := range types {
assert.True(t, needsRedirectURI(c))
}


@@ -264,7 +264,8 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload interface{}) {
for _, source := range app.Spec.GetSources() {
if sourceRevisionHasChanged(source, revision, touchedHead) && sourceUsesURL(source, webURL, repoRegexp) {
if appFilesHaveChanged(&app, changedFiles) {
-_, err = argo.RefreshApp(appIf, app.ObjectMeta.Name, v1alpha1.RefreshTypeNormal)
+namespacedAppInterface := a.appClientset.ArgoprojV1alpha1().Applications(app.ObjectMeta.Namespace)
+_, err = argo.RefreshApp(namespacedAppInterface, app.ObjectMeta.Name, v1alpha1.RefreshTypeNormal)
if err != nil {
log.Warnf("Failed to refresh app '%s' for controller reprocessing: %v", app.ObjectMeta.Name, err)
continue
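
The bug being fixed: appIf was a client bound to a single namespace, so refresh requests for applications living in other namespaces were patched against the wrong one. Building the Applications interface from each app's own namespace fixes that, and the updated test below asserts it by tracking NamespacedName instead of bare names. A hedged sketch of what the per-namespace refresh boils down to; the annotation-patch shape is an assumption of this sketch, not a quote of argo.RefreshApp.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	versioned "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
)

// refreshApp patches the refresh annotation through a client scoped to the
// app's own namespace, so apps outside the control-plane namespace work too.
func refreshApp(clientset versioned.Interface, app *v1alpha1.Application) error {
	patch := []byte(`{"metadata":{"annotations":{"argocd.argoproj.io/refresh":"normal"}}}`)
	_, err := clientset.ArgoprojV1alpha1().
		Applications(app.Namespace).
		Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}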


@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"k8s.io/apimachinery/pkg/types"
"net/http"
"net/http/httptest"
"os"
@@ -149,10 +150,10 @@ func TestGitHubCommitEvent_MultiSource_Refresh(t *testing.T) {
func TestGitHubCommitEvent_AppsInOtherNamespaces(t *testing.T) {
hook := test.NewGlobal()
-patchedApps := make([]string, 0, 3)
+patchedApps := make([]types.NamespacedName, 0, 3)
reaction := func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patchAction := action.(kubetesting.PatchAction)
-patchedApps = append(patchedApps, patchAction.GetName())
+patchedApps = append(patchedApps, types.NamespacedName{Name: patchAction.GetName(), Namespace: patchAction.GetNamespace()})
return true, nil, nil
}
@@ -231,10 +232,10 @@ func TestGitHubCommitEvent_AppsInOtherNamespaces(t *testing.T) {
assert.Contains(t, logMessages, "Requested app 'app-to-refresh-in-globbed-namespace' refresh")
assert.NotContains(t, logMessages, "Requested app 'app-to-ignore' refresh")
-assert.Contains(t, patchedApps, "app-to-refresh-in-default-namespace")
-assert.Contains(t, patchedApps, "app-to-refresh-in-exact-match-namespace")
-assert.Contains(t, patchedApps, "app-to-refresh-in-globbed-namespace")
-assert.NotContains(t, patchedApps, "app-to-ignore")
+assert.Contains(t, patchedApps, types.NamespacedName{Name: "app-to-refresh-in-default-namespace", Namespace: "argocd"})
+assert.Contains(t, patchedApps, types.NamespacedName{Name: "app-to-refresh-in-exact-match-namespace", Namespace: "end-to-end-tests"})
+assert.Contains(t, patchedApps, types.NamespacedName{Name: "app-to-refresh-in-globbed-namespace", Namespace: "app-team-two"})
+assert.NotContains(t, patchedApps, types.NamespacedName{Name: "app-to-ignore", Namespace: "kube-system"})
assert.Len(t, patchedApps, 3)
hook.Reset()