Compare commits

...

7 Commits

Author SHA1 Message Date
Nikolaos Astyrakakis
ae10c0c6c3 fix(hook): Fixed hook code issues that caused stuck applications on "Deleting" state (Issues #18355 and #17191) (#26724)
Signed-off-by: Nikolaos Astyrakakis <nastyrakakis@gmail.com>
2026-04-09 05:19:38 -10:00
dependabot[bot]
9e80e058e7 chore(deps): bump library/golang from 1.26.1 to 1.26.2 in /test/container (#27248)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-04-09 17:52:21 +03:00
Eoin Shaughnessy
4220eddbf3 docs: fix typos (#27254)
Signed-off-by: Eoin Shaughnessy <eoinsh@gmail.com>
2026-04-09 16:21:45 +03:00
Alexander Matyushentsev
422ef230fa fix: Revert "fix: avoid calling UpdateRevisionForPaths unnecessary (#25151)" (#27241) 2026-04-09 06:39:05 -04:00
renovate[bot]
1fde0d075f chore(deps): update dependency formidable to v2.1.3 [security] (#27233)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Blake Pettersson <blake.pettersson@gmail.com>
2026-04-09 12:14:05 +02:00
dependabot[bot]
f86cd078fc chore(deps): bump github.com/coreos/go-oidc/v3 from 3.17.0 to 3.18.0 (#27247)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-04-09 09:52:19 +02:00
Alexander Matyushentsev
c3b498c2ae fix: cancel log stream goroutines on client disconnect (#27243)
Signed-off-by: Alexander Matyushentsev <alexander@akuity.io>
2026-04-09 12:53:34 +05:30
25 changed files with 369 additions and 89 deletions

View File

@@ -11,6 +11,7 @@ import (
"github.com/argoproj/argo-cd/gitops-engine/pkg/sync/hook"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
log "github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/rest"
@@ -103,6 +104,7 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
revisions = append(revisions, src.TargetRevision)
}
// Fetch target objects from Git to know which hooks should exist
targets, _, _, err := ctrl.appStateManager.GetRepoObjs(context.Background(), app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, true)
if err != nil {
return false, err
@@ -125,14 +127,14 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
if !isHookOfType(obj, hookType) {
continue
}
if runningHook := runningHooks[kube.GetResourceKey(obj)]; runningHook == nil {
if _, alreadyExists := runningHooks[kube.GetResourceKey(obj)]; !alreadyExists {
expectedHook[kube.GetResourceKey(obj)] = obj
}
}
// Create hooks that don't exist yet
createdCnt := 0
for _, obj := range expectedHook {
for key, obj := range expectedHook {
// Add app instance label so the hook can be tracked and cleaned up
labels := obj.GetLabels()
if labels == nil {
@@ -141,8 +143,13 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
labels[appLabelKey] = app.InstanceName(ctrl.namespace)
obj.SetLabels(labels)
logCtx.Infof("Creating %s hook resource: %s", hookType, key)
_, err = ctrl.kubectl.CreateResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), obj, metav1.CreateOptions{})
if err != nil {
if apierrors.IsAlreadyExists(err) {
logCtx.Warnf("Hook resource %s already exists, skipping", key)
continue
}
return false, err
}
createdCnt++
@@ -163,7 +170,8 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
progressingHooksCount := 0
var failedHooks []string
var failedHookObjects []*unstructured.Unstructured
for _, obj := range runningHooks {
for key, obj := range runningHooks {
hookHealth, err := health.GetResourceHealth(obj, healthOverrides)
if err != nil {
return false, err
@@ -180,12 +188,17 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
Status: health.HealthStatusHealthy,
}
}
switch hookHealth.Status {
case health.HealthStatusProgressing:
logCtx.Debugf("Hook %s is progressing", key)
progressingHooksCount++
case health.HealthStatusDegraded:
logCtx.Warnf("Hook %s is degraded: %s", key, hookHealth.Message)
failedHooks = append(failedHooks, fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()))
failedHookObjects = append(failedHookObjects, obj)
case health.HealthStatusHealthy:
logCtx.Debugf("Hook %s is healthy", key)
}
}
@@ -194,7 +207,7 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
logCtx.Infof("Deleting %d failed %s hook(s) to allow retry", len(failedHookObjects), hookType)
for _, obj := range failedHookObjects {
err = ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), metav1.DeleteOptions{})
if err != nil {
if err != nil && !apierrors.IsNotFound(err) {
logCtx.WithError(err).Warnf("Failed to delete failed hook %s/%s", obj.GetNamespace(), obj.GetName())
}
}
@@ -241,6 +254,10 @@ func (ctrl *ApplicationController) cleanupHooks(hookType HookType, liveObjs map[
hooks = append(hooks, obj)
}
if len(hooks) == 0 {
return true, nil
}
// Process hooks for deletion
for _, obj := range hooks {
deletePolicies := hook.DeletePolicies(obj)
@@ -267,7 +284,7 @@ func (ctrl *ApplicationController) cleanupHooks(hookType HookType, liveObjs map[
}
logCtx.Infof("Deleting %s hook %s/%s", hookType, obj.GetNamespace(), obj.GetName())
err = ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), metav1.DeleteOptions{})
if err != nil {
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
}

View File

@@ -3,8 +3,10 @@ package controller
import (
"testing"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func TestIsHookOfType(t *testing.T) {
@@ -312,3 +314,174 @@ func TestMultiHookOfType(t *testing.T) {
})
}
}
// TestExecuteHooksAlreadyExistsLogic verifies the "already exists" skip logic used by
// executeHooks: a target hook manifest must only be marked for creation when no matching
// hook resource is already running in the cluster, for both argocd and helm annotations
// and for PreDelete/PostDelete (including multi-hook annotations).
func TestExecuteHooksAlreadyExistsLogic(t *testing.T) {
	// newObj builds a batch/v1 Job manifest in the "default" namespace carrying the
	// given hook annotations; Jobs are the typical hook resource kind.
	newObj := func(name string, annot map[string]string) *unstructured.Unstructured {
		obj := &unstructured.Unstructured{}
		obj.SetGroupVersionKind(schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"})
		obj.SetName(name)
		obj.SetNamespace("default")
		obj.SetAnnotations(annot)
		return obj
	}
	tests := []struct {
		name        string
		hookType    []HookType
		targetAnnot map[string]string
		liveAnnot   map[string]string // nil -> object doesn't exist in cluster
		expectCreated bool
	}{
		// PRE DELETE TESTS
		{
			name:          "PreDelete (argocd): Not in cluster - should be created",
			hookType:      []HookType{PreDeleteHookType},
			targetAnnot:   map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
			liveAnnot:     nil,
			expectCreated: true,
		},
		{
			name:          "PreDelete (helm): Not in cluster - should be created",
			hookType:      []HookType{PreDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "pre-delete"},
			liveAnnot:     nil,
			expectCreated: true,
		},
		{
			name:          "PreDelete (argocd): Already exists - should be skipped",
			hookType:      []HookType{PreDeleteHookType},
			targetAnnot:   map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
			liveAnnot:     map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
			expectCreated: false,
		},
		{
			// Fixed label: this case exercises the helm annotation, not argocd.
			name:          "PreDelete (helm): Already exists - should be skipped",
			hookType:      []HookType{PreDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "pre-delete"},
			liveAnnot:     map[string]string{"helm.sh/hook": "pre-delete"},
			expectCreated: false,
		},
		{
			name:          "PreDelete (helm+argocd): helm annotation already exists - should be skipped",
			hookType:      []HookType{PreDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "pre-delete", "argocd.argoproj.io/hook": "PreDelete"},
			liveAnnot:     map[string]string{"helm.sh/hook": "pre-delete"},
			expectCreated: false,
		},
		{
			name:          "PreDelete (helm+argocd): argocd annotation already exists - should be skipped",
			hookType:      []HookType{PreDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "pre-delete", "argocd.argoproj.io/hook": "PreDelete"},
			liveAnnot:     map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
			expectCreated: false,
		},
		// POST DELETE TESTS
		{
			name:          "PostDelete (argocd): Not in cluster - should be created",
			hookType:      []HookType{PostDeleteHookType},
			targetAnnot:   map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
			liveAnnot:     nil,
			expectCreated: true,
		},
		{
			name:          "PostDelete (helm): Not in cluster - should be created",
			hookType:      []HookType{PostDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "post-delete"},
			liveAnnot:     nil,
			expectCreated: true,
		},
		{
			name:          "PostDelete (argocd): Already exists - should be skipped",
			hookType:      []HookType{PostDeleteHookType},
			targetAnnot:   map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
			liveAnnot:     map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
			expectCreated: false,
		},
		{
			name:          "PostDelete (helm): Already exists - should be skipped",
			hookType:      []HookType{PostDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "post-delete"},
			liveAnnot:     map[string]string{"helm.sh/hook": "post-delete"},
			expectCreated: false,
		},
		{
			name:          "PostDelete (helm+argocd): Already exists - should be skipped",
			hookType:      []HookType{PostDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "post-delete", "argocd.argoproj.io/hook": "PostDelete"},
			liveAnnot:     map[string]string{"helm.sh/hook": "post-delete", "argocd.argoproj.io/hook": "PostDelete"},
			expectCreated: false,
		},
		{
			name:          "PostDelete (helm+argocd): helm annotation already exists - should be skipped",
			hookType:      []HookType{PostDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "post-delete", "argocd.argoproj.io/hook": "PostDelete"},
			liveAnnot:     map[string]string{"helm.sh/hook": "post-delete"},
			expectCreated: false,
		},
		{
			name:          "PostDelete (helm+argocd): argocd annotation already exists - should be skipped",
			hookType:      []HookType{PostDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "post-delete", "argocd.argoproj.io/hook": "PostDelete"},
			liveAnnot:     map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
			expectCreated: false,
		},
		// MULTI HOOK TESTS - SKIP LOGIC
		{
			name:          "Multi-hook (argocd): Target is (Pre,Post), Cluster has (Pre,Post) - should be skipped",
			hookType:      []HookType{PreDeleteHookType, PostDeleteHookType},
			targetAnnot:   map[string]string{"argocd.argoproj.io/hook": "PreDelete,PostDelete"},
			liveAnnot:     map[string]string{"argocd.argoproj.io/hook": "PreDelete,PostDelete"},
			expectCreated: false,
		},
		{
			name:          "Multi-hook (helm): Target is (Pre,Post), Cluster has (Pre,Post) - should be skipped",
			hookType:      []HookType{PreDeleteHookType, PostDeleteHookType},
			targetAnnot:   map[string]string{"helm.sh/hook": "post-delete,pre-delete"},
			liveAnnot:     map[string]string{"helm.sh/hook": "post-delete,pre-delete"},
			expectCreated: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			targetObj := newObj("my-hook", tt.targetAnnot)
			targetKey := kube.GetResourceKey(targetObj)
			liveObjs := make(map[kube.ResourceKey]*unstructured.Unstructured)
			if tt.liveAnnot != nil {
				liveObjs[targetKey] = newObj("my-hook", tt.liveAnnot)
			}
			// Mirror executeHooks: collect live objects that are hooks of any requested type.
			runningHooks := map[kube.ResourceKey]*unstructured.Unstructured{}
			for key, obj := range liveObjs {
				for _, hookType := range tt.hookType {
					if isHookOfType(obj, hookType) {
						runningHooks[key] = obj
						break
					}
				}
			}
			// Mirror executeHooks: mark target hooks for creation unless already running.
			expectedHooksToCreate := map[kube.ResourceKey]*unstructured.Unstructured{}
			targets := []*unstructured.Unstructured{targetObj}
			for _, obj := range targets {
				// Bug fix: the original inner loop's `continue` only advanced the
				// hook-type loop and never skipped the target, so an object that was
				// not a hook of any requested type would still be considered for
				// creation. Filter the target properly before the existence check.
				isHook := false
				for _, hookType := range tt.hookType {
					if isHookOfType(obj, hookType) {
						isHook = true
						break
					}
				}
				if !isHook {
					continue
				}
				objKey := kube.GetResourceKey(obj)
				if _, alreadyExists := runningHooks[objKey]; !alreadyExists {
					expectedHooksToCreate[objKey] = obj
				}
			}
			if tt.expectCreated {
				assert.NotEmpty(t, expectedHooksToCreate, "Expected hook to be marked for creation")
			} else {
				assert.Empty(t, expectedHooksToCreate, "Expected hook to be skipped (already exists)")
			}
		})
	}
}

View File

@@ -41,18 +41,13 @@ import (
"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
appstatecache "github.com/argoproj/argo-cd/v3/util/cache/appstate"
"github.com/argoproj/argo-cd/v3/util/db"
"github.com/argoproj/argo-cd/v3/util/env"
"github.com/argoproj/argo-cd/v3/util/gpg"
utilio "github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/argo-cd/v3/util/settings"
"github.com/argoproj/argo-cd/v3/util/stats"
)
var (
ErrCompareStateRepo = errors.New("failed to get repo objects")
processManifestGeneratePathsEnabled = env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_PROCESS_MANIFEST_GENERATE_PATHS", true)
)
var ErrCompareStateRepo = errors.New("failed to get repo objects")
type resourceInfoProviderStub struct{}
@@ -75,7 +70,7 @@ type managedResource struct {
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
SyncAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, state *v1alpha1.OperationState)
GetRepoObjs(ctx context.Context, app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
}
@@ -261,14 +256,7 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
appNamespace := app.Spec.Destination.Namespace
apiVersions := argo.APIResourcesToStrings(apiResources, true)
updateRevisions := processManifestGeneratePathsEnabled &&
// updating revisions result is not required if automated sync is not enabled
app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.Automated != nil &&
// using updating revisions gains performance only if manifest generation is required.
// just reading pre-generated manifests is comparable to updating revisions time-wise
app.Status.SourceType != v1alpha1.ApplicationSourceTypeDirectory
if updateRevisions && repo.Depth == 0 && syncedRevision != "" && !source.IsRef() && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" && (syncedRevision != revision || app.Spec.HasMultipleSources()) {
if repo.Depth == 0 && syncedRevision != "" && !source.IsRef() && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" && (syncedRevision != revision || app.Spec.HasMultipleSources()) {
// Validate the manifest-generate-path annotation to avoid generating manifests if it has not changed.
updateRevisionResult, err := repoClient.UpdateRevisionForPaths(ctx, &apiclient.UpdateRevisionForPathsRequest{
Repo: repo,
@@ -367,7 +355,7 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
}
// ResolveGitRevision will resolve the given revision to a full commit SHA. Only works for git.
func (m *appStateManager) ResolveGitRevision(repoURL, revision string) (string, error) {
func (m *appStateManager) ResolveGitRevision(repoURL string, revision string) (string, error) {
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
if err != nil {
return "", fmt.Errorf("failed to connect to repo server: %w", err)
@@ -568,7 +556,7 @@ func partitionTargetObjsForSync(targetObjs []*unstructured.Unstructured) (syncOb
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied source. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
ts := stats.NewTimingStats()
logCtx := log.WithFields(applog.GetAppLogFields(app))

View File

@@ -212,7 +212,7 @@ export IMAGE_TAG=1.5.0-myrc
> [!NOTE]
> The image will be built for `linux/amd64` platform by default. If you are running on Mac with Apple chip (ARM),
> you need to specify the correct buld platform by running:
> you need to specify the correct build platform by running:
> ```bash
> export TARGET_ARCH=linux/arm64
> ```

View File

@@ -41,7 +41,7 @@ spec:
- https://kubernetes.default.svc
- https://some-other-cluster
# Git generator generates parametes either from directory structure of files within a git repo
# Git generator generates parameters either from directory structure of files within a git repo
- git:
repoURL: https://github.com/argoproj/argo-cd.git
# OPTIONAL: use directory structure of git repo to generate parameters

View File

@@ -86,7 +86,7 @@ data:
# Optional set of OIDC claims to request on the ID token.
requestedIDTokenClaims: {"groups": {"essential": true}}
# Configuration to customize resource behavior (optional) can be configured via splitted sub keys.
# Configuration to customize resource behavior (optional) can be configured via split sub keys.
# Keys are in the form: resource.customizations.ignoreDifferences.<group_kind>, resource.customizations.health.<group_kind>
# resource.customizations.actions.<group_kind>, resource.customizations.knownTypeFields.<group_kind>
# resource.customizations.ignoreResourceUpdates.<group_kind>
@@ -115,7 +115,7 @@ data:
jsonPointers:
- /metadata/resourceVersion
# Configuration to define customizations ignoring differences during watched resource updates can be configured via splitted sub key.
# Configuration to define customizations ignoring differences during watched resource updates can be configured via split sub key.
resource.customizations.ignoreResourceUpdates.argoproj.io_Application: |
jsonPointers:
- /status

View File

@@ -125,7 +125,7 @@ data:
send: [on-deployed-template]
```
Now, with the setup above, a sync will send the list of images to your Slack application. For more information about integratin with Slack, see the [Slack integration guide](services/slack.md).
Now, with the setup above, a sync will send the list of images to your Slack application. For more information about integration with Slack, see the [Slack integration guide](services/slack.md).
### Deduplicating images

View File

@@ -182,7 +182,7 @@ on how your workloads connect to the repository server.
### Configuring TLS to argocd-repo-server
The componenets `argocd-server`, `argocd-application-controller`, `argocd-notifications-controller`,
The components `argocd-server`, `argocd-application-controller`, `argocd-notifications-controller`,
and `argocd-applicationset-controller` communicate with the `argocd-repo-server`
using a gRPC API over TLS. By default, `argocd-repo-server` generates a non-persistent,
self-signed certificate to use for its gRPC endpoint on startup. Because the
@@ -190,7 +190,7 @@ self-signed certificate to use for its gRPC endpoint on startup. Because the
is not available to outside consumers for verification. These components will use a
non-validating connection to the `argocd-repo-server` for this reason.
To change this behavior to be more secure by having these componenets validate the TLS certificate of the
To change this behavior to be more secure by having these components validate the TLS certificate of the
`argocd-repo-server` endpoint, the following steps need to be performed:
* Create a persistent TLS certificate to be used by `argocd-repo-server`, as

View File

@@ -272,7 +272,7 @@ curl -X POST -H "Authorization: Bearer $ARGOCD_TOKEN" -H "Content-Type: applicat
}' "http://$YOUR_ARGOCD_URL/api/v1/applications/$YOUR_APP_NAME/sync"
```
It is also possible to sync such an Applicaton using the UI, with `ApplyOutOfSyncOnly` option unchecked. However, currently, performing a sync without `ApplyOutOfSyncOnly` option is not possible using the CLI.
It is also possible to sync such an Application using the UI, with `ApplyOutOfSyncOnly` option unchecked. However, currently, performing a sync without `ApplyOutOfSyncOnly` option is not possible using the CLI.
##### Other users

View File

@@ -29,7 +29,7 @@ When Argo CD is upgraded manually using plain manifests or Kustomize overlays, i
Users upgrading Argo CD manually using `helm upgrade` are not impacted by this change, since Helm does not use client-side apply and does not result in creation of the `last-applied` annotation.
#### Users who previously upgraded to 3.3.0 or 3.3.1
In some cases, after upgrading to one of those versions and applying Server-Side Apply, the following error occured:
In some cases, after upgrading to one of those versions and applying Server-Side Apply, the following error occurred:
`one or more synchronization tasks completed unsuccessfully, reason: Failed to perform client-side apply migration: failed to perform client-side apply migration on manager kubectl-client-side-apply: error when patching "/dev/shm/2047509016": CustomResourceDefinition.apiextensions.k8s.io "applicationsets.argoproj.io" is invalid: metadata.annotations: Too long: may not be more than 262144 bytes`.
Users that have configured the sync option `ClientSideApplyMigration=false` as a temporary remediation for the above error, should remove it after upgrading to `3.3.2`. Disabling `ClientSideApplyMigration` imposes a risk to encounter conflicts between K8s field managers in the future.

View File

@@ -68,7 +68,7 @@ deploy:
## Configuring RBAC
When using ArgoCD global RBAC comfig map, you can define your `policy.csv` like so:
When using ArgoCD global RBAC config map, you can define your `policy.csv` like so:
```yaml
configs:

View File

@@ -142,7 +142,7 @@ We provide the entire application tree to accomplish two things:
Further, if an Extension needs richer information than that provided by the Resource Tree, it can request additional information about a resource from the Argo CD API server.
```typescript
interface Extention {
interface Extension {
ResourceTab: React.Component<{resource: any}>;
}
```

View File

@@ -135,7 +135,7 @@ one in charge of a given resource.
#### Include resource identifies in the `app.kubernetes.io/instance` annotation
The `app.kubernetes.io/instance` annotation might be accidently added or copied
The `app.kubernetes.io/instance` annotation might be accidentally added or copied
same as label. To prevent Argo CD confusion the annotation value should include
the identifier of the resource annotation was applied to. The resource identifier
includes the group, kind, namespace and name of the resource. It is proposed to use `;`

View File

@@ -42,7 +42,7 @@ A bounty is a special proposal created under `docs/proposals/feature-bounties`.
#### Claiming a Bounty
* Argo will pay out bounties once a pull request implementing the requested features/changes/fixes is merged.
* A bounty is limited to a single successful PR.
* Those interested in working on the bounty are encouraged to comment on the issue, and users may team up to split a bounty if they prefer but collaboration is not required and users should not shame eachother for their preferences to work alone or together.
* Those interested in working on the bounty are encouraged to comment on the issue, and users may team up to split a bounty if they prefer but collaboration is not required and users should not shame each other for their preferences to work alone or together.
* A comment of interest does not constitute a claim and will not be treated as such.
* The first pull request submitted that is ready for merge will be reviewed by maintainers. Maintainers will also consider any competing pull requests submitted within 24-hours. We expect this will be a very rare circumstance. If multiple, high-quality, merge ready pull requests are submitted, 3-5 Approvers for the sub-project will vote to decide the final pull request merged.

2
go.mod
View File

@@ -24,7 +24,7 @@ require (
github.com/cenkalti/backoff/v5 v5.0.3
github.com/cespare/xxhash/v2 v2.3.0
github.com/chainguard-dev/git-urls v1.0.2
github.com/coreos/go-oidc/v3 v3.17.0
github.com/coreos/go-oidc/v3 v3.18.0
github.com/cyphar/filepath-securejoin v0.6.1
github.com/dlclark/regexp2 v1.11.5
github.com/dustin/go-humanize v1.0.1

4
go.sum
View File

@@ -217,8 +217,8 @@ github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJ
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-oidc/v3 v3.18.0 h1:V9orjXynvu5wiC9SemFTWnG4F45v403aIcjWo0d41+A=
github.com/coreos/go-oidc/v3 v3.18.0/go.mod h1:DYCf24+ncYi+XkIH97GY1+dqoRlbaSI26KVTCI9SrY4=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=

View File

@@ -1908,17 +1908,23 @@ func (s *Server) PodLogs(q *application.ApplicationPodLogsQuery, ws application.
// if k8s failed to start steaming logs (typically because Pod is not ready yet)
// then the error should be shown in the UI so that user know the reason
if err != nil {
logStream <- logEntry{line: err.Error()}
select {
case logStream <- logEntry{line: err.Error()}:
case <-ws.Context().Done():
}
} else {
parseLogsStream(podName, stream, logStream)
parseLogsStream(ws.Context(), podName, stream, logStream)
}
close(logStream)
}()
}
logStream := mergeLogStreams(streams, time.Millisecond*100)
logStream := mergeLogStreams(ws.Context(), streams, time.Millisecond*100)
sentCount := int64(0)
done := make(chan error)
// Buffered so the goroutine below can always send and exit, even if PodLogs has already
// returned due to client disconnect (ws.Context().Done). Without this, the goroutine
// would block on "done <- err" forever, leaking memory via bufio and mergeLogStreams buffers.
done := make(chan error, 1)
go func() {
for entry := range logStream {
if entry.err != nil {

View File

@@ -2,6 +2,7 @@ package application
import (
"bufio"
"context"
"errors"
"io"
"strings"
@@ -17,8 +18,9 @@ type logEntry struct {
err error
}
// parseLogsStream converts given ReadCloser into channel that emits log entries
func parseLogsStream(podName string, stream io.ReadCloser, ch chan logEntry) {
// parseLogsStream converts given ReadCloser into channel that emits log entries.
// It stops early if ctx is cancelled, avoiding goroutine leaks when the caller disconnects.
func parseLogsStream(ctx context.Context, podName string, stream io.ReadCloser, ch chan logEntry) {
bufReader := bufio.NewReader(stream)
eof := false
for !eof {
@@ -30,7 +32,10 @@ func parseLogsStream(podName string, stream io.ReadCloser, ch chan logEntry) {
break
}
} else if err != nil && !errors.Is(err, io.EOF) {
ch <- logEntry{err: err}
select {
case ch <- logEntry{err: err}:
case <-ctx.Done():
}
break
}
@@ -39,13 +44,20 @@ func parseLogsStream(podName string, stream io.ReadCloser, ch chan logEntry) {
timeStampStr := parts[0]
logTime, err := time.Parse(time.RFC3339Nano, timeStampStr)
if err != nil {
ch <- logEntry{err: err}
select {
case ch <- logEntry{err: err}:
case <-ctx.Done():
}
break
}
lines := strings.Join(parts[1:], " ")
for line := range strings.SplitSeq(lines, "\r") {
ch <- logEntry{line: line, timeStamp: logTime, podName: podName}
select {
case ch <- logEntry{line: line, timeStamp: logTime, podName: podName}:
case <-ctx.Done():
return
}
}
}
}
@@ -53,7 +65,8 @@ func parseLogsStream(podName string, stream io.ReadCloser, ch chan logEntry) {
// mergeLogStreams merge two stream of logs and ensures that merged logs are sorted by timestamp.
// The implementation uses merge sort: method reads next log entry from each stream if one of streams is empty
// it waits for no longer than specified duration and then merges available entries.
func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) chan logEntry {
// ctx cancellation causes all internal goroutines to exit promptly, preventing goroutine and memory leaks.
func mergeLogStreams(ctx context.Context, streams []chan logEntry, bufferingDuration time.Duration) chan logEntry {
merged := make(chan logEntry)
// buffer of received log entries for each stream
@@ -70,7 +83,17 @@ func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) c
lock.Lock()
entriesPerStream[index] = append(entriesPerStream[index], next)
lock.Unlock()
process <- struct{}{}
select {
case process <- struct{}{}:
case <-ctx.Done():
// drain remaining entries so parseLogsStream goroutine can exit
for range streams[index] {
}
if atomic.AddInt32(&streamsCount, -1) == 0 {
close(process)
}
return
}
}
// stop processing after all streams got closed
if atomic.AddInt32(&streamsCount, -1) == 0 {
@@ -111,7 +134,11 @@ func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) c
}
lock.Unlock()
for i := range entries {
merged <- entries[i]
select {
case merged <- entries[i]:
case <-ctx.Done():
return false
}
}
return len(entries) > 0
}
@@ -120,11 +147,11 @@ func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) c
var sentAt time.Time
ticker := time.NewTicker(bufferingDuration)
done := make(chan struct{})
tickerDone := make(chan struct{})
go func() {
for {
select {
case <-done:
case <-tickerDone:
return
case <-ticker.C:
sentAtLock.Lock()
@@ -133,18 +160,30 @@ func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) c
_ = send(true)
sentAt = time.Now()
}
sentAtLock.Unlock()
}
}
}()
go func() {
for range process {
if send(false) {
sentAtLock.Lock()
sentAt = time.Now()
sentAtLock.Unlock()
loop:
for {
select {
case _, ok := <-process:
if !ok {
break loop
}
if send(false) {
sentAtLock.Lock()
sentAt = time.Now()
sentAtLock.Unlock()
}
case <-ctx.Done():
// client disconnected: stop immediately without flushing
ticker.Stop()
tickerDone <- struct{}{}
close(merged)
return
}
}
@@ -152,10 +191,10 @@ func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) c
ticker.Stop()
// ticker.Stop() does not close the channel, and it does not wait for the channel to be drained. So we need to
// explicitly prevent the gorountine from leaking by closing the channel. We also need to prevent the goroutine
// explicitly prevent the goroutine from leaking by closing the channel. We also need to prevent the goroutine
// from calling `send` again, because `send` pushes to the `merged` channel which we're about to close.
// This describes the approach nicely: https://stackoverflow.com/questions/17797754/ticker-stop-behaviour-in-golang
done <- struct{}{}
tickerDone <- struct{}{}
close(merged)
}()
return merged

View File

@@ -1,6 +1,7 @@
package application
import (
"context"
"io"
"strings"
"testing"
@@ -16,7 +17,7 @@ func TestParseLogsStream_Successful(t *testing.T) {
res := make(chan logEntry)
go func() {
parseLogsStream("test", r, res)
parseLogsStream(context.Background(), "test", r, res)
close(res)
}()
@@ -39,7 +40,7 @@ func TestParseLogsStream_ParsingError(t *testing.T) {
res := make(chan logEntry)
go func() {
parseLogsStream("test", r, res)
parseLogsStream(context.Background(), "test", r, res)
close(res)
}()
@@ -55,19 +56,19 @@ func TestParseLogsStream_ParsingError(t *testing.T) {
func TestMergeLogStreams(t *testing.T) {
first := make(chan logEntry)
go func() {
parseLogsStream("first", io.NopCloser(strings.NewReader(`2021-02-09T00:00:01Z 1
parseLogsStream(context.Background(), "first", io.NopCloser(strings.NewReader(`2021-02-09T00:00:01Z 1
2021-02-09T00:00:03Z 3`)), first)
close(first)
}()
second := make(chan logEntry)
go func() {
parseLogsStream("second", io.NopCloser(strings.NewReader(`2021-02-09T00:00:02Z 2
parseLogsStream(context.Background(), "second", io.NopCloser(strings.NewReader(`2021-02-09T00:00:02Z 2
2021-02-09T00:00:04Z 4`)), second)
close(second)
}()
merged := mergeLogStreams([]chan logEntry{first, second}, time.Second)
merged := mergeLogStreams(context.Background(), []chan logEntry{first, second}, time.Second)
var lines []string
for entry := range merged {
lines = append(lines, entry.line)
@@ -83,18 +84,18 @@ func TestMergeLogStreams_RaceCondition(_ *testing.T) {
second := make(chan logEntry)
go func() {
parseLogsStream("first", io.NopCloser(strings.NewReader(`2021-02-09T00:00:01Z 1`)), first)
parseLogsStream(context.Background(), "first", io.NopCloser(strings.NewReader(`2021-02-09T00:00:01Z 1`)), first)
time.Sleep(time.Duration(i%3) * time.Millisecond)
close(first)
}()
go func() {
parseLogsStream("second", io.NopCloser(strings.NewReader(`2021-02-09T00:00:02Z 2`)), second)
parseLogsStream(context.Background(), "second", io.NopCloser(strings.NewReader(`2021-02-09T00:00:02Z 2`)), second)
time.Sleep(time.Duration((i+1)%3) * time.Millisecond)
close(second)
}()
merged := mergeLogStreams([]chan logEntry{first, second}, 1*time.Millisecond)
merged := mergeLogStreams(context.Background(), []chan logEntry{first, second}, 1*time.Millisecond)
// Drain the channel
for range merged {
@@ -105,3 +106,39 @@ func TestMergeLogStreams_RaceCondition(_ *testing.T) {
// and channel closer.
}
}
// TestMergeLogStreams_ContextCancellation verifies that cancelling the context causes mergeLogStreams
// to close the merged channel promptly, allowing all internal goroutines to exit without leaking.
// TestMergeLogStreams_ContextCancellation verifies that cancelling the context causes mergeLogStreams
// to close the merged channel promptly, allowing all internal goroutines to exit without leaking.
func TestMergeLogStreams_ContextCancellation(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())

	// An io.Pipe with an idle writer keeps parseLogsStream blocked on its read,
	// so no entries are ever produced.
	reader, writer := io.Pipe()
	entries := make(chan logEntry)
	go func() {
		defer close(entries)
		parseLogsStream(ctx, "test", reader, entries)
	}()

	merged := mergeLogStreams(ctx, []chan logEntry{entries}, time.Second)

	// Cancel the context before the pipe delivers any data.
	cancel()
	_ = writer.Close()

	// Drain merged in the background; closing `drained` signals that
	// mergeLogStreams closed its output channel instead of blocking forever.
	drained := make(chan struct{})
	go func() {
		defer close(drained)
		for range merged {
		}
	}()

	select {
	case <-drained:
		// The merged channel closed promptly — no goroutine leak.
	case <-time.After(5 * time.Second):
		t.Fatal("mergeLogStreams did not close merged channel after context cancellation")
	}
}

View File

@@ -8,7 +8,7 @@ RUN ln -s /usr/lib/$(uname -m)-linux-gnu /usr/lib/linux-gnu
# Please make sure to also check the contained yarn version and update the references below when upgrading this image's version
FROM docker.io/library/node:22.9.0@sha256:8398ea18b8b72817c84af283f72daed9629af2958c4f618fe6db4f453c5c9328 AS node
FROM docker.io/library/golang:1.26.1@sha256:cd78d88e00afadbedd272f977d375a6247455f3a4b1178f8ae8bbcb201743a8a AS golang
FROM docker.io/library/golang:1.26.2@sha256:2a2b4b5791cea8ae09caecba7bad0bd9631def96e5fe362e4a5e67009fe4ae61 AS golang
FROM docker.io/library/registry:3.1@sha256:afcd13fd045b8859ac4f60fef26fc2d2f9b7b9d9e604c3c4f7c2fb1b94f95a64 AS registry

View File

@@ -33,6 +33,6 @@ export default [
files: ['./src/**/*.{ts,tsx}']
},
{
ignores: ['dist', 'assets', '**/*.config.js', '__mocks__', 'coverage', '**/*.test.{ts,tsx}']
ignores: ['dist', 'assets', '**/*.config.js', 'jest.setup.js', '__mocks__', 'coverage', '**/*.test.{ts,tsx}']
}
];

View File

@@ -1,6 +1,7 @@
module.exports = {
preset: 'ts-jest',
testEnvironment: 'jsdom',
setupFiles: ['./jest.setup.js'],
reporters: ['default', 'jest-junit'],
collectCoverage: true,
transformIgnorePatterns: ['node_modules/(?!(argo-ui|.*\\.pnpm.*argo-ui.*)/)'],

9
ui/jest.setup.js Normal file
View File

@@ -0,0 +1,9 @@
// TODO: This needs to be polyfilled until jest-environment-jsdom decides to pull in a version of jsdom that's >=27.4.0
const {TextEncoder, TextDecoder} = require('util');
if (typeof globalThis.TextEncoder === 'undefined') {
globalThis.TextEncoder = TextEncoder;
}
if (typeof globalThis.TextDecoder === 'undefined') {
globalThis.TextDecoder = TextDecoder;
}

View File

@@ -64,7 +64,7 @@
"@types/react-dom": "^16.8.2",
"normalize-url": "4.5.1",
"rxjs": "6.6.7",
"formidable": "2.1.2"
"formidable": "2.1.3"
}
},
"devDependencies": {

46
ui/pnpm-lock.yaml generated
View File

@@ -9,7 +9,7 @@ overrides:
'@types/react-dom': ^16.8.2
normalize-url: 4.5.1
rxjs: 6.6.7
formidable: 2.1.2
formidable: 2.1.3
importers:
@@ -29,7 +29,7 @@ importers:
version: 6.1.6(react-dom@16.14.0(react@16.14.0))(react@16.14.0)
argo-ui:
specifier: git+https://github.com/argoproj/argo-ui.git
version: https://codeload.github.com/argoproj/argo-ui/tar.gz/2bfda77cec418c4123fe61e35f22d09432af15b7(@types/react@16.14.65)(jquery@3.7.1)(react-dom@16.14.0(react@16.14.0))(react@16.14.0)(what-input@5.2.12)
version: https://codeload.github.com/argoproj/argo-ui/tar.gz/a1c32a45e83fdda4baafc7ca3105c3ead383f8ba(@types/react@16.14.65)(jquery@3.7.1)(react-dom@16.14.0(react@16.14.0))(react@16.14.0)(what-input@5.2.12)
buffer:
specifier: ^6.0.3
version: 6.0.3
@@ -1455,6 +1455,10 @@ packages:
'@leichtgewicht/ip-codec@2.0.5':
resolution: {integrity: sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==}
'@noble/hashes@1.8.0':
resolution: {integrity: sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==}
engines: {node: ^14.21.3 || >=16}
'@nodelib/fs.scandir@2.1.5':
resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
engines: {node: '>= 8'}
@@ -1475,6 +1479,9 @@ packages:
engines: {node: '>=10'}
deprecated: This functionality has been moved to @npmcli/fs
'@paralleldrive/cuid2@2.3.1':
resolution: {integrity: sha512-XO7cAxhnTZl0Yggq6jOgjiOHhbgcO4NqFqwSmQpjK3b6TEE6Uj/jfSk6wzYyemh3+I0sHirKSetjQwn5cZktFw==}
'@parcel/watcher-android-arm64@2.5.1':
resolution: {integrity: sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==}
engines: {node: '>= 10.0.0'}
@@ -2078,8 +2085,8 @@ packages:
arg@4.1.3:
resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==}
argo-ui@https://codeload.github.com/argoproj/argo-ui/tar.gz/2bfda77cec418c4123fe61e35f22d09432af15b7:
resolution: {tarball: https://codeload.github.com/argoproj/argo-ui/tar.gz/2bfda77cec418c4123fe61e35f22d09432af15b7}
argo-ui@https://codeload.github.com/argoproj/argo-ui/tar.gz/a1c32a45e83fdda4baafc7ca3105c3ead383f8ba:
resolution: {tarball: https://codeload.github.com/argoproj/argo-ui/tar.gz/a1c32a45e83fdda4baafc7ca3105c3ead383f8ba}
version: 1.0.0
peerDependencies:
'@types/react': ^16.9.3
@@ -2596,6 +2603,7 @@ packages:
deep-diff@0.3.8:
resolution: {integrity: sha512-yVn6RZmHiGnxRKR9sJb3iVV2XTF1Ghh2DiWRZ3dMnGc43yUdWWF/kX6lQyk3+P84iprfWKU/8zFTrlkvtFm1ug==}
deprecated: Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.
deep-equal@1.1.2:
resolution: {integrity: sha512-5tdhKF6DbU7iIzrIOa1AOUt39ZRm13cmL1cGEh//aqR8x9+tNfbywRf0n5FD/18OKMdo7DNEtrX2t22ZAkI+eg==}
@@ -3073,9 +3081,9 @@ packages:
resolution: {integrity: sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==}
engines: {node: '>= 6'}
formidable@2.1.2:
resolution: {integrity: sha512-CM3GuJ57US06mlpQ47YcunuUZ9jpm8Vx+P2CGt2j7HpgkKZO/DJYQ0Bobim8G6PFQmK5lOqOOdUXboU+h73A4g==}
deprecated: 'ACTION REQUIRED: SWITCH TO v3 - v1 and v2 are VULNERABLE! v1 is DEPRECATED FOR OVER 2 YEARS! Use formidable@latest or try formidable-mini for fresh projects'
formidable@2.1.3:
resolution: {integrity: sha512-vDI5JjeALeGXpyL8v71ZG2VgHY5zD6qg1IvypU7aJCYvREZyhawrYJxMdsWO+m5DIGLiMiDH71yEN8RO4wQAMQ==}
deprecated: 'ATTENTION: please upgrade to v3! The v1 and v2 versions are pretty old and deprecated'
forwarded@0.2.0:
resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==}
@@ -3168,7 +3176,7 @@ packages:
glob@7.2.3:
resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
deprecated: Glob versions prior to v9 are no longer supported
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
global@4.4.0:
resolution: {integrity: sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==}
@@ -3243,10 +3251,6 @@ packages:
resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==}
hasBin: true
hexoid@1.0.0:
resolution: {integrity: sha512-QFLV0taWQOZtvIRIAdBChesmogZrtuXvVWsFHZTk2SU+anspqZ2vMnoLg7IE1+Uk16N19APic1BuF8bC8c2m5g==}
engines: {node: '>=8'}
history@4.10.1:
resolution: {integrity: sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==}
@@ -5132,6 +5136,7 @@ packages:
tar@6.2.1:
resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==}
engines: {node: '>=10'}
deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
teeny-request@7.1.1:
resolution: {integrity: sha512-iwY6rkW5DDGq8hE2YgNQlKbptYpY5Nn2xecjQiNjOXWbKzPGUfmeUBCSQbbr306d7Z7U2N0TPl+/SwYRfua1Dg==}
@@ -5526,6 +5531,7 @@ packages:
whatwg-encoding@2.0.0:
resolution: {integrity: sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==}
engines: {node: '>=12'}
deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation
whatwg-mimetype@3.0.0:
resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==}
@@ -6889,6 +6895,8 @@ snapshots:
'@leichtgewicht/ip-codec@2.0.5': {}
'@noble/hashes@1.8.0': {}
'@nodelib/fs.scandir@2.1.5':
dependencies:
'@nodelib/fs.stat': 2.0.5
@@ -6911,6 +6919,10 @@ snapshots:
mkdirp: 1.0.4
rimraf: 3.0.2
'@paralleldrive/cuid2@2.3.1':
dependencies:
'@noble/hashes': 1.8.0
'@parcel/watcher-android-arm64@2.5.1':
optional: true
@@ -7581,7 +7593,7 @@ snapshots:
arg@4.1.3: {}
argo-ui@https://codeload.github.com/argoproj/argo-ui/tar.gz/2bfda77cec418c4123fe61e35f22d09432af15b7(@types/react@16.14.65)(jquery@3.7.1)(react-dom@16.14.0(react@16.14.0))(react@16.14.0)(what-input@5.2.12):
argo-ui@https://codeload.github.com/argoproj/argo-ui/tar.gz/a1c32a45e83fdda4baafc7ca3105c3ead383f8ba(@types/react@16.14.65)(jquery@3.7.1)(react-dom@16.14.0(react@16.14.0))(react@16.14.0)(what-input@5.2.12):
dependencies:
'@fortawesome/fontawesome-free': 6.7.2
'@tippy.js/react': 3.1.1(react-dom@16.14.0(react@16.14.0))(react@16.14.0)
@@ -8846,10 +8858,10 @@ snapshots:
hasown: 2.0.2
mime-types: 2.1.35
formidable@2.1.2:
formidable@2.1.3:
dependencies:
'@paralleldrive/cuid2': 2.3.1
dezalgo: 1.0.4
hexoid: 1.0.0
once: 1.4.0
qs: 6.15.0
@@ -9013,8 +9025,6 @@ snapshots:
he@1.2.0: {}
hexoid@1.0.0: {}
history@4.10.1:
dependencies:
'@babel/runtime': 7.27.6
@@ -11224,7 +11234,7 @@ snapshots:
debug: 4.4.1
fast-safe-stringify: 2.1.1
form-data: 4.0.4
formidable: 2.1.2
formidable: 2.1.3
methods: 1.1.2
mime: 2.6.0
qs: 6.15.0