Compare commits

..

1 Commits

Author SHA1 Message Date
dependabot[bot]
568d1b3dab chore(deps): bump pnpm/action-setup from 4.1.0 to 5.0.0
Bumps [pnpm/action-setup](https://github.com/pnpm/action-setup) from 4.1.0 to 5.0.0.
- [Release notes](https://github.com/pnpm/action-setup/releases)
- [Commits](a7487c7e89...fc06bc1257)

---
updated-dependencies:
- dependency-name: pnpm/action-setup
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-04-09 03:13:29 +00:00
49 changed files with 164 additions and 768 deletions

View File

@@ -18,7 +18,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -36,7 +36,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -24,7 +24,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -35,7 +35,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@@ -63,7 +63,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Checkout code
@@ -88,7 +88,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Checkout code
@@ -124,7 +124,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Checkout code
@@ -153,7 +153,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Create checkout directory
@@ -226,7 +226,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Create checkout directory
@@ -295,7 +295,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Checkout code
@@ -357,13 +357,13 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Install pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v5.0.0
with:
package_json_file: ui/package.json
- name: Setup NodeJS
@@ -415,7 +415,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Checkout code
@@ -496,7 +496,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Free Disk Space (Ubuntu)
@@ -521,7 +521,7 @@ jobs:
# renovate: datasource=node-version packageName=node versioning=node
node-version: '22.9.0'
- name: Install pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v5.0.0
with:
package_json_file: ui/package.json
- name: GH actions workaround - Kill XSP4 process
@@ -632,7 +632,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- run: |

View File

@@ -45,7 +45,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -61,7 +61,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -37,7 +37,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -34,7 +34,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -27,7 +27,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -51,7 +51,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
- name: Checkout code
@@ -226,7 +226,7 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Install pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v5.0.0
with:
package_json_file: ui/package.json

View File

@@ -16,23 +16,11 @@ jobs:
runs-on: ubuntu-24.04
if: github.repository == 'argoproj/argo-cd'
steps:
- name: Harden the runner (Block unknown outbound calls)
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: block
disable-sudo-and-containers: "false" # renovatebot runs in `docker run`
allowed-endpoints: >
github.com:443
api.github.com:443
raw.githubusercontent.com:443
release-assets.githubusercontent.com:443
ghcr.io:443
pkg-containers.githubusercontent.com:443
hub.docker.com:443
proxy.golang.org:443
nodejs.org:443
pypi.org:443
egress-policy: audit
- name: Get token
id: get_token

View File

@@ -31,7 +31,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit

View File

@@ -16,14 +16,11 @@ jobs:
stale:
runs-on: ubuntu-24.04
steps:
- name: Harden the runner (Block unknown outbound calls)
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: block
disable-sudo-and-containers: "true"
allowed-endpoints: >
api.github.com:443
egress-policy: audit
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
with:

View File

@@ -22,7 +22,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
if: ${{ vars.disable_harden_runner != 'true' }}
uses: step-security/harden-runner@f808768d1510423e83855289c910610ca9b43176 # v2.17.0
uses: step-security/harden-runner@fe104658747b27e96e4f7e80cd0a94068e53901d # v2.16.1
with:
egress-policy: audit
agent-enabled: "false"

View File

@@ -4,7 +4,6 @@ WORKDIR /app/ui
COPY ui /app/ui
RUN npm install -g corepack@0.34.6 && corepack enable && pnpm install --frozen-lockfile
ENTRYPOINT ["pnpm", "start"]
RUN npm install -g corepack@0.34.6 && corepack enable && pnpm install
ENTRYPOINT ["pnpm", "start"]

View File

@@ -662,17 +662,8 @@ install-go-tools-local:
dep-ui: test-tools-image
$(call run-in-test-client,make dep-ui-local)
.PHONY: dep-ui-local
dep-ui-local:
cd ui && pnpm install --frozen-lockfile
.PHONY: run-pnpm
run-pnpm: test-tools-image
$(call run-in-test-client,make 'PNPM_COMMAND=$(PNPM_COMMAND)' run-pnpm-local)
.PHONY: run-pnpm-local
run-pnpm-local:
cd ui && pnpm $(PNPM_COMMAND)
cd ui && pnpm install
start-test-k8s:
go run ./hack/k8s

View File

@@ -275,7 +275,7 @@ docker_build(
only=['ui'],
live_update=[
sync('ui', '/app/ui'),
run('sh -c "cd /app/ui && pnpm install --frozen-lockfile"', trigger=['/app/ui/package.json', '/app/ui/pnpm-lock.yaml']),
run('sh -c "cd /app/ui && pnpm install"', trigger=['/app/ui/package.json', '/app/ui/pnpm-lock.yaml']),
],
)

View File

@@ -11,7 +11,6 @@ import (
"github.com/argoproj/argo-cd/gitops-engine/pkg/sync/hook"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
log "github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/rest"
@@ -104,7 +103,6 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
revisions = append(revisions, src.TargetRevision)
}
// Fetch target objects from Git to know which hooks should exist
targets, _, _, err := ctrl.appStateManager.GetRepoObjs(context.Background(), app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, true)
if err != nil {
return false, err
@@ -127,14 +125,14 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
if !isHookOfType(obj, hookType) {
continue
}
if _, alreadyExists := runningHooks[kube.GetResourceKey(obj)]; !alreadyExists {
if runningHook := runningHooks[kube.GetResourceKey(obj)]; runningHook == nil {
expectedHook[kube.GetResourceKey(obj)] = obj
}
}
// Create hooks that don't exist yet
createdCnt := 0
for key, obj := range expectedHook {
for _, obj := range expectedHook {
// Add app instance label so the hook can be tracked and cleaned up
labels := obj.GetLabels()
if labels == nil {
@@ -143,13 +141,8 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
labels[appLabelKey] = app.InstanceName(ctrl.namespace)
obj.SetLabels(labels)
logCtx.Infof("Creating %s hook resource: %s", hookType, key)
_, err = ctrl.kubectl.CreateResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), obj, metav1.CreateOptions{})
if err != nil {
if apierrors.IsAlreadyExists(err) {
logCtx.Warnf("Hook resource %s already exists, skipping", key)
continue
}
return false, err
}
createdCnt++
@@ -170,8 +163,7 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
progressingHooksCount := 0
var failedHooks []string
var failedHookObjects []*unstructured.Unstructured
for key, obj := range runningHooks {
for _, obj := range runningHooks {
hookHealth, err := health.GetResourceHealth(obj, healthOverrides)
if err != nil {
return false, err
@@ -188,17 +180,12 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
Status: health.HealthStatusHealthy,
}
}
switch hookHealth.Status {
case health.HealthStatusProgressing:
logCtx.Debugf("Hook %s is progressing", key)
progressingHooksCount++
case health.HealthStatusDegraded:
logCtx.Warnf("Hook %s is degraded: %s", key, hookHealth.Message)
failedHooks = append(failedHooks, fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()))
failedHookObjects = append(failedHookObjects, obj)
case health.HealthStatusHealthy:
logCtx.Debugf("Hook %s is healthy", key)
}
}
@@ -207,7 +194,7 @@ func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Ap
logCtx.Infof("Deleting %d failed %s hook(s) to allow retry", len(failedHookObjects), hookType)
for _, obj := range failedHookObjects {
err = ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
if err != nil {
logCtx.WithError(err).Warnf("Failed to delete failed hook %s/%s", obj.GetNamespace(), obj.GetName())
}
}
@@ -254,10 +241,6 @@ func (ctrl *ApplicationController) cleanupHooks(hookType HookType, liveObjs map[
hooks = append(hooks, obj)
}
if len(hooks) == 0 {
return true, nil
}
// Process hooks for deletion
for _, obj := range hooks {
deletePolicies := hook.DeletePolicies(obj)
@@ -284,7 +267,7 @@ func (ctrl *ApplicationController) cleanupHooks(hookType HookType, liveObjs map[
}
logCtx.Infof("Deleting %s hook %s/%s", hookType, obj.GetNamespace(), obj.GetName())
err = ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
if err != nil {
return false, err
}
}

View File

@@ -3,10 +3,8 @@ package controller
import (
"testing"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func TestIsHookOfType(t *testing.T) {
@@ -314,174 +312,3 @@ func TestMultiHookOfType(t *testing.T) {
})
}
}
func TestExecuteHooksAlreadyExistsLogic(t *testing.T) {
newObj := func(name string, annot map[string]string) *unstructured.Unstructured {
obj := &unstructured.Unstructured{}
obj.SetGroupVersionKind(schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"})
obj.SetName(name)
obj.SetNamespace("default")
obj.SetAnnotations(annot)
return obj
}
tests := []struct {
name string
hookType []HookType
targetAnnot map[string]string
liveAnnot map[string]string // nil -> object doesn't exist in cluster
expectCreated bool
}{
// PRE DELETE TESTS
{
name: "PreDelete (argocd): Not in cluster - should be created",
hookType: []HookType{PreDeleteHookType},
targetAnnot: map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
liveAnnot: nil,
expectCreated: true,
},
{
name: "PreDelete (helm): Not in cluster - should be created",
hookType: []HookType{PreDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "pre-delete"},
liveAnnot: nil,
expectCreated: true,
},
{
name: "PreDelete (argocd): Already exists - should be skipped",
hookType: []HookType{PreDeleteHookType},
targetAnnot: map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
liveAnnot: map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
expectCreated: false,
},
{
name: "PreDelete (argocd): Already exists - should be skipped",
hookType: []HookType{PreDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "pre-delete"},
liveAnnot: map[string]string{"helm.sh/hook": "pre-delete"},
expectCreated: false,
},
{
name: "PreDelete (helm+argocd): One of two already exists - should be skipped",
hookType: []HookType{PreDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "pre-delete", "argocd.argoproj.io/hook": "PreDelete"},
liveAnnot: map[string]string{"helm.sh/hook": "pre-delete"},
expectCreated: false,
},
{
name: "PreDelete (helm+argocd): One of two already exists - should be skipped",
hookType: []HookType{PreDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "pre-delete", "argocd.argoproj.io/hook": "PreDelete"},
liveAnnot: map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
expectCreated: false,
},
// POST DELETE TESTS
{
name: "PostDelete (argocd): Not in cluster - should be created",
hookType: []HookType{PostDeleteHookType},
targetAnnot: map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
liveAnnot: nil,
expectCreated: true,
},
{
name: "PostDelete (helm): Not in cluster - should be created",
hookType: []HookType{PostDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "post-delete"},
liveAnnot: nil,
expectCreated: true,
},
{
name: "PostDelete (argocd): Already exists - should be skipped",
hookType: []HookType{PostDeleteHookType},
targetAnnot: map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
liveAnnot: map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
expectCreated: false,
},
{
name: "PostDelete (helm): Already exists - should be skipped",
hookType: []HookType{PostDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "post-delete"},
liveAnnot: map[string]string{"helm.sh/hook": "post-delete"},
expectCreated: false,
},
{
name: "PostDelete (helm+argocd): Already exists - should be skipped",
hookType: []HookType{PostDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "post-delete", "argocd.argoproj.io/hook": "PostDelete"},
liveAnnot: map[string]string{"helm.sh/hook": "post-delete", "argocd.argoproj.io/hook": "PostDelete"},
expectCreated: false,
},
{
name: "PostDelete (helm+argocd): One of two already exists - should be skipped",
hookType: []HookType{PostDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "post-delete", "argocd.argoproj.io/hook": "PostDelete"},
liveAnnot: map[string]string{"helm.sh/hook": "post-delete"},
expectCreated: false,
},
{
name: "PostDelete (helm+argocd): One of two already exists - should be skipped",
hookType: []HookType{PostDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "post-delete", "argocd.argoproj.io/hook": "PostDelete"},
liveAnnot: map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
expectCreated: false,
},
// MULTI HOOK TESTS - SKIP LOGIC
{
name: "Multi-hook (argocd): Target is (Pre,Post), Cluster has (Pre,Post) - should be skipped",
hookType: []HookType{PreDeleteHookType, PostDeleteHookType},
targetAnnot: map[string]string{"argocd.argoproj.io/hook": "PreDelete,PostDelete"},
liveAnnot: map[string]string{"argocd.argoproj.io/hook": "PreDelete,PostDelete"},
expectCreated: false,
},
{
name: "Multi-hook (helm): Target is (Pre,Post), Cluster has (Pre,Post) - should be skipped",
hookType: []HookType{PreDeleteHookType, PostDeleteHookType},
targetAnnot: map[string]string{"helm.sh/hook": "post-delete,pre-delete"},
liveAnnot: map[string]string{"helm.sh/hook": "post-delete,pre-delete"},
expectCreated: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
targetObj := newObj("my-hook", tt.targetAnnot)
targetKey := kube.GetResourceKey(targetObj)
liveObjs := make(map[kube.ResourceKey]*unstructured.Unstructured)
if tt.liveAnnot != nil {
liveObjs[targetKey] = newObj("my-hook", tt.liveAnnot)
}
runningHooks := map[kube.ResourceKey]*unstructured.Unstructured{}
for key, obj := range liveObjs {
for _, hookType := range tt.hookType {
if isHookOfType(obj, hookType) {
runningHooks[key] = obj
}
}
}
expectedHooksToCreate := map[kube.ResourceKey]*unstructured.Unstructured{}
targets := []*unstructured.Unstructured{targetObj}
for _, obj := range targets {
for _, hookType := range tt.hookType {
if !isHookOfType(obj, hookType) {
continue
}
}
objKey := kube.GetResourceKey(obj)
if _, alreadyExists := runningHooks[objKey]; !alreadyExists {
expectedHooksToCreate[objKey] = obj
}
}
if tt.expectCreated {
assert.NotEmpty(t, expectedHooksToCreate, "Expected hook to be marked for creation")
} else {
assert.Empty(t, expectedHooksToCreate, "Expected hook to be skipped (already exists)")
}
})
}
}

View File

@@ -41,13 +41,18 @@ import (
"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
appstatecache "github.com/argoproj/argo-cd/v3/util/cache/appstate"
"github.com/argoproj/argo-cd/v3/util/db"
"github.com/argoproj/argo-cd/v3/util/env"
"github.com/argoproj/argo-cd/v3/util/gpg"
utilio "github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/argo-cd/v3/util/settings"
"github.com/argoproj/argo-cd/v3/util/stats"
)
var ErrCompareStateRepo = errors.New("failed to get repo objects")
var (
ErrCompareStateRepo = errors.New("failed to get repo objects")
processManifestGeneratePathsEnabled = env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_PROCESS_MANIFEST_GENERATE_PATHS", true)
)
type resourceInfoProviderStub struct{}
@@ -70,7 +75,7 @@ type managedResource struct {
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
SyncAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, state *v1alpha1.OperationState)
GetRepoObjs(ctx context.Context, app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
}
@@ -256,7 +261,14 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
appNamespace := app.Spec.Destination.Namespace
apiVersions := argo.APIResourcesToStrings(apiResources, true)
if repo.Depth == 0 && syncedRevision != "" && !source.IsRef() && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" && (syncedRevision != revision || app.Spec.HasMultipleSources()) {
updateRevisions := processManifestGeneratePathsEnabled &&
// updating revisions result is not required if automated sync is not enabled
app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.Automated != nil &&
// using updating revisions gains performance only if manifest generation is required.
// just reading pre-generated manifests is comparable to updating revisions time-wise
app.Status.SourceType != v1alpha1.ApplicationSourceTypeDirectory
if updateRevisions && repo.Depth == 0 && syncedRevision != "" && !source.IsRef() && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" && (syncedRevision != revision || app.Spec.HasMultipleSources()) {
// Validate the manifest-generate-path annotation to avoid generating manifests if it has not changed.
updateRevisionResult, err := repoClient.UpdateRevisionForPaths(ctx, &apiclient.UpdateRevisionForPathsRequest{
Repo: repo,
@@ -355,7 +367,7 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
}
// ResolveGitRevision will resolve the given revision to a full commit SHA. Only works for git.
func (m *appStateManager) ResolveGitRevision(repoURL string, revision string) (string, error) {
func (m *appStateManager) ResolveGitRevision(repoURL, revision string) (string, error) {
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
if err != nil {
return "", fmt.Errorf("failed to connect to repo server: %w", err)
@@ -556,7 +568,7 @@ func partitionTargetObjsForSync(targetObjs []*unstructured.Unstructured) (syncOb
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied source. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
ts := stats.NewTimingStats()
logCtx := log.WithFields(applog.GetAppLogFields(app))

View File

@@ -23,37 +23,12 @@ All following commands in this guide assume the namespace is already set.
kubectl config set-context --current --namespace=argocd
```
### Pull in all UI build dependencies
### Pull in all build dependencies
As build dependencies change over time, you have to synchronize your development environment with the current specification. In order to pull in all required UI dependencies (NPM packages), issue:
As build dependencies change over time, you have to synchronize your development environment with the current specification. In order to pull in all required dependencies, issue:
* `make dep-ui` or `make dep-ui-local`
These commands run `pnpm install --frozen-lockfile` command, which only brings package versions that are defined in the `pnpm-lock.yaml` file without trying to resolve and download new package versions.
### Updating UI build dependencies
If you need to add new UI dependencies or update existing ones you need
to run a `pnpm` command in the ./ui directory to resolve and download new packages.
You can run it in the docker container using the `make run-pnpm` make target.
For example, to add new dependency `newpackage` you may run command like
```shell
make run-pnpm PNPM_COMMAND="add newpackage --ignore-scripts"
```
To upgrade an existing package:
```shell
make run-pnpm PNPM_COMMAND="update existingpackage@1.0.2 --ignore-scripts"
```
Please consider using best security practices when adding or upgrading
NPM dependencies, such as this
[guide](https://github.com/lirantal/npm-security-best-practices/blob/main/README.md).
### Generate API glue code and other assets
Argo CD relies on Google's [Protocol Buffers](https://developers.google.com/protocol-buffers) for its API, and this makes heavy use of auto-generated glue code and stubs. Whenever you touched parts of the API code, you must re-generate the auto generated code.

View File

@@ -212,7 +212,7 @@ export IMAGE_TAG=1.5.0-myrc
> [!NOTE]
> The image will be built for `linux/amd64` platform by default. If you are running on Mac with Apple chip (ARM),
> you need to specify the correct build platform by running:
> you need to specify the correct build platform by running:
> ```bash
> export TARGET_ARCH=linux/arm64
> ```

View File

@@ -41,7 +41,7 @@ spec:
- https://kubernetes.default.svc
- https://some-other-cluster
# Git generator generates parameters either from directory structure of files within a git repo
# Git generator generates parameters either from directory structure of files within a git repo
- git:
repoURL: https://github.com/argoproj/argo-cd.git
# OPTIONAL: use directory structure of git repo to generate parameters

View File

@@ -86,7 +86,7 @@ data:
# Optional set of OIDC claims to request on the ID token.
requestedIDTokenClaims: {"groups": {"essential": true}}
# Configuration to customize resource behavior (optional) can be configured via split sub keys.
# Configuration to customize resource behavior (optional) can be configured via split sub keys.
# Keys are in the form: resource.customizations.ignoreDifferences.<group_kind>, resource.customizations.health.<group_kind>
# resource.customizations.actions.<group_kind>, resource.customizations.knownTypeFields.<group_kind>
# resource.customizations.ignoreResourceUpdates.<group_kind>
@@ -115,7 +115,7 @@ data:
jsonPointers:
- /metadata/resourceVersion
# Configuration to define customizations ignoring differences during watched resource updates can be configured via split sub key.
# Configuration to define customizations ignoring differences during watched resource updates can be configured via split sub key.
resource.customizations.ignoreResourceUpdates.argoproj.io_Application: |
jsonPointers:
- /status

View File

@@ -125,7 +125,7 @@ data:
send: [on-deployed-template]
```
Now, with the setup above, a sync will send the list of images to your Slack application. For more information about integration with Slack, see the [Slack integration guide](services/slack.md).
Now, with the setup above, a sync will send the list of images to your Slack application. For more information about integration with Slack, see the [Slack integration guide](services/slack.md).
### Deduplicating images

View File

@@ -182,7 +182,7 @@ on how your workloads connect to the repository server.
### Configuring TLS to argocd-repo-server
The components `argocd-server`, `argocd-application-controller`, `argocd-notifications-controller`,
The components `argocd-server`, `argocd-application-controller`, `argocd-notifications-controller`,
and `argocd-applicationset-controller` communicate with the `argocd-repo-server`
using a gRPC API over TLS. By default, `argocd-repo-server` generates a non-persistent,
self-signed certificate to use for its gRPC endpoint on startup. Because the
@@ -190,7 +190,7 @@ self-signed certificate to use for its gRPC endpoint on startup. Because the
is not available to outside consumers for verification. These components will use a
non-validating connection to the `argocd-repo-server` for this reason.
To change this behavior to be more secure by having these components validate the TLS certificate of the
To change this behavior to be more secure by having these componenets validate the TLS certificate of the
`argocd-repo-server` endpoint, the following steps need to be performed:
* Create a persistent TLS certificate to be used by `argocd-repo-server`, as

View File

@@ -272,7 +272,7 @@ curl -X POST -H "Authorization: Bearer $ARGOCD_TOKEN" -H "Content-Type: applicat
}' "http://$YOUR_ARGOCD_URL/api/v1/applications/$YOUR_APP_NAME/sync"
```
It is also possible to sync such an Application using the UI, with `ApplyOutOfSyncOnly` option unchecked. However, currently, performing a sync without `ApplyOutOfSyncOnly` option is not possible using the CLI.
It is also possible to sync such an Applicaton using the UI, with `ApplyOutOfSyncOnly` option unchecked. However, currently, performing a sync without `ApplyOutOfSyncOnly` option is not possible using the CLI.
##### Other users

View File

@@ -29,7 +29,7 @@ When Argo CD is upgraded manually using plain manifests or Kustomize overlays, i
Users upgrading Argo CD manually using `helm upgrade` are not impacted by this change, since Helm does not use client-side apply and does not result in creation of the `last-applied` annotation.
#### Users who previously upgraded to 3.3.0 or 3.3.1
In some cases, after upgrading to one of those versions and applying Server-Side Apply, the following error occurred:
In some cases, after upgrading to one of those versions and applying Server-Side Apply, the following error occured:
`one or more synchronization tasks completed unsuccessfully, reason: Failed to perform client-side apply migration: failed to perform client-side apply migration on manager kubectl-client-side-apply: error when patching "/dev/shm/2047509016": CustomResourceDefinition.apiextensions.k8s.io "applicationsets.argoproj.io" is invalid: metadata.annotations: Too long: may not be more than 262144 bytes`.
Users that have configured the sync option `ClientSideApplyMigration=false` as a temporary remediation for the above error, should remove it after upgrading to `3.3.2`. Disabling `ClientSideApplyMigration` imposes a risk to encounter conflicts between K8s field managers in the future.

View File

@@ -68,7 +68,7 @@ deploy:
## Configuring RBAC
When using ArgoCD global RBAC config map, you can define your `policy.csv` like so:
When using ArgoCD global RBAC comfig map, you can define your `policy.csv` like so:
```yaml
configs:

View File

@@ -142,7 +142,7 @@ We provide the entire application tree to accomplish two things:
Further, if an Extension needs richer information than that provided by the Resource Tree, it can request additional information about a resource from the Argo CD API server.
```typescript
interface Extension {
interface Extention {
ResourceTab: React.Component<{resource: any}>;
}
```

View File

@@ -135,7 +135,7 @@ one in charge of a given resource.
#### Include resource identifies in the `app.kubernetes.io/instance` annotation
The `app.kubernetes.io/instance` annotation might be accidentally added or copied
The `app.kubernetes.io/instance` annotation might be accidently added or copied
same as label. To prevent Argo CD confusion the annotation value should include
the identifier of the resource annotation was applied to. The resource identifier
includes the group, kind, namespace and name of the resource. It is proposed to use `;`

View File

@@ -42,7 +42,7 @@ A bounty is a special proposal created under `docs/proposals/feature-bounties`.
#### Claiming a Bounty
* Argo will pay out bounties once a pull request implementing the requested features/changes/fixes is merged.
* A bounty is limited to a single successful PR.
* Those interested in working on the bounty are encouraged to comment on the issue, and users may team up to split a bounty if they prefer but collaboration is not required and users should not shame each other for their preferences to work alone or together.
* Those interested in working on the bounty are encouraged to comment on the issue, and users may team up to split a bounty if they prefer but collaboration is not required and users should not shame eachother for their preferences to work alone or together.
* A comment of interest does not constitute a claim and will not be treated as such.
* The first pull request submitted that is ready for merge will be reviewed by maintainers. Maintainers will also consider any competing pull requests submitted within 24-hours. We expect this will be a very rare circumstance. If multiple, high-quality, merge ready pull requests are submitted, 3-5 Approvers for the sub-project will vote to decide the final pull request merged.

4
go.mod
View File

@@ -7,7 +7,7 @@ require (
dario.cat/mergo v1.0.2
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
github.com/Azure/kubelogin v0.2.17
github.com/Azure/kubelogin v0.2.16
github.com/Masterminds/semver/v3 v3.4.0
github.com/Masterminds/sprig/v3 v3.3.0
github.com/TomOnTime/utfutil v1.0.0
@@ -24,7 +24,7 @@ require (
github.com/cenkalti/backoff/v5 v5.0.3
github.com/cespare/xxhash/v2 v2.3.0
github.com/chainguard-dev/git-urls v1.0.2
github.com/coreos/go-oidc/v3 v3.18.0
github.com/coreos/go-oidc/v3 v3.17.0
github.com/cyphar/filepath-securejoin v0.6.1
github.com/dlclark/regexp2 v1.11.5
github.com/dustin/go-humanize v1.0.1

8
go.sum
View File

@@ -72,8 +72,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/Azure/kubelogin v0.2.17 h1:pRM+KHVo5Oj3aBUDbcrUTxdZHOPs02D3oZn2E3t1B4A=
github.com/Azure/kubelogin v0.2.17/go.mod h1:UcOYtp0xCIn6tg0Fl3m0WOQDKcQA8Fb22Ya/b/DDaf0=
github.com/Azure/kubelogin v0.2.16 h1:z0jwNQ9A7LvIqS0Go+6CPZv0TuQQRL2mc+zY9wjBuF8=
github.com/Azure/kubelogin v0.2.16/go.mod h1:UvizZ5Gu/2btUFXm2cccbxliK/ensgBD5NTCihZoONE=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
@@ -217,8 +217,8 @@ github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJ
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
github.com/coreos/go-oidc/v3 v3.18.0 h1:V9orjXynvu5wiC9SemFTWnG4F45v403aIcjWo0d41+A=
github.com/coreos/go-oidc/v3 v3.18.0/go.mod h1:DYCf24+ncYi+XkIH97GY1+dqoRlbaSI26KVTCI9SrY4=
github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=

View File

@@ -1908,23 +1908,17 @@ func (s *Server) PodLogs(q *application.ApplicationPodLogsQuery, ws application.
// if k8s failed to start steaming logs (typically because Pod is not ready yet)
// then the error should be shown in the UI so that user know the reason
if err != nil {
select {
case logStream <- logEntry{line: err.Error()}:
case <-ws.Context().Done():
}
logStream <- logEntry{line: err.Error()}
} else {
parseLogsStream(ws.Context(), podName, stream, logStream)
parseLogsStream(podName, stream, logStream)
}
close(logStream)
}()
}
logStream := mergeLogStreams(ws.Context(), streams, time.Millisecond*100)
logStream := mergeLogStreams(streams, time.Millisecond*100)
sentCount := int64(0)
// Buffered so the goroutine below can always send and exit, even if PodLogs has already
// returned due to client disconnect (ws.Context().Done). Without this, the goroutine
// would block on "done <- err" forever, leaking memory via bufio and mergeLogStreams buffers.
done := make(chan error, 1)
done := make(chan error)
go func() {
for entry := range logStream {
if entry.err != nil {

View File

@@ -2,7 +2,6 @@ package application
import (
"bufio"
"context"
"errors"
"io"
"strings"
@@ -18,9 +17,8 @@ type logEntry struct {
err error
}
// parseLogsStream converts given ReadCloser into channel that emits log entries.
// It stops early if ctx is cancelled, avoiding goroutine leaks when the caller disconnects.
func parseLogsStream(ctx context.Context, podName string, stream io.ReadCloser, ch chan logEntry) {
// parseLogsStream converts given ReadCloser into channel that emits log entries
func parseLogsStream(podName string, stream io.ReadCloser, ch chan logEntry) {
bufReader := bufio.NewReader(stream)
eof := false
for !eof {
@@ -32,10 +30,7 @@ func parseLogsStream(ctx context.Context, podName string, stream io.ReadCloser,
break
}
} else if err != nil && !errors.Is(err, io.EOF) {
select {
case ch <- logEntry{err: err}:
case <-ctx.Done():
}
ch <- logEntry{err: err}
break
}
@@ -44,20 +39,13 @@ func parseLogsStream(ctx context.Context, podName string, stream io.ReadCloser,
timeStampStr := parts[0]
logTime, err := time.Parse(time.RFC3339Nano, timeStampStr)
if err != nil {
select {
case ch <- logEntry{err: err}:
case <-ctx.Done():
}
ch <- logEntry{err: err}
break
}
lines := strings.Join(parts[1:], " ")
for line := range strings.SplitSeq(lines, "\r") {
select {
case ch <- logEntry{line: line, timeStamp: logTime, podName: podName}:
case <-ctx.Done():
return
}
ch <- logEntry{line: line, timeStamp: logTime, podName: podName}
}
}
}
@@ -65,8 +53,7 @@ func parseLogsStream(ctx context.Context, podName string, stream io.ReadCloser,
// mergeLogStreams merge two stream of logs and ensures that merged logs are sorted by timestamp.
// The implementation uses merge sort: method reads next log entry from each stream if one of streams is empty
// it waits for no longer than specified duration and then merges available entries.
// ctx cancellation causes all internal goroutines to exit promptly, preventing goroutine and memory leaks.
func mergeLogStreams(ctx context.Context, streams []chan logEntry, bufferingDuration time.Duration) chan logEntry {
func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) chan logEntry {
merged := make(chan logEntry)
// buffer of received log entries for each stream
@@ -83,17 +70,7 @@ func mergeLogStreams(ctx context.Context, streams []chan logEntry, bufferingDura
lock.Lock()
entriesPerStream[index] = append(entriesPerStream[index], next)
lock.Unlock()
select {
case process <- struct{}{}:
case <-ctx.Done():
// drain remaining entries so parseLogsStream goroutine can exit
for range streams[index] {
}
if atomic.AddInt32(&streamsCount, -1) == 0 {
close(process)
}
return
}
process <- struct{}{}
}
// stop processing after all streams got closed
if atomic.AddInt32(&streamsCount, -1) == 0 {
@@ -134,11 +111,7 @@ func mergeLogStreams(ctx context.Context, streams []chan logEntry, bufferingDura
}
lock.Unlock()
for i := range entries {
select {
case merged <- entries[i]:
case <-ctx.Done():
return false
}
merged <- entries[i]
}
return len(entries) > 0
}
@@ -147,11 +120,11 @@ func mergeLogStreams(ctx context.Context, streams []chan logEntry, bufferingDura
var sentAt time.Time
ticker := time.NewTicker(bufferingDuration)
tickerDone := make(chan struct{})
done := make(chan struct{})
go func() {
for {
select {
case <-tickerDone:
case <-done:
return
case <-ticker.C:
sentAtLock.Lock()
@@ -160,30 +133,18 @@ func mergeLogStreams(ctx context.Context, streams []chan logEntry, bufferingDura
_ = send(true)
sentAt = time.Now()
}
sentAtLock.Unlock()
}
}
}()
go func() {
loop:
for {
select {
case _, ok := <-process:
if !ok {
break loop
}
if send(false) {
sentAtLock.Lock()
sentAt = time.Now()
sentAtLock.Unlock()
}
case <-ctx.Done():
// client disconnected: stop immediately without flushing
ticker.Stop()
tickerDone <- struct{}{}
close(merged)
return
for range process {
if send(false) {
sentAtLock.Lock()
sentAt = time.Now()
sentAtLock.Unlock()
}
}
@@ -191,10 +152,10 @@ func mergeLogStreams(ctx context.Context, streams []chan logEntry, bufferingDura
ticker.Stop()
// ticker.Stop() does not close the channel, and it does not wait for the channel to be drained. So we need to
// explicitly prevent the goroutine from leaking by closing the channel. We also need to prevent the goroutine
// explicitly prevent the gorountine from leaking by closing the channel. We also need to prevent the goroutine
// from calling `send` again, because `send` pushes to the `merged` channel which we're about to close.
// This describes the approach nicely: https://stackoverflow.com/questions/17797754/ticker-stop-behaviour-in-golang
tickerDone <- struct{}{}
done <- struct{}{}
close(merged)
}()
return merged

View File

@@ -1,7 +1,6 @@
package application
import (
"context"
"io"
"strings"
"testing"
@@ -17,7 +16,7 @@ func TestParseLogsStream_Successful(t *testing.T) {
res := make(chan logEntry)
go func() {
parseLogsStream(context.Background(), "test", r, res)
parseLogsStream("test", r, res)
close(res)
}()
@@ -40,7 +39,7 @@ func TestParseLogsStream_ParsingError(t *testing.T) {
res := make(chan logEntry)
go func() {
parseLogsStream(context.Background(), "test", r, res)
parseLogsStream("test", r, res)
close(res)
}()
@@ -56,19 +55,19 @@ func TestParseLogsStream_ParsingError(t *testing.T) {
func TestMergeLogStreams(t *testing.T) {
first := make(chan logEntry)
go func() {
parseLogsStream(context.Background(), "first", io.NopCloser(strings.NewReader(`2021-02-09T00:00:01Z 1
parseLogsStream("first", io.NopCloser(strings.NewReader(`2021-02-09T00:00:01Z 1
2021-02-09T00:00:03Z 3`)), first)
close(first)
}()
second := make(chan logEntry)
go func() {
parseLogsStream(context.Background(), "second", io.NopCloser(strings.NewReader(`2021-02-09T00:00:02Z 2
parseLogsStream("second", io.NopCloser(strings.NewReader(`2021-02-09T00:00:02Z 2
2021-02-09T00:00:04Z 4`)), second)
close(second)
}()
merged := mergeLogStreams(context.Background(), []chan logEntry{first, second}, time.Second)
merged := mergeLogStreams([]chan logEntry{first, second}, time.Second)
var lines []string
for entry := range merged {
lines = append(lines, entry.line)
@@ -84,18 +83,18 @@ func TestMergeLogStreams_RaceCondition(_ *testing.T) {
second := make(chan logEntry)
go func() {
parseLogsStream(context.Background(), "first", io.NopCloser(strings.NewReader(`2021-02-09T00:00:01Z 1`)), first)
parseLogsStream("first", io.NopCloser(strings.NewReader(`2021-02-09T00:00:01Z 1`)), first)
time.Sleep(time.Duration(i%3) * time.Millisecond)
close(first)
}()
go func() {
parseLogsStream(context.Background(), "second", io.NopCloser(strings.NewReader(`2021-02-09T00:00:02Z 2`)), second)
parseLogsStream("second", io.NopCloser(strings.NewReader(`2021-02-09T00:00:02Z 2`)), second)
time.Sleep(time.Duration((i+1)%3) * time.Millisecond)
close(second)
}()
merged := mergeLogStreams(context.Background(), []chan logEntry{first, second}, 1*time.Millisecond)
merged := mergeLogStreams([]chan logEntry{first, second}, 1*time.Millisecond)
// Drain the channel
for range merged {
@@ -106,39 +105,3 @@ func TestMergeLogStreams_RaceCondition(_ *testing.T) {
// and channel closer.
}
}
// TestMergeLogStreams_ContextCancellation verifies that cancelling the context causes mergeLogStreams
// to close the merged channel promptly, allowing all internal goroutines to exit without leaking.
func TestMergeLogStreams_ContextCancellation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
// unbuffered pipe: write end will block until someone reads
pr, pw := io.Pipe()
ch := make(chan logEntry)
go func() {
parseLogsStream(ctx, "test", pr, ch)
close(ch)
}()
merged := mergeLogStreams(ctx, []chan logEntry{ch}, time.Second)
// cancel before the pipe produces any data
cancel()
_ = pw.Close()
// merged must be closed (context cancelled), not block forever
done := make(chan struct{})
go func() {
for range merged {
}
close(done)
}()
select {
case <-done:
// merged closed promptly — no leak
case <-time.After(5 * time.Second):
t.Fatal("mergeLogStreams did not close merged channel after context cancellation")
}
}

View File

@@ -8,7 +8,7 @@ RUN ln -s /usr/lib/$(uname -m)-linux-gnu /usr/lib/linux-gnu
# Please make sure to also check the contained yarn version and update the references below when upgrading this image's version
FROM docker.io/library/node:22.9.0@sha256:8398ea18b8b72817c84af283f72daed9629af2958c4f618fe6db4f453c5c9328 AS node
FROM docker.io/library/golang:1.26.2@sha256:2a2b4b5791cea8ae09caecba7bad0bd9631def96e5fe362e4a5e67009fe4ae61 AS golang
FROM docker.io/library/golang:1.26.1@sha256:cd78d88e00afadbedd272f977d375a6247455f3a4b1178f8ae8bbcb201743a8a AS golang
FROM docker.io/library/registry:3.1@sha256:afcd13fd045b8859ac4f60fef26fc2d2f9b7b9d9e604c3c4f7c2fb1b94f95a64 AS registry

View File

@@ -128,9 +128,6 @@ func NewFakeSecret() *corev1.Secret {
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: FakeArgoCDNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string][]byte{
"admin.password": []byte("test"),

View File

@@ -19,9 +19,7 @@ RUN dpkg-divert --add --rename --divert /opt/google/chrome/google-chrome.real /o
WORKDIR /usr/src/app
COPY package*.json ./
COPY pnpm-lock.yaml ./
RUN npm install -g corepack@0.34.6 && corepack enable && pnpm install --frozen-lockfile
RUN npm install -g corepack@0.34.6 && corepack enable && pnpm install
COPY . .

View File

@@ -7,7 +7,7 @@ Web UI for [Argo CD](https://github.com/argoproj/argo-cd).
## Getting started
1. Install [NodeJS](https://nodejs.org/en/download/) and [pnpm](https://pnpm.io). On macOS with [Homebrew](https://brew.sh/), running `brew install node pnpm` will accomplish this.
2. Run `pnpm install --frozen-lockfile` to install local prerequisites.
2. Run `pnpm install` to install local prerequisites.
3. Run `pnpm start` to launch the webpack dev UI server.
4. Run `pnpm build` to bundle static resources into the `./dist` directory.

View File

@@ -33,6 +33,6 @@ export default [
files: ['./src/**/*.{ts,tsx}']
},
{
ignores: ['dist', 'assets', '**/*.config.js', 'jest.setup.js', '__mocks__', 'coverage', '**/*.test.{ts,tsx}']
ignores: ['dist', 'assets', '**/*.config.js', '__mocks__', 'coverage', '**/*.test.{ts,tsx}']
}
];

View File

@@ -1,7 +1,6 @@
module.exports = {
preset: 'ts-jest',
testEnvironment: 'jsdom',
setupFiles: ['./jest.setup.js'],
reporters: ['default', 'jest-junit'],
collectCoverage: true,
transformIgnorePatterns: ['node_modules/(?!(argo-ui|.*\\.pnpm.*argo-ui.*)/)'],

View File

@@ -1,9 +0,0 @@
// TODO: This needs to be polyfilled until jest-environment-jsdom decides to pull in a version of jsdom that's >=27.4.0
const {TextEncoder, TextDecoder} = require('util');
if (typeof globalThis.TextEncoder === 'undefined') {
globalThis.TextEncoder = TextEncoder;
}
if (typeof globalThis.TextDecoder === 'undefined') {
globalThis.TextDecoder = TextDecoder;
}

View File

@@ -64,7 +64,7 @@
"@types/react-dom": "^16.8.2",
"normalize-url": "4.5.1",
"rxjs": "6.6.7",
"formidable": "2.1.3"
"formidable": "2.1.2"
}
},
"devDependencies": {
@@ -76,7 +76,7 @@
"@eslint/js": "^9.1.1",
"@types/classnames": "^2.2.3",
"@types/cookie": "^0.5.1",
"@types/dagre": "0.7.54",
"@types/dagre": "0.7.42",
"@types/deepmerge": "^2.2.0",
"@types/git-url-parse": "^9.0.1",
"@types/history": "^4.7.2",

56
ui/pnpm-lock.yaml generated
View File

@@ -9,7 +9,7 @@ overrides:
'@types/react-dom': ^16.8.2
normalize-url: 4.5.1
rxjs: 6.6.7
formidable: 2.1.3
formidable: 2.1.2
importers:
@@ -29,7 +29,7 @@ importers:
version: 6.1.6(react-dom@16.14.0(react@16.14.0))(react@16.14.0)
argo-ui:
specifier: git+https://github.com/argoproj/argo-ui.git
version: https://codeload.github.com/argoproj/argo-ui/tar.gz/a1c32a45e83fdda4baafc7ca3105c3ead383f8ba(@types/react@16.14.65)(jquery@3.7.1)(react-dom@16.14.0(react@16.14.0))(react@16.14.0)(what-input@5.2.12)
version: https://codeload.github.com/argoproj/argo-ui/tar.gz/2bfda77cec418c4123fe61e35f22d09432af15b7(@types/react@16.14.65)(jquery@3.7.1)(react-dom@16.14.0(react@16.14.0))(react@16.14.0)(what-input@5.2.12)
buffer:
specifier: ^6.0.3
version: 6.0.3
@@ -176,8 +176,8 @@ importers:
specifier: ^0.5.1
version: 0.5.4
'@types/dagre':
specifier: 0.7.54
version: 0.7.54
specifier: 0.7.42
version: 0.7.42
'@types/deepmerge':
specifier: ^2.2.0
version: 2.2.3
@@ -1455,10 +1455,6 @@ packages:
'@leichtgewicht/ip-codec@2.0.5':
resolution: {integrity: sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==}
'@noble/hashes@1.8.0':
resolution: {integrity: sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==}
engines: {node: ^14.21.3 || >=16}
'@nodelib/fs.scandir@2.1.5':
resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
engines: {node: '>= 8'}
@@ -1479,9 +1475,6 @@ packages:
engines: {node: '>=10'}
deprecated: This functionality has been moved to @npmcli/fs
'@paralleldrive/cuid2@2.3.1':
resolution: {integrity: sha512-XO7cAxhnTZl0Yggq6jOgjiOHhbgcO4NqFqwSmQpjK3b6TEE6Uj/jfSk6wzYyemh3+I0sHirKSetjQwn5cZktFw==}
'@parcel/watcher-android-arm64@2.5.1':
resolution: {integrity: sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==}
engines: {node: '>= 10.0.0'}
@@ -1648,8 +1641,8 @@ packages:
'@types/cookiejar@2.1.5':
resolution: {integrity: sha512-he+DHOWReW0nghN24E1WUqM0efK4kI9oTqDm6XmK8ZPe2djZ90BSNdGnIyCLzCPw7/pogPlGbzI2wHGGmi4O/Q==}
'@types/dagre@0.7.54':
resolution: {integrity: sha512-QjcRY+adGbYvBFS7cwv5txhVIwX1XXIUswWl+kSQTbI6NjgZydrZkEKX/etzVd7i+bCsCb40Z/xlBY5eoFuvWQ==}
'@types/dagre@0.7.42':
resolution: {integrity: sha512-knVdi1Ul8xYgJ0wdhQ+/2YGJFKJFa/5srcPII9zvOs4KhsHfpnFrSTQXATYmjslglxRMif3Lg+wEZ0beag+94A==}
'@types/deepmerge@2.2.3':
resolution: {integrity: sha512-ct4srnukH/SHdVPyJIFV73YJIt9PTYTaqQbjrCvRrbc9LxHdGcJb132SuWwnDTPyx5UjCVS/I00wj0i5IXfqSA==}
@@ -2085,8 +2078,8 @@ packages:
arg@4.1.3:
resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==}
argo-ui@https://codeload.github.com/argoproj/argo-ui/tar.gz/a1c32a45e83fdda4baafc7ca3105c3ead383f8ba:
resolution: {tarball: https://codeload.github.com/argoproj/argo-ui/tar.gz/a1c32a45e83fdda4baafc7ca3105c3ead383f8ba}
argo-ui@https://codeload.github.com/argoproj/argo-ui/tar.gz/2bfda77cec418c4123fe61e35f22d09432af15b7:
resolution: {tarball: https://codeload.github.com/argoproj/argo-ui/tar.gz/2bfda77cec418c4123fe61e35f22d09432af15b7}
version: 1.0.0
peerDependencies:
'@types/react': ^16.9.3
@@ -2603,7 +2596,6 @@ packages:
deep-diff@0.3.8:
resolution: {integrity: sha512-yVn6RZmHiGnxRKR9sJb3iVV2XTF1Ghh2DiWRZ3dMnGc43yUdWWF/kX6lQyk3+P84iprfWKU/8zFTrlkvtFm1ug==}
deprecated: Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.
deep-equal@1.1.2:
resolution: {integrity: sha512-5tdhKF6DbU7iIzrIOa1AOUt39ZRm13cmL1cGEh//aqR8x9+tNfbywRf0n5FD/18OKMdo7DNEtrX2t22ZAkI+eg==}
@@ -3081,9 +3073,9 @@ packages:
resolution: {integrity: sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==}
engines: {node: '>= 6'}
formidable@2.1.3:
resolution: {integrity: sha512-vDI5JjeALeGXpyL8v71ZG2VgHY5zD6qg1IvypU7aJCYvREZyhawrYJxMdsWO+m5DIGLiMiDH71yEN8RO4wQAMQ==}
deprecated: 'ATTENTION: please upgrade to v3! The v1 and v2 versions are pretty old and deprecated'
formidable@2.1.2:
resolution: {integrity: sha512-CM3GuJ57US06mlpQ47YcunuUZ9jpm8Vx+P2CGt2j7HpgkKZO/DJYQ0Bobim8G6PFQmK5lOqOOdUXboU+h73A4g==}
deprecated: 'ACTION REQUIRED: SWITCH TO v3 - v1 and v2 are VULNERABLE! v1 is DEPRECATED FOR OVER 2 YEARS! Use formidable@latest or try formidable-mini for fresh projects'
forwarded@0.2.0:
resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==}
@@ -3176,7 +3168,7 @@ packages:
glob@7.2.3:
resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
deprecated: Glob versions prior to v9 are no longer supported
global@4.4.0:
resolution: {integrity: sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==}
@@ -3251,6 +3243,10 @@ packages:
resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==}
hasBin: true
hexoid@1.0.0:
resolution: {integrity: sha512-QFLV0taWQOZtvIRIAdBChesmogZrtuXvVWsFHZTk2SU+anspqZ2vMnoLg7IE1+Uk16N19APic1BuF8bC8c2m5g==}
engines: {node: '>=8'}
history@4.10.1:
resolution: {integrity: sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==}
@@ -5136,7 +5132,6 @@ packages:
tar@6.2.1:
resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==}
engines: {node: '>=10'}
deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
teeny-request@7.1.1:
resolution: {integrity: sha512-iwY6rkW5DDGq8hE2YgNQlKbptYpY5Nn2xecjQiNjOXWbKzPGUfmeUBCSQbbr306d7Z7U2N0TPl+/SwYRfua1Dg==}
@@ -5531,7 +5526,6 @@ packages:
whatwg-encoding@2.0.0:
resolution: {integrity: sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==}
engines: {node: '>=12'}
deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation
whatwg-mimetype@3.0.0:
resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==}
@@ -6895,8 +6889,6 @@ snapshots:
'@leichtgewicht/ip-codec@2.0.5': {}
'@noble/hashes@1.8.0': {}
'@nodelib/fs.scandir@2.1.5':
dependencies:
'@nodelib/fs.stat': 2.0.5
@@ -6919,10 +6911,6 @@ snapshots:
mkdirp: 1.0.4
rimraf: 3.0.2
'@paralleldrive/cuid2@2.3.1':
dependencies:
'@noble/hashes': 1.8.0
'@parcel/watcher-android-arm64@2.5.1':
optional: true
@@ -7085,7 +7073,7 @@ snapshots:
'@types/cookiejar@2.1.5': {}
'@types/dagre@0.7.54': {}
'@types/dagre@0.7.42': {}
'@types/deepmerge@2.2.3':
dependencies:
@@ -7593,7 +7581,7 @@ snapshots:
arg@4.1.3: {}
argo-ui@https://codeload.github.com/argoproj/argo-ui/tar.gz/a1c32a45e83fdda4baafc7ca3105c3ead383f8ba(@types/react@16.14.65)(jquery@3.7.1)(react-dom@16.14.0(react@16.14.0))(react@16.14.0)(what-input@5.2.12):
argo-ui@https://codeload.github.com/argoproj/argo-ui/tar.gz/2bfda77cec418c4123fe61e35f22d09432af15b7(@types/react@16.14.65)(jquery@3.7.1)(react-dom@16.14.0(react@16.14.0))(react@16.14.0)(what-input@5.2.12):
dependencies:
'@fortawesome/fontawesome-free': 6.7.2
'@tippy.js/react': 3.1.1(react-dom@16.14.0(react@16.14.0))(react@16.14.0)
@@ -8858,10 +8846,10 @@ snapshots:
hasown: 2.0.2
mime-types: 2.1.35
formidable@2.1.3:
formidable@2.1.2:
dependencies:
'@paralleldrive/cuid2': 2.3.1
dezalgo: 1.0.4
hexoid: 1.0.0
once: 1.4.0
qs: 6.15.0
@@ -9025,6 +9013,8 @@ snapshots:
he@1.2.0: {}
hexoid@1.0.0: {}
history@4.10.1:
dependencies:
'@babel/runtime': 7.27.6
@@ -11234,7 +11224,7 @@ snapshots:
debug: 4.4.1
fast-safe-stringify: 2.1.1
form-data: 4.0.4
formidable: 2.1.3
formidable: 2.1.2
methods: 1.1.2
mime: 2.6.0
qs: 6.15.0

View File

@@ -112,7 +112,7 @@ function getGraphSize(nodes: dagre.Node[]): {width: number; height: number} {
return {width, height};
}
function groupNodes(nodes: ResourceTreeNode[], graph: dagre.graphlib.Graph<{[key: string]: any}>) {
function groupNodes(nodes: ResourceTreeNode[], graph: dagre.graphlib.Graph) {
function getNodeGroupingInfo(nodeId: string) {
const node = graph.node(nodeId);
return {
@@ -280,7 +280,7 @@ function renderFilteredNode(node: {count: number} & dagre.Node, onClearFilter: (
);
}
function renderGroupedNodes(props: ApplicationResourceTreeProps, node: {count: number; groupedNodeIds: string[]} & dagre.Node & ResourceTreeNode) {
function renderGroupedNodes(props: ApplicationResourceTreeProps, node: {count: number} & dagre.Node & ResourceTreeNode) {
const indicators = new Array<number>();
let count = Math.min(node.count - 1, 3);
while (count > 0) {
@@ -333,7 +333,7 @@ function renderTrafficNode(node: dagre.Node) {
);
}
function renderLoadBalancerNode(node: dagre.Node & {label: string; color: string; kind: string}) {
function renderLoadBalancerNode(node: dagre.Node & {label: string; color: string}) {
return (
<div
className='application-resource-tree__node application-resource-tree__node--load-balancer'
@@ -400,12 +400,7 @@ function processPodGroup(targetPodGroup: ResourceTreeNode, child: ResourceTreeNo
}
}
function renderPodGroup(
props: ApplicationResourceTreeProps,
id: string,
node: ResourceTreeNode & dagre.Node & {groupedNodeIds?: string[]},
childMap: Map<string, ResourceTreeNode[]>
) {
function renderPodGroup(props: ApplicationResourceTreeProps, id: string, node: ResourceTreeNode & dagre.Node, childMap: Map<string, ResourceTreeNode[]>) {
const fullName = nodeKey(node);
let comparisonStatus: models.SyncStatusCode = null;
let healthState: models.HealthStatus = null;
@@ -942,7 +937,7 @@ function findNetworkTargets(nodes: ResourceTreeNode[], networkingInfo: models.Re
return result;
}
export const ApplicationResourceTree = (props: ApplicationResourceTreeProps) => {
const graph = new dagre.graphlib.Graph<{[key: string]: any}>();
const graph = new dagre.graphlib.Graph();
graph.setGraph({nodesep: 25, rankdir: 'LR', marginy: 45, marginx: -100, ranksep: 80});
graph.setDefaultEdgeLabel(() => ({}));
const overridesCount = getAppOverridesCount(props.app);
@@ -1030,12 +1025,7 @@ export const ApplicationResourceTree = (props: ApplicationResourceTreeProps) =>
}
}, [podCount]);
function filterGraph(
app: models.AbstractApplication,
filteredIndicatorParent: string,
graphNodesFilter: dagre.graphlib.Graph<{[key: string]: any}>,
predicate: (node: ResourceTreeNode) => boolean
) {
function filterGraph(app: models.AbstractApplication, filteredIndicatorParent: string, graphNodesFilter: dagre.graphlib.Graph, predicate: (node: ResourceTreeNode) => boolean) {
const appKey = appNodeKey(app);
let filtered = 0;
graphNodesFilter.nodes().forEach(nodeId => {

View File

@@ -1311,58 +1311,23 @@ func (mgr *SettingsManager) GetSettings() (*ArgoCDSettings, error) {
return &settings, nil
}
// isRepositorySecret reports whether obj is a secret holding repository
// credentials (labelled argocd.argoproj.io/secret-type=repository). Repository
// credential changes are the only secret changes that must invalidate the
// project cache; cluster secrets are handled by the dedicated cluster informer.
// DeleteFunc handlers may hand us a cache.DeletedFinalStateUnknown tombstone,
// which is unwrapped first. Anything that is not a metav1.Object yields false
// (fail-closed).
func isRepositorySecret(obj any) bool {
	// Unwrap the tombstone wrapper that delete notifications can carry.
	if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		obj = tombstone.Obj
	}
	metaObj, ok := obj.(metav1.Object)
	if !ok {
		// Unknown type: treat as non-matching rather than guessing.
		return false
	}
	repoSelector := labels.SelectorFromSet(labels.Set{common.LabelKeySecretType: common.LabelValueSecretTypeRepository})
	return repoSelector.Matches(labels.Set(metaObj.GetLabels()))
}
// isSettingsObject reports whether obj is labelled
// app.kubernetes.io/part-of=argocd, the marker for secrets and configmaps that
// take part in ArgoCD's settings machinery (OIDC configuration, webhook
// secrets, $secretName:key template references). A
// cache.DeletedFinalStateUnknown tombstone from a DeleteFunc handler is
// unwrapped before inspection; objects that are not metav1.Object yield false
// (fail-closed).
func isSettingsObject(obj any) bool {
	// Delete notifications may wrap the object in a tombstone; unwrap it.
	if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		obj = tombstone.Obj
	}
	metaObj, ok := obj.(metav1.Object)
	if !ok {
		// Fail closed on unexpected types.
		return false
	}
	settingsSelector := labels.SelectorFromSet(labels.Set{"app.kubernetes.io/part-of": "argocd"})
	return settingsSelector.Matches(labels.Set(metaObj.GetLabels()))
}
// isArgoCDConfigMap reports whether obj is the argocd-cm ConfigMap, the only
// configmap whose contents feed project cache validity (its "globalProjects"
// key is read by GetGlobalProjectsSettings). Tombstones delivered to
// DeleteFunc handlers (cache.DeletedFinalStateUnknown) are unwrapped first;
// any other unexpected type yields false (fail-closed).
func isArgoCDConfigMap(obj any) bool {
	// Unwrap a possible delete-event tombstone before looking at metadata.
	if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		obj = tombstone.Obj
	}
	metaObj, ok := obj.(metav1.Object)
	if !ok {
		return false
	}
	return metaObj.GetName() == common.ArgoCDConfigMapName
}
func (mgr *SettingsManager) initialize(ctx context.Context) error {
tweakConfigMap := func(options *metav1.ListOptions) {
cmLabelSelector := fields.ParseSelectorOrDie(partOfArgoCDSelector)
options.LabelSelector = cmLabelSelector.String()
}
eventHandler := cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, _ any) {
mgr.onRepoOrClusterChanged()
},
AddFunc: func(_ any) {
mgr.onRepoOrClusterChanged()
},
DeleteFunc: func(_ any) {
mgr.onRepoOrClusterChanged()
},
}
indexers := cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
ByProjectRepoIndexer: byProjectIndexerFunc(common.LabelValueSecretTypeRepository),
@@ -1377,65 +1342,17 @@ func (mgr *SettingsManager) initialize(ctx context.Context) error {
log.Error(err)
}
// ConfigMap informer: filtered to app.kubernetes.io/part-of=argocd (see tweakConfigMap).
// Only argocd-cm carries settings that affect project cache validity: the "globalProjects"
// key controls which AppProjects are treated as global (merged into virtual projects via
// GetGlobalProjectsSettings). Other part-of=argocd configmaps (argocd-rbac-cm, etc.) have
// no path into project cache construction and don't need to trigger invalidation.
_, err = cmInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, obj any) {
if isArgoCDConfigMap(obj) {
mgr.onRepoOrClusterChanged()
}
},
AddFunc: func(obj any) {
if isArgoCDConfigMap(obj) {
mgr.onRepoOrClusterChanged()
}
},
DeleteFunc: func(obj any) {
if isArgoCDConfigMap(obj) {
mgr.onRepoOrClusterChanged()
}
},
})
_, err = cmInformer.AddEventHandler(eventHandler)
if err != nil {
log.Error(err)
}
// Secrets informer: filtered to argocd.argoproj.io/secret-type != cluster,
// so cluster secrets are excluded (handled by the cluster informer below).
// Only repository credential changes affect project-repo bindings and need
// to invalidate the project cache.
_, err = secretsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, obj any) {
if isRepositorySecret(obj) {
mgr.onRepoOrClusterChanged()
}
},
AddFunc: func(obj any) {
if isRepositorySecret(obj) {
mgr.onRepoOrClusterChanged()
}
},
DeleteFunc: func(obj any) {
if isRepositorySecret(obj) {
mgr.onRepoOrClusterChanged()
}
},
})
_, err = secretsInformer.AddEventHandler(eventHandler)
if err != nil {
log.Error(err)
}
// Cluster informer: filtered to argocd.argoproj.io/secret-type=cluster,
// so every event represents a cluster credential change, which always
// warrants a settings reload.
_, err = clusterInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, _ any) { mgr.onRepoOrClusterChanged() },
AddFunc: func(_ any) { mgr.onRepoOrClusterChanged() },
DeleteFunc: func(_ any) { mgr.onRepoOrClusterChanged() },
})
_, err = clusterInformer.AddEventHandler(eventHandler)
if err != nil {
log.Error(err)
}
@@ -1472,28 +1389,19 @@ func (mgr *SettingsManager) initialize(ctx context.Context) error {
}
}
now := time.Now()
// handler notifies subscribers of settings changes. Guarded by isSettingsObject
// so that only changes to app.kubernetes.io/part-of=argocd objects (the documented
// contract for secrets/configmaps that participate in ArgoCD settings) trigger a
// full GetSettings() reload. This prevents spurious reloads caused by the informer
// resync period delivering synthetic UPDATE events for unrelated objects.
handler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj any) {
if isSettingsObject(obj) {
if metaObj, ok := obj.(metav1.Object); ok {
if metaObj.GetCreationTimestamp().After(now) {
tryNotify()
}
if metaObj, ok := obj.(metav1.Object); ok {
if metaObj.GetCreationTimestamp().After(now) {
tryNotify()
}
}
},
UpdateFunc: func(oldObj, newObj any) {
if isSettingsObject(newObj) {
oldMeta, oldOk := oldObj.(metav1.Common)
newMeta, newOk := newObj.(metav1.Common)
if oldOk && newOk && oldMeta.GetResourceVersion() != newMeta.GetResourceVersion() {
tryNotify()
}
oldMeta, oldOk := oldObj.(metav1.Common)
newMeta, newOk := newObj.(metav1.Common)
if oldOk && newOk && oldMeta.GetResourceVersion() != newMeta.GetResourceVersion() {
tryNotify()
}
},
}

View File

@@ -20,7 +20,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
@@ -2436,169 +2435,3 @@ func TestSecretsInformerExcludesClusterSecrets(t *testing.T) {
}
})
}
func TestIsRepositorySecret(t *testing.T) {
tests := []struct {
name string
obj any
expected bool
}{
{
name: "repository secret matches",
obj: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeRepository},
}},
expected: true,
},
{
name: "unlabeled secret does not match",
obj: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{}},
expected: false,
},
{
name: "cluster secret does not match",
obj: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
}},
expected: false,
},
{
name: "tombstone wrapping repository secret matches",
obj: cache.DeletedFinalStateUnknown{
Obj: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeRepository},
}},
},
expected: true,
},
{
name: "tombstone wrapping non-repository secret does not match",
obj: cache.DeletedFinalStateUnknown{
Obj: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{}},
},
expected: false,
},
{
name: "unknown type does not match",
obj: "unexpected-type",
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.expected, isRepositorySecret(tt.obj))
})
}
}
// TestIsSettingsObject covers the part-of=argocd label check for both secrets
// and configmaps, including wrong label values, tombstone unwrapping, and the
// fail-closed path for unexpected types.
func TestIsSettingsObject(t *testing.T) {
	argoLabels := map[string]string{"app.kubernetes.io/part-of": "argocd"}
	otherLabels := map[string]string{"app.kubernetes.io/part-of": "other-app"}
	testCases := []struct {
		name string
		obj  any
		want bool
	}{
		{
			name: "secret with part-of=argocd matches",
			obj:  &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Labels: argoLabels}},
			want: true,
		},
		{
			name: "unlabeled secret does not match",
			obj:  &corev1.Secret{},
			want: false,
		},
		{
			name: "secret with different part-of value does not match",
			obj:  &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Labels: otherLabels}},
			want: false,
		},
		{
			name: "configmap with part-of=argocd matches",
			obj:  &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Labels: argoLabels}},
			want: true,
		},
		{
			name: "tombstone wrapping labeled secret matches",
			obj: cache.DeletedFinalStateUnknown{
				Obj: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Labels: argoLabels}},
			},
			want: true,
		},
		{
			name: "tombstone wrapping unlabeled secret does not match",
			obj:  cache.DeletedFinalStateUnknown{Obj: &corev1.Secret{}},
			want: false,
		},
		{
			name: "unknown type does not match",
			obj:  "unexpected-type",
			want: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, isSettingsObject(tc.obj))
		})
	}
}
// TestIsArgoCDConfigMap covers name-based detection of the argocd-cm
// ConfigMap, including tombstone unwrapping and the fail-closed path for
// unexpected types.
func TestIsArgoCDConfigMap(t *testing.T) {
	argoCDCM := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: common.ArgoCDConfigMapName}}
	rbacCM := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: common.ArgoCDRBACConfigMapName}}
	testCases := []struct {
		name string
		obj  any
		want bool
	}{
		{
			name: "argocd-cm matches",
			obj:  argoCDCM,
			want: true,
		},
		{
			name: "other configmap does not match",
			obj:  rbacCM,
			want: false,
		},
		{
			name: "tombstone wrapping argocd-cm matches",
			obj:  cache.DeletedFinalStateUnknown{Obj: argoCDCM},
			want: true,
		},
		{
			name: "tombstone wrapping other configmap does not match",
			obj:  cache.DeletedFinalStateUnknown{Obj: rbacCM},
			want: false,
		},
		{
			name: "unknown type does not match",
			obj:  "unexpected-type",
			want: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, isArgoCDConfigMap(tc.obj))
		})
	}
}