mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-04-04 07:48:48 +02:00
Compare commits
20 Commits
dependabot
...
release-3.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f50abb6596 | ||
|
|
a244f7cb7a | ||
|
|
f4e7a6e604 | ||
|
|
dfa079b5e3 | ||
|
|
8550f60a05 | ||
|
|
d29ec76295 | ||
|
|
249b91d75b | ||
|
|
ed4c63ba83 | ||
|
|
cbdc3f1397 | ||
|
|
b66dea4282 | ||
|
|
aced2b1b36 | ||
|
|
ea71adbae5 | ||
|
|
5ed403cf60 | ||
|
|
9044c6c0ff | ||
|
|
3157fb15a4 | ||
|
|
e70034a44b | ||
|
|
5deef68eaf | ||
|
|
21e13a621e | ||
|
|
226178c1a5 | ||
|
|
d91a2ab3bf |
@@ -1851,7 +1851,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
logCtx = logCtx.WithField(k, v.Milliseconds())
|
||||
}
|
||||
|
||||
ctrl.normalizeApplication(origApp, app)
|
||||
ctrl.normalizeApplication(app)
|
||||
ts.AddCheckpoint("normalize_application_ms")
|
||||
|
||||
tree, err := ctrl.setAppManagedResources(destCluster, app, compareResult)
|
||||
@@ -2090,7 +2090,8 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
|
||||
}
|
||||
|
||||
// normalizeApplication normalizes an application.spec and additionally persists updates if it changed
|
||||
func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Application) {
|
||||
func (ctrl *ApplicationController) normalizeApplication(app *appv1.Application) {
|
||||
orig := app.DeepCopy()
|
||||
app.Spec = *argo.NormalizeApplicationSpec(&app.Spec)
|
||||
logCtx := log.WithFields(applog.GetAppLogFields(app))
|
||||
|
||||
|
||||
@@ -132,11 +132,11 @@ func (c *clusterInfoUpdater) getUpdatedClusterInfo(ctx context.Context, apps []*
|
||||
continue
|
||||
}
|
||||
}
|
||||
destCluster, err := argo.GetDestinationCluster(ctx, a.Spec.Destination, c.db)
|
||||
destServer, err := argo.GetDestinationServer(ctx, a.Spec.Destination, c.db)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if destCluster.Server == cluster.Server {
|
||||
if destServer == cluster.Server {
|
||||
appCount++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,6 +101,121 @@ func TestClusterSecretUpdater(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetUpdatedClusterInfo_AppCount(t *testing.T) {
|
||||
const fakeNamespace = "fake-ns"
|
||||
const clusterServer = "https://prod.example.com"
|
||||
const clusterName = "prod"
|
||||
|
||||
emptyArgoCDConfigMap := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDConfigMapName,
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
|
||||
},
|
||||
Data: map[string]string{},
|
||||
}
|
||||
argoCDSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDSecretName,
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
|
||||
},
|
||||
Data: map[string][]byte{"admin.password": nil, "server.secretkey": nil},
|
||||
}
|
||||
clusterSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "prod-cluster",
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
|
||||
Annotations: map[string]string{
|
||||
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"name": []byte(clusterName),
|
||||
"server": []byte(clusterServer),
|
||||
"config": []byte("{}"),
|
||||
},
|
||||
}
|
||||
|
||||
kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecret, clusterSecret)
|
||||
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
|
||||
argoDB := db.NewDB(fakeNamespace, settingsManager, kubeclientset)
|
||||
|
||||
apps := []*v1alpha1.Application{
|
||||
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Name: clusterName}}},
|
||||
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Server: clusterServer}}},
|
||||
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Server: "https://other.example.com"}}},
|
||||
}
|
||||
|
||||
updater := &clusterInfoUpdater{db: argoDB, namespace: fakeNamespace}
|
||||
cluster := v1alpha1.Cluster{Server: clusterServer}
|
||||
|
||||
info := updater.getUpdatedClusterInfo(t.Context(), apps, cluster, nil, metav1.Now())
|
||||
|
||||
assert.Equal(t, int64(2), info.ApplicationsCount)
|
||||
}
|
||||
|
||||
func TestGetUpdatedClusterInfo_AmbiguousName(t *testing.T) {
|
||||
const fakeNamespace = "fake-ns"
|
||||
const clusterServer = "https://prod.example.com"
|
||||
const clusterName = "prod"
|
||||
|
||||
emptyArgoCDConfigMap := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDConfigMapName,
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
|
||||
},
|
||||
Data: map[string]string{},
|
||||
}
|
||||
argoCDSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDSecretName,
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
|
||||
},
|
||||
Data: map[string][]byte{"admin.password": nil, "server.secretkey": nil},
|
||||
}
|
||||
makeClusterSecret := func(secretName, server string) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secretName,
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
|
||||
Annotations: map[string]string{
|
||||
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"name": []byte(clusterName),
|
||||
"server": []byte(server),
|
||||
"config": []byte("{}"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Two secrets share the same cluster name
|
||||
kubeclientset := fake.NewClientset(
|
||||
emptyArgoCDConfigMap, argoCDSecret,
|
||||
makeClusterSecret("prod-cluster-1", clusterServer),
|
||||
makeClusterSecret("prod-cluster-2", "https://prod2.example.com"),
|
||||
)
|
||||
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
|
||||
argoDB := db.NewDB(fakeNamespace, settingsManager, kubeclientset)
|
||||
|
||||
apps := []*v1alpha1.Application{
|
||||
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Name: clusterName}}},
|
||||
}
|
||||
|
||||
updater := &clusterInfoUpdater{db: argoDB, namespace: fakeNamespace}
|
||||
cluster := v1alpha1.Cluster{Server: clusterServer}
|
||||
|
||||
info := updater.getUpdatedClusterInfo(t.Context(), apps, cluster, nil, metav1.Now())
|
||||
|
||||
assert.Equal(t, int64(0), info.ApplicationsCount, "ambiguous name should not count app")
|
||||
}
|
||||
|
||||
func TestUpdateClusterLabels(t *testing.T) {
|
||||
shouldNotBeInvoked := func(_ context.Context, _ *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
|
||||
shouldNotHappen := errors.New("if an error happens here, something's wrong")
|
||||
|
||||
@@ -76,6 +76,21 @@ func isPostDeleteHook(obj *unstructured.Unstructured) bool {
|
||||
return isHookOfType(obj, PostDeleteHookType)
|
||||
}
|
||||
|
||||
// hasGitOpsEngineSyncPhaseHook is true when gitops-engine would run the resource during a sync
|
||||
// phase (PreSync, Sync, PostSync, SyncFail). PreDelete/PostDelete are not sync phases;
|
||||
// without this check, state reconciliation drops such resources
|
||||
// entirely because isPreDeleteHook/isPostDeleteHook match any comma-separated value.
|
||||
// HookTypeSkip is omitted as it is not a sync phase.
|
||||
func hasGitOpsEngineSyncPhaseHook(obj *unstructured.Unstructured) bool {
|
||||
for _, t := range hook.Types(obj) {
|
||||
switch t {
|
||||
case common.HookTypePreSync, common.HookTypeSync, common.HookTypePostSync, common.HookTypeSyncFail:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// executeHooks is a generic function to execute hooks of a specified type
|
||||
func (ctrl *ApplicationController) executeHooks(hookType HookType, app *appv1.Application, proj *appv1.AppProject, liveObjs map[kube.ResourceKey]*unstructured.Unstructured, config *rest.Config, logCtx *log.Entry) (bool, error) {
|
||||
appLabelKey, err := ctrl.settingsMgr.GetAppInstanceLabelKey()
|
||||
|
||||
@@ -192,6 +192,92 @@ func TestIsPostDeleteHook(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestPartitionTargetObjsForSync covers partitionTargetObjsForSync in state.go.
|
||||
func TestPartitionTargetObjsForSync(t *testing.T) {
|
||||
newObj := func(name string, annot map[string]string) *unstructured.Unstructured {
|
||||
u := &unstructured.Unstructured{}
|
||||
u.SetName(name)
|
||||
u.SetAnnotations(annot)
|
||||
return u
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
in []*unstructured.Unstructured
|
||||
wantNames []string
|
||||
wantPreDelete bool
|
||||
wantPostDelete bool
|
||||
}{
|
||||
{
|
||||
name: "PostSync with PreDelete and PostDelete in same annotation stays in sync set",
|
||||
in: []*unstructured.Unstructured{
|
||||
newObj("combined", map[string]string{"argocd.argoproj.io/hook": "PostSync,PreDelete,PostDelete"}),
|
||||
},
|
||||
wantNames: []string{"combined"},
|
||||
wantPreDelete: true,
|
||||
wantPostDelete: true,
|
||||
},
|
||||
{
|
||||
name: "PreDelete-only manifest excluded from sync",
|
||||
in: []*unstructured.Unstructured{
|
||||
newObj("pre-del", map[string]string{"argocd.argoproj.io/hook": "PreDelete"}),
|
||||
},
|
||||
wantNames: nil,
|
||||
wantPreDelete: true,
|
||||
wantPostDelete: false,
|
||||
},
|
||||
{
|
||||
name: "PostDelete-only manifest excluded from sync",
|
||||
in: []*unstructured.Unstructured{
|
||||
newObj("post-del", map[string]string{"argocd.argoproj.io/hook": "PostDelete"}),
|
||||
},
|
||||
wantNames: nil,
|
||||
wantPreDelete: false,
|
||||
wantPostDelete: true,
|
||||
},
|
||||
{
|
||||
name: "Helm pre-delete only excluded from sync",
|
||||
in: []*unstructured.Unstructured{
|
||||
newObj("helm-pre-del", map[string]string{"helm.sh/hook": "pre-delete"}),
|
||||
},
|
||||
wantNames: nil,
|
||||
wantPreDelete: true,
|
||||
wantPostDelete: false,
|
||||
},
|
||||
{
|
||||
name: "Helm pre-install with pre-delete stays in sync (sync-phase hook wins)",
|
||||
in: []*unstructured.Unstructured{
|
||||
newObj("helm-mixed", map[string]string{"helm.sh/hook": "pre-install,pre-delete"}),
|
||||
},
|
||||
wantNames: []string{"helm-mixed"},
|
||||
wantPreDelete: true,
|
||||
wantPostDelete: false,
|
||||
},
|
||||
{
|
||||
name: "Non-hook resource unchanged",
|
||||
in: []*unstructured.Unstructured{
|
||||
newObj("pod", map[string]string{"app": "x"}),
|
||||
},
|
||||
wantNames: []string{"pod"},
|
||||
wantPreDelete: false,
|
||||
wantPostDelete: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, hasPre, hasPost := partitionTargetObjsForSync(tt.in)
|
||||
var names []string
|
||||
for _, o := range got {
|
||||
names = append(names, o.GetName())
|
||||
}
|
||||
assert.Equal(t, tt.wantNames, names)
|
||||
assert.Equal(t, tt.wantPreDelete, hasPre, "hasPreDeleteHooks")
|
||||
assert.Equal(t, tt.wantPostDelete, hasPost, "hasPostDeleteHooks")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiHookOfType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -543,6 +543,28 @@ func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application
|
||||
return ns != nil && ns.GetKind() == kubeutil.NamespaceKind && ns.GetName() == app.Spec.Destination.Namespace && app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.ManagedNamespaceMetadata != nil
|
||||
}
|
||||
|
||||
// partitionTargetObjsForSync returns the manifest subset passed to gitops-engine sync, and whether
|
||||
// the full manifest set declared PreDelete and/or PostDelete hooks (for finalizer handling).
|
||||
// Uses isPreDeleteHook / isPostDeleteHook / hasGitOpsEngineSyncPhaseHook from hook.go.
|
||||
func partitionTargetObjsForSync(targetObjs []*unstructured.Unstructured) (syncObjs []*unstructured.Unstructured, hasPreDeleteHooks, hasPostDeleteHooks bool) {
|
||||
for _, obj := range targetObjs {
|
||||
if isPreDeleteHook(obj) {
|
||||
hasPreDeleteHooks = true
|
||||
if !hasGitOpsEngineSyncPhaseHook(obj) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if isPostDeleteHook(obj) {
|
||||
hasPostDeleteHooks = true
|
||||
if !hasGitOpsEngineSyncPhaseHook(obj) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
syncObjs = append(syncObjs, obj)
|
||||
}
|
||||
return syncObjs, hasPreDeleteHooks, hasPostDeleteHooks
|
||||
}
|
||||
|
||||
// CompareAppState compares application git state to the live app state, using the specified
|
||||
// revision and supplied source. If revision or overrides are empty, then compares against
|
||||
// revision and overrides in the app spec.
|
||||
@@ -770,24 +792,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
}
|
||||
}
|
||||
}
|
||||
hasPreDeleteHooks := false
|
||||
hasPostDeleteHooks := false
|
||||
// Filter out PreDelete and PostDelete hooks from targetObjs since they should not be synced
|
||||
// as regular resources. They are only executed during deletion.
|
||||
var targetObjsForSync []*unstructured.Unstructured
|
||||
for _, obj := range targetObjs {
|
||||
if isPreDeleteHook(obj) {
|
||||
hasPreDeleteHooks = true
|
||||
// Skip PreDelete hooks - they are not synced, only executed during deletion
|
||||
continue
|
||||
}
|
||||
if isPostDeleteHook(obj) {
|
||||
hasPostDeleteHooks = true
|
||||
// Skip PostDelete hooks - they are not synced, only executed after deletion
|
||||
continue
|
||||
}
|
||||
targetObjsForSync = append(targetObjsForSync, obj)
|
||||
}
|
||||
targetObjsForSync, hasPreDeleteHooks, hasPostDeleteHooks := partitionTargetObjsForSync(targetObjs)
|
||||
|
||||
reconciliation := sync.Reconcile(targetObjsForSync, liveObjByKey, app.Spec.Destination.Namespace, infoProvider)
|
||||
ts.AddCheckpoint("live_ms")
|
||||
|
||||
@@ -1,2 +1,5 @@
|
||||
This page is populated for released Argo CD versions. Use the version selector to view this table for a specific
|
||||
version.
|
||||
| Argo CD version | Kubernetes versions |
|
||||
|-----------------|---------------------|
|
||||
| 3.4 | v1.35, v1.34, v1.33, v1.32 |
|
||||
| 3.3 | v1.35, v1.34, v1.33, v1.32 |
|
||||
| 3.2 | v1.34, v1.33, v1.32, v1.31 |
|
||||
|
||||
@@ -90,6 +90,241 @@ source:
|
||||
ignoreMissingValueFiles: true
|
||||
```
|
||||
|
||||
## Glob Patterns in Value Files
|
||||
|
||||
Glob patterns can be used in `valueFiles` entries to match multiple files at once. This is useful
|
||||
when the set of environment-specific override files is not known in advance, or when you want to
|
||||
pick up new files automatically without updating the Application spec.
|
||||
|
||||
```bash
|
||||
# Single quotes prevent the shell from expanding the glob before Argo CD receives it
|
||||
argocd app set helm-guestbook --values 'envs/*.yaml'
|
||||
```
|
||||
|
||||
In the declarative syntax:
|
||||
|
||||
```yaml
|
||||
source:
|
||||
helm:
|
||||
valueFiles:
|
||||
- envs/*.yaml
|
||||
```
|
||||
|
||||
### Supported pattern syntax
|
||||
|
||||
Glob expansion uses the [doublestar](https://github.com/bmatcuk/doublestar) library.
|
||||
|
||||
| Pattern | Description |
|
||||
|---------|-------------|
|
||||
| `*` | Matches any sequence of non-separator characters within a single directory level |
|
||||
| `?` | Matches any single non-separator character |
|
||||
| `[abc]` | Matches one of the characters listed inside the brackets |
|
||||
| `[a-z]` | Matches any character in the given range |
|
||||
| `**` | Matches any sequence of characters including `/` (recursive across directory levels) |
|
||||
|
||||
### How files are passed to Helm
|
||||
|
||||
Each matched file is passed to `helm template` as a separate `--values <path>` flag, in the same
|
||||
order they appear after expansion. This is identical to listing each file individually in
|
||||
`valueFiles`. Argo CD does the expansion before invoking Helm.
|
||||
|
||||
Matched files are expanded **in-place** within the `valueFiles` list and sorted in **lexical
|
||||
(alphabetical) order**. Because Helm gives higher precedence to later `--values` flags, lexical
|
||||
order determines which file wins when the same key appears in multiple files.
|
||||
|
||||
```
|
||||
envs/
|
||||
a.yaml # sets foo: a-value
|
||||
b.yaml # sets foo: b-value
|
||||
```
|
||||
|
||||
```yaml
|
||||
# envs/*.yaml expands to: envs/a.yaml, envs/b.yaml (lexical order)
|
||||
# b.yaml is last → foo = "b-value"
|
||||
source:
|
||||
helm:
|
||||
valueFiles:
|
||||
- envs/*.yaml
|
||||
```
|
||||
|
||||
When you have multiple entries in `valueFiles`, the relative order between entries is preserved.
|
||||
Glob expansion only reorders files within a single pattern:
|
||||
|
||||
```yaml
|
||||
valueFiles:
|
||||
- base.yaml # passed first
|
||||
- overrides/*.yaml # expanded in lexical order, passed after base.yaml
|
||||
- final.yaml # passed last, highest precedence
|
||||
```
|
||||
|
||||
### Recursive matching with `**`
|
||||
|
||||
Use `**` to match files at any depth below a directory:
|
||||
|
||||
```yaml
|
||||
# envs/**/*.yaml processes each directory's own files before descending into subdirectories,
|
||||
# with directories and files sorted alphabetically at each level.
|
||||
#
|
||||
# envs/a.yaml ← 'a' (flat file in envs/)
|
||||
# envs/z.yaml ← 'z' (flat file in envs/, processed before descending)
|
||||
# envs/nested/c.yaml ← inside envs/nested/, processed after envs/ flat files
|
||||
#
|
||||
# nested/c.yaml is last → foo = "nested-value"
|
||||
source:
|
||||
helm:
|
||||
valueFiles:
|
||||
- envs/**/*.yaml
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> `**` matches zero or more path segments, so `envs/**/*.yaml` also matches files directly
|
||||
> inside `envs/` (not just subdirectories). doublestar traverses directories in lexical order
|
||||
> and processes each directory's own files (alphabetically) before descending into its
|
||||
> subdirectories. This means `envs/z.yaml` always comes before `envs/nested/c.yaml`, even
|
||||
> though `'n' < 'z'` alphabetically. To make ordering fully explicit and predictable,
|
||||
> use numeric prefixes (see [Naming conventions](#naming-conventions)).
|
||||
|
||||
### Using environment variables in glob patterns
|
||||
|
||||
[Build environment variables](./build-environment.md) are substituted **before** the glob is
|
||||
evaluated, so you can construct patterns dynamically:
|
||||
|
||||
```yaml
|
||||
source:
|
||||
helm:
|
||||
valueFiles:
|
||||
- envs/$ARGOCD_APP_NAME/*.yaml
|
||||
```
|
||||
|
||||
This lets a single Application template expand to the right set of files per app name.
|
||||
|
||||
### Glob patterns with multiple sources
|
||||
|
||||
Glob patterns work with [value files from an external repository](./multiple_sources.md#helm-value-files-from-external-git-repository).
|
||||
The `$ref` variable is resolved first to the external repo's root, and the rest of the pattern is
|
||||
evaluated within that repo's directory tree:
|
||||
|
||||
```yaml
|
||||
sources:
|
||||
- repoURL: https://git.example.com/my-configs.git
|
||||
ref: configs
|
||||
- repoURL: https://git.example.com/my-chart.git
|
||||
path: chart
|
||||
helm:
|
||||
valueFiles:
|
||||
- $configs/envs/*.yaml # matches files in the 'my-configs' repo under envs/
|
||||
```
|
||||
|
||||
### Naming conventions
|
||||
|
||||
Because files are sorted lexically, the sort order controls merge precedence. A common pattern is
|
||||
to use a numeric prefix to make the intended order explicit:
|
||||
|
||||
```
|
||||
values/
|
||||
00-defaults.yaml
|
||||
10-region.yaml
|
||||
20-env.yaml
|
||||
30-override.yaml
|
||||
```
|
||||
|
||||
```yaml
|
||||
valueFiles:
|
||||
- values/*.yaml
|
||||
# expands to: 00-defaults.yaml, 10-region.yaml, 20-env.yaml, 30-override.yaml
|
||||
# 30-override.yaml has the highest precedence
|
||||
```
|
||||
|
||||
Without a prefix, pure alphabetical ordering applies. Be careful with names that sort
|
||||
unexpectedly, for example `values-10.yaml` sorts before `values-9.yaml` because `"1"` < `"9"`
|
||||
lexically.
|
||||
|
||||
### Constraints and limitations
|
||||
|
||||
**Path boundary**: Glob patterns cannot match files outside the repository root, even with
|
||||
patterns like `../../secrets/*.yaml`. Argo CD resolves the pattern's base path against the
|
||||
repository root before expanding it, and any match that would escape the root is rejected.
|
||||
|
||||
**Symlinks**: Argo CD follows symlinks when checking the path boundary. A symlink that lives
|
||||
inside the repository but points to a target outside the repository root is rejected, even though
|
||||
the symlink's own path is within the repo. This check applies to every file produced by glob
|
||||
expansion, including multi-hop symlink chains. Symlinks that resolve to a target still inside the
|
||||
repository are allowed.
|
||||
|
||||
**Absolute paths**: A path starting with `/` is treated as relative to the **repository root**,
|
||||
not the filesystem root. The pattern `/configs/*.yaml` matches files in the `configs/` directory
|
||||
at the top of the repository.
|
||||
|
||||
**Remote URLs are not glob-expanded**: Entries that are remote URLs (e.g.
|
||||
`https://raw.githubusercontent.com/.../values.yaml`) are passed to Helm as-is. Glob characters
|
||||
in a URL have no special meaning and will cause the URL to fail if the literal characters are not
|
||||
part of the URL.
|
||||
|
||||
**Shell quoting on the CLI**: Shells expand glob patterns before passing arguments to programs.
|
||||
Always quote patterns to prevent unintended shell expansion:
|
||||
|
||||
```bash
|
||||
# Correct: single quotes pass the literal pattern to Argo CD
|
||||
argocd app set myapp --values 'envs/*.yaml'
|
||||
|
||||
# Incorrect: the shell expands *.yaml against the current directory first
|
||||
argocd app set myapp --values envs/*.yaml
|
||||
```
|
||||
|
||||
### Deduplication
|
||||
|
||||
Each file is included only once, but **explicit entries take priority over glob matches** when
|
||||
determining position. If a file appears both in a glob pattern and as an explicit entry, the glob
|
||||
skips it and the explicit entry places it at its declared position.
|
||||
|
||||
```yaml
|
||||
valueFiles:
|
||||
- envs/*.yaml # expands to base.yaml, prod.yaml — but prod.yaml is listed explicitly below,
|
||||
# so the glob skips it: only base.yaml is added here
|
||||
- envs/prod.yaml # placed here at the end, giving it highest Helm precedence
|
||||
```
|
||||
|
||||
This means you can use a glob to pick up all files in a directory and then pin a specific file to
|
||||
the end (highest precedence) by listing it explicitly after the glob.
|
||||
|
||||
If the same file (same absolute path) is matched by two glob patterns, it is included at the
|
||||
position of the first match. Subsequent glob matches for that exact path are silently dropped.
|
||||
Files with the same name but at different paths are treated as distinct files and are always included.
|
||||
|
||||
```yaml
|
||||
valueFiles:
|
||||
- envs/*.yaml # matches envs/base.yaml, envs/prod.yaml
|
||||
- envs/**/*.yaml # envs/prod.yaml already matched above and is skipped;
|
||||
# envs/nested/prod.yaml is a different path and is still included
|
||||
```
|
||||
|
||||
### No-match behavior
|
||||
|
||||
If a glob pattern matches no files, Argo CD saves the Application spec (the spec is not invalid and
|
||||
the files may be added to the repository later) and surfaces a `ComparisonError` condition on the
|
||||
Application:
|
||||
|
||||
```
|
||||
values file glob "nonexistent/*.yaml" matched no files
|
||||
```
|
||||
|
||||
The app will remain in a degraded state until the pattern matches at least one file or the pattern
|
||||
is removed. No spec update is required once the files are added to the repository.
|
||||
|
||||
To silently skip a pattern that matches no files instead of raising an error, combine the glob with
|
||||
`ignoreMissingValueFiles`:
|
||||
|
||||
```yaml
|
||||
source:
|
||||
helm:
|
||||
valueFiles:
|
||||
- envs/*.yaml
|
||||
ignoreMissingValueFiles: true
|
||||
```
|
||||
|
||||
This is useful for implementing a default/override pattern where override files may not exist in
|
||||
every environment.
|
||||
|
||||
## Values
|
||||
|
||||
Argo CD supports the equivalent of a values file directly in the Application manifest using the `source.helm.valuesObject` key.
|
||||
|
||||
191
gitops-engine/pkg/cache/cluster.go
vendored
191
gitops-engine/pkg/cache/cluster.go
vendored
@@ -92,6 +92,15 @@ const (
|
||||
RespectRbacStrict
|
||||
)
|
||||
|
||||
// callState tracks whether action() has been called on a resource during hierarchy iteration.
|
||||
type callState int
|
||||
|
||||
const (
|
||||
notCalled callState = iota // action() has not been called yet
|
||||
inProgress // action() is currently being processed (in call stack)
|
||||
completed // action() has been called and processing is complete
|
||||
)
|
||||
|
||||
type apiMeta struct {
|
||||
namespaced bool
|
||||
// watchCancel stops the watch of all resources for this API. This gets called when the cache is invalidated or when
|
||||
@@ -211,7 +220,7 @@ func NewClusterCache(config *rest.Config, opts ...UpdateSettingsFunc) *clusterCa
|
||||
listRetryLimit: 1,
|
||||
listRetryUseBackoff: false,
|
||||
listRetryFunc: ListRetryFuncNever,
|
||||
parentUIDToChildren: make(map[types.UID][]kube.ResourceKey),
|
||||
parentUIDToChildren: make(map[types.UID]map[kube.ResourceKey]struct{}),
|
||||
}
|
||||
for i := range opts {
|
||||
opts[i](cache)
|
||||
@@ -271,10 +280,11 @@ type clusterCache struct {
|
||||
|
||||
respectRBAC int
|
||||
|
||||
// Parent-to-children index for O(1) hierarchy traversal
|
||||
// Maps any resource's UID to its direct children's ResourceKeys
|
||||
// Eliminates need for O(n) graph building during hierarchy traversal
|
||||
parentUIDToChildren map[types.UID][]kube.ResourceKey
|
||||
// Parent-to-children index for O(1) child lookup during hierarchy traversal
|
||||
// Maps any resource's UID to a set of its direct children's ResourceKeys
|
||||
// Using a set eliminates O(k) duplicate checking on insertions
|
||||
// Used for cross-namespace hierarchy traversal; namespaced traversal still builds a graph
|
||||
parentUIDToChildren map[types.UID]map[kube.ResourceKey]struct{}
|
||||
}
|
||||
|
||||
type clusterCacheSync struct {
|
||||
@@ -495,27 +505,35 @@ func (c *clusterCache) setNode(n *Resource) {
|
||||
for k, v := range ns {
|
||||
// update child resource owner references
|
||||
if n.isInferredParentOf != nil && mightHaveInferredOwner(v) {
|
||||
v.setOwnerRef(n.toOwnerRef(), n.isInferredParentOf(k))
|
||||
shouldBeParent := n.isInferredParentOf(k)
|
||||
v.setOwnerRef(n.toOwnerRef(), shouldBeParent)
|
||||
// Update index inline for inferred ref changes.
|
||||
// Note: The removal case (shouldBeParent=false) is currently unreachable for
|
||||
// StatefulSet→PVC relationships because Kubernetes makes volumeClaimTemplates
|
||||
// immutable. We include it for defensive correctness and future-proofing.
|
||||
if n.Ref.UID != "" {
|
||||
if shouldBeParent {
|
||||
c.addToParentUIDToChildren(n.Ref.UID, k)
|
||||
} else {
|
||||
c.removeFromParentUIDToChildren(n.Ref.UID, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
if mightHaveInferredOwner(n) && v.isInferredParentOf != nil {
|
||||
n.setOwnerRef(v.toOwnerRef(), v.isInferredParentOf(n.ResourceKey()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// rebuildParentToChildrenIndex rebuilds the parent-to-children index after a full sync
|
||||
// This is called after initial sync to ensure all parent-child relationships are tracked
|
||||
func (c *clusterCache) rebuildParentToChildrenIndex() {
|
||||
// Clear existing index
|
||||
c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
|
||||
|
||||
// Rebuild parent-to-children index from all resources with owner refs
|
||||
for _, resource := range c.resources {
|
||||
key := resource.ResourceKey()
|
||||
for _, ownerRef := range resource.OwnerRefs {
|
||||
if ownerRef.UID != "" {
|
||||
c.addToParentUIDToChildren(ownerRef.UID, key)
|
||||
childKey := n.ResourceKey()
|
||||
shouldBeParent := v.isInferredParentOf(childKey)
|
||||
n.setOwnerRef(v.toOwnerRef(), shouldBeParent)
|
||||
// Update index inline for inferred ref changes.
|
||||
// Note: The removal case (shouldBeParent=false) is currently unreachable for
|
||||
// StatefulSet→PVC relationships because Kubernetes makes volumeClaimTemplates
|
||||
// immutable. We include it for defensive correctness and future-proofing.
|
||||
if v.Ref.UID != "" {
|
||||
if shouldBeParent {
|
||||
c.addToParentUIDToChildren(v.Ref.UID, childKey)
|
||||
} else {
|
||||
c.removeFromParentUIDToChildren(v.Ref.UID, childKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -524,31 +542,29 @@ func (c *clusterCache) rebuildParentToChildrenIndex() {
|
||||
|
||||
// addToParentUIDToChildren adds a child to the parent-to-children index
|
||||
func (c *clusterCache) addToParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
|
||||
// Check if child is already in the list to avoid duplicates
|
||||
children := c.parentUIDToChildren[parentUID]
|
||||
for _, existing := range children {
|
||||
if existing == childKey {
|
||||
return // Already exists, no need to add
|
||||
}
|
||||
// Get or create the set for this parent
|
||||
childrenSet := c.parentUIDToChildren[parentUID]
|
||||
if childrenSet == nil {
|
||||
childrenSet = make(map[kube.ResourceKey]struct{})
|
||||
c.parentUIDToChildren[parentUID] = childrenSet
|
||||
}
|
||||
c.parentUIDToChildren[parentUID] = append(children, childKey)
|
||||
// Add child to set (O(1) operation, automatically handles duplicates)
|
||||
childrenSet[childKey] = struct{}{}
|
||||
}
|
||||
|
||||
// removeFromParentUIDToChildren removes a child from the parent-to-children index
|
||||
func (c *clusterCache) removeFromParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
|
||||
children := c.parentUIDToChildren[parentUID]
|
||||
for i, existing := range children {
|
||||
if existing == childKey {
|
||||
// Remove by swapping with last element and truncating
|
||||
children[i] = children[len(children)-1]
|
||||
c.parentUIDToChildren[parentUID] = children[:len(children)-1]
|
||||
childrenSet := c.parentUIDToChildren[parentUID]
|
||||
if childrenSet == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Clean up empty entries
|
||||
if len(c.parentUIDToChildren[parentUID]) == 0 {
|
||||
delete(c.parentUIDToChildren, parentUID)
|
||||
}
|
||||
return
|
||||
}
|
||||
// Remove child from set (O(1) operation)
|
||||
delete(childrenSet, childKey)
|
||||
|
||||
// Clean up empty sets to avoid memory leaks
|
||||
if len(childrenSet) == 0 {
|
||||
delete(c.parentUIDToChildren, parentUID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1005,7 +1021,7 @@ func (c *clusterCache) sync() error {
|
||||
c.apisMeta = make(map[schema.GroupKind]*apiMeta)
|
||||
c.resources = make(map[kube.ResourceKey]*Resource)
|
||||
c.namespacedResources = make(map[schema.GroupKind]bool)
|
||||
c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
|
||||
c.parentUIDToChildren = make(map[types.UID]map[kube.ResourceKey]struct{})
|
||||
config := c.config
|
||||
version, err := c.kubectl.GetServerVersion(config)
|
||||
if err != nil {
|
||||
@@ -1104,9 +1120,6 @@ func (c *clusterCache) sync() error {
|
||||
return fmt.Errorf("failed to sync cluster %s: %w", c.config.Host, err)
|
||||
}
|
||||
|
||||
// Rebuild orphaned children index after all resources are loaded
|
||||
c.rebuildParentToChildrenIndex()
|
||||
|
||||
c.log.Info("Cluster successfully synced")
|
||||
return nil
|
||||
}
|
||||
@@ -1186,8 +1199,11 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
// Track visited resources to avoid cycles
|
||||
visited := make(map[kube.ResourceKey]int)
|
||||
// Track whether action() has been called on each resource (notCalled/inProgress/completed).
|
||||
// This is shared across processNamespaceHierarchy and processCrossNamespaceChildren.
|
||||
// Note: This is distinct from 'crossNSTraversed' in processCrossNamespaceChildren, which tracks
|
||||
// whether we've traversed a cluster-scoped key's cross-namespace children.
|
||||
actionCallState := make(map[kube.ResourceKey]callState)
|
||||
|
||||
// Group keys by namespace for efficient processing
|
||||
keysPerNamespace := make(map[string][]kube.ResourceKey)
|
||||
@@ -1203,12 +1219,18 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
|
||||
for namespace, namespaceKeys := range keysPerNamespace {
|
||||
nsNodes := c.nsIndex[namespace]
|
||||
graph := buildGraph(nsNodes)
|
||||
c.processNamespaceHierarchy(namespaceKeys, nsNodes, graph, visited, action)
|
||||
c.processNamespaceHierarchy(namespaceKeys, nsNodes, graph, actionCallState, action)
|
||||
}
|
||||
|
||||
// Process pre-computed cross-namespace children
|
||||
if clusterKeys, ok := keysPerNamespace[""]; ok {
|
||||
c.processCrossNamespaceChildren(clusterKeys, visited, action)
|
||||
// Track which cluster-scoped keys have had their cross-namespace children traversed.
|
||||
// This is distinct from 'actionCallState' - a resource may have had action() called
|
||||
// (i.e., its actionCallState is in the completed state) but not yet had its cross-namespace
|
||||
// children traversed. This prevents infinite recursion when resources have circular
|
||||
// ownerReferences.
|
||||
crossNSTraversed := make(map[kube.ResourceKey]bool)
|
||||
c.processCrossNamespaceChildren(clusterKeys, actionCallState, crossNSTraversed, action)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1216,12 +1238,21 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
|
||||
// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries.
|
||||
// It also handles multi-level hierarchies where cluster-scoped resources own other cluster-scoped resources
|
||||
// that in turn own namespaced resources (e.g., Provider -> ProviderRevision -> Deployment in Crossplane).
|
||||
// The crossNSTraversed map tracks which keys have already been processed to prevent infinite recursion
|
||||
// from circular ownerReferences (e.g., a resource that owns itself).
|
||||
func (c *clusterCache) processCrossNamespaceChildren(
|
||||
clusterScopedKeys []kube.ResourceKey,
|
||||
visited map[kube.ResourceKey]int,
|
||||
actionCallState map[kube.ResourceKey]callState,
|
||||
crossNSTraversed map[kube.ResourceKey]bool,
|
||||
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
|
||||
) {
|
||||
for _, clusterKey := range clusterScopedKeys {
|
||||
// Skip if already processed (cycle detection)
|
||||
if crossNSTraversed[clusterKey] {
|
||||
continue
|
||||
}
|
||||
crossNSTraversed[clusterKey] = true
|
||||
|
||||
// Get cluster-scoped resource to access its UID
|
||||
clusterResource := c.resources[clusterKey]
|
||||
if clusterResource == nil {
|
||||
@@ -1229,23 +1260,24 @@ func (c *clusterCache) processCrossNamespaceChildren(
|
||||
}
|
||||
|
||||
// Use parent-to-children index for O(1) lookup of direct children
|
||||
childKeys := c.parentUIDToChildren[clusterResource.Ref.UID]
|
||||
for _, childKey := range childKeys {
|
||||
childrenSet := c.parentUIDToChildren[clusterResource.Ref.UID]
|
||||
for childKey := range childrenSet {
|
||||
child := c.resources[childKey]
|
||||
if child == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
alreadyVisited := visited[childKey] != 0
|
||||
alreadyProcessed := actionCallState[childKey] != notCalled
|
||||
|
||||
// If child is cluster-scoped and was already visited by processNamespaceHierarchy,
|
||||
// If child is cluster-scoped and action() was already called by processNamespaceHierarchy,
|
||||
// we still need to recursively check for its cross-namespace children.
|
||||
// This handles multi-level hierarchies like: ClusterScoped -> ClusterScoped -> Namespaced
|
||||
// (e.g., Crossplane's Provider -> ProviderRevision -> Deployment)
|
||||
if alreadyVisited {
|
||||
if alreadyProcessed {
|
||||
if childKey.Namespace == "" {
|
||||
// Recursively process cross-namespace children of this cluster-scoped child
|
||||
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
|
||||
// The crossNSTraversed map prevents infinite recursion on circular ownerReferences
|
||||
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, actionCallState, crossNSTraversed, action)
|
||||
}
|
||||
continue
|
||||
}
|
||||
@@ -1258,16 +1290,16 @@ func (c *clusterCache) processCrossNamespaceChildren(
|
||||
|
||||
// Process this child
|
||||
if action(child, nsNodes) {
|
||||
visited[childKey] = 1
|
||||
actionCallState[childKey] = inProgress
|
||||
// Recursively process descendants using index-based traversal
|
||||
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
|
||||
c.iterateChildrenUsingIndex(child, nsNodes, actionCallState, action)
|
||||
|
||||
// If this child is also cluster-scoped, recursively process its cross-namespace children
|
||||
if childKey.Namespace == "" {
|
||||
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
|
||||
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, actionCallState, crossNSTraversed, action)
|
||||
}
|
||||
|
||||
visited[childKey] = 2
|
||||
actionCallState[childKey] = completed
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1278,14 +1310,14 @@ func (c *clusterCache) processCrossNamespaceChildren(
|
||||
func (c *clusterCache) iterateChildrenUsingIndex(
|
||||
parent *Resource,
|
||||
nsNodes map[kube.ResourceKey]*Resource,
|
||||
visited map[kube.ResourceKey]int,
|
||||
actionCallState map[kube.ResourceKey]callState,
|
||||
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
|
||||
) {
|
||||
// Look up direct children of this parent using the index
|
||||
childKeys := c.parentUIDToChildren[parent.Ref.UID]
|
||||
for _, childKey := range childKeys {
|
||||
if visited[childKey] != 0 {
|
||||
continue // Already visited or in progress
|
||||
childrenSet := c.parentUIDToChildren[parent.Ref.UID]
|
||||
for childKey := range childrenSet {
|
||||
if actionCallState[childKey] != notCalled {
|
||||
continue // action() already called or in progress
|
||||
}
|
||||
|
||||
child := c.resources[childKey]
|
||||
@@ -1300,10 +1332,10 @@ func (c *clusterCache) iterateChildrenUsingIndex(
|
||||
}
|
||||
|
||||
if action(child, nsNodes) {
|
||||
visited[childKey] = 1
|
||||
actionCallState[childKey] = inProgress
|
||||
// Recursively process this child's descendants
|
||||
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
|
||||
visited[childKey] = 2
|
||||
c.iterateChildrenUsingIndex(child, nsNodes, actionCallState, action)
|
||||
actionCallState[childKey] = completed
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1313,22 +1345,19 @@ func (c *clusterCache) processNamespaceHierarchy(
|
||||
namespaceKeys []kube.ResourceKey,
|
||||
nsNodes map[kube.ResourceKey]*Resource,
|
||||
graph map[kube.ResourceKey]map[types.UID]*Resource,
|
||||
visited map[kube.ResourceKey]int,
|
||||
actionCallState map[kube.ResourceKey]callState,
|
||||
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
|
||||
) {
|
||||
for _, key := range namespaceKeys {
|
||||
visited[key] = 0
|
||||
}
|
||||
for _, key := range namespaceKeys {
|
||||
res := c.resources[key]
|
||||
if visited[key] == 2 || !action(res, nsNodes) {
|
||||
if actionCallState[key] == completed || !action(res, nsNodes) {
|
||||
continue
|
||||
}
|
||||
visited[key] = 1
|
||||
actionCallState[key] = inProgress
|
||||
if _, ok := graph[key]; ok {
|
||||
for _, child := range graph[key] {
|
||||
if visited[child.ResourceKey()] == 0 && action(child, nsNodes) {
|
||||
child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
|
||||
if actionCallState[child.ResourceKey()] == notCalled && action(child, nsNodes) {
|
||||
child.iterateChildrenV2(graph, nsNodes, actionCallState, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
|
||||
if err != nil {
|
||||
c.log.V(2).Info(err.Error())
|
||||
return false
|
||||
@@ -1338,7 +1367,7 @@ func (c *clusterCache) processNamespaceHierarchy(
|
||||
}
|
||||
}
|
||||
}
|
||||
visited[key] = 2
|
||||
actionCallState[key] = completed
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1606,6 +1635,10 @@ func (c *clusterCache) onNodeRemoved(key kube.ResourceKey) {
|
||||
for k, v := range ns {
|
||||
if mightHaveInferredOwner(v) && existing.isInferredParentOf(k) {
|
||||
v.setOwnerRef(existing.toOwnerRef(), false)
|
||||
// Update index inline when removing inferred ref
|
||||
if existing.Ref.UID != "" {
|
||||
c.removeFromParentUIDToChildren(existing.Ref.UID, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
454
gitops-engine/pkg/cache/cluster_test.go
vendored
454
gitops-engine/pkg/cache/cluster_test.go
vendored
@@ -416,6 +416,128 @@ func TestStatefulSetOwnershipInferred(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestStatefulSetPVC_ParentToChildrenIndex verifies that inferred StatefulSet → PVC
|
||||
// relationships are correctly captured in the parentUIDToChildren index during initial sync.
|
||||
//
|
||||
// The index is updated inline when inferred owner refs are added in setNode()
|
||||
// (see the inferred parent handling section in clusterCache.setNode).
|
||||
func TestStatefulSetPVC_ParentToChildrenIndex(t *testing.T) {
|
||||
stsUID := types.UID("sts-uid-123")
|
||||
|
||||
// StatefulSet with volumeClaimTemplate named "data"
|
||||
sts := &appsv1.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: kube.StatefulSetKind},
|
||||
ObjectMeta: metav1.ObjectMeta{UID: stsUID, Name: "web", Namespace: "default"},
|
||||
Spec: appsv1.StatefulSetSpec{
|
||||
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "data"},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
// PVCs that match the StatefulSet's volumeClaimTemplate pattern: <template>-<sts>-<ordinal>
|
||||
// These have NO explicit owner references - the relationship is INFERRED
|
||||
pvc0 := &corev1.PersistentVolumeClaim{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
|
||||
ObjectMeta: metav1.ObjectMeta{UID: "pvc-0-uid", Name: "data-web-0", Namespace: "default"},
|
||||
}
|
||||
pvc1 := &corev1.PersistentVolumeClaim{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
|
||||
ObjectMeta: metav1.ObjectMeta{UID: "pvc-1-uid", Name: "data-web-1", Namespace: "default"},
|
||||
}
|
||||
|
||||
// Create cluster with all resources
|
||||
// Must add PersistentVolumeClaim to API resources since it's not in the default set
|
||||
cluster := newCluster(t, sts, pvc0, pvc1).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: kube.PersistentVolumeClaimKind},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"},
|
||||
Meta: metav1.APIResource{Namespaced: true},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the parentUIDToChildren index contains the inferred relationships
|
||||
cluster.lock.RLock()
|
||||
defer cluster.lock.RUnlock()
|
||||
|
||||
pvc0Key := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "data-web-0"}
|
||||
pvc1Key := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "data-web-1"}
|
||||
|
||||
children, ok := cluster.parentUIDToChildren[stsUID]
|
||||
require.True(t, ok, "StatefulSet should have entry in parentUIDToChildren index")
|
||||
require.Contains(t, children, pvc0Key, "PVC data-web-0 should be in StatefulSet's children (inferred relationship)")
|
||||
require.Contains(t, children, pvc1Key, "PVC data-web-1 should be in StatefulSet's children (inferred relationship)")
|
||||
|
||||
// Also verify the OwnerRefs were set correctly on the PVCs
|
||||
pvc0Resource := cluster.resources[pvc0Key]
|
||||
require.NotNil(t, pvc0Resource)
|
||||
require.Len(t, pvc0Resource.OwnerRefs, 1, "PVC0 should have inferred owner ref")
|
||||
require.Equal(t, stsUID, pvc0Resource.OwnerRefs[0].UID, "PVC0 owner should be the StatefulSet")
|
||||
|
||||
pvc1Resource := cluster.resources[pvc1Key]
|
||||
require.NotNil(t, pvc1Resource)
|
||||
require.Len(t, pvc1Resource.OwnerRefs, 1, "PVC1 should have inferred owner ref")
|
||||
require.Equal(t, stsUID, pvc1Resource.OwnerRefs[0].UID, "PVC1 owner should be the StatefulSet")
|
||||
}
|
||||
|
||||
// TestStatefulSetPVC_WatchEvent_IndexUpdated verifies that when a PVC is added
|
||||
// via watch event (after initial sync), both the inferred owner reference AND
|
||||
// the parentUIDToChildren index are updated correctly.
|
||||
//
|
||||
// This tests the inline index update logic in setNode() which updates the index
|
||||
// immediately when inferred owner refs are added.
|
||||
func TestStatefulSetPVC_WatchEvent_IndexUpdated(t *testing.T) {
|
||||
stsUID := types.UID("sts-uid-456")
|
||||
|
||||
// StatefulSet with volumeClaimTemplate
|
||||
sts := &appsv1.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: kube.StatefulSetKind},
|
||||
ObjectMeta: metav1.ObjectMeta{UID: stsUID, Name: "db", Namespace: "default"},
|
||||
Spec: appsv1.StatefulSetSpec{
|
||||
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "storage"},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
// Create cluster with ONLY the StatefulSet - PVC will be added via watch event
|
||||
cluster := newCluster(t, sts).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: kube.PersistentVolumeClaimKind},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"},
|
||||
Meta: metav1.APIResource{Namespaced: true},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
// PVC that matches the StatefulSet's volumeClaimTemplate pattern
|
||||
// Added via watch event AFTER initial sync
|
||||
pvc := &corev1.PersistentVolumeClaim{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
|
||||
ObjectMeta: metav1.ObjectMeta{UID: "pvc-watch-uid", Name: "storage-db-0", Namespace: "default"},
|
||||
}
|
||||
|
||||
// Simulate watch event adding the PVC
|
||||
cluster.lock.Lock()
|
||||
cluster.setNode(cluster.newResource(mustToUnstructured(pvc)))
|
||||
cluster.lock.Unlock()
|
||||
|
||||
cluster.lock.RLock()
|
||||
defer cluster.lock.RUnlock()
|
||||
|
||||
pvcKey := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "storage-db-0"}
|
||||
|
||||
// Verify the OwnerRef IS correctly set
|
||||
pvcResource := cluster.resources[pvcKey]
|
||||
require.NotNil(t, pvcResource, "PVC should exist in cache")
|
||||
require.Len(t, pvcResource.OwnerRefs, 1, "PVC should have inferred owner ref from StatefulSet")
|
||||
require.Equal(t, stsUID, pvcResource.OwnerRefs[0].UID, "Owner should be the StatefulSet")
|
||||
|
||||
// Verify the index IS updated for inferred refs via watch events
|
||||
children, indexUpdated := cluster.parentUIDToChildren[stsUID]
|
||||
require.True(t, indexUpdated, "Index should be updated when inferred refs are added via watch events")
|
||||
require.Contains(t, children, pvcKey, "PVC should be in StatefulSet's children (inferred relationship)")
|
||||
}
|
||||
|
||||
func TestEnsureSyncedSingleNamespace(t *testing.T) {
|
||||
obj1 := &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -2189,3 +2311,335 @@ func TestIterateHierarchyV2_NoDuplicatesCrossNamespace(t *testing.T) {
|
||||
assert.Equal(t, 1, visitCount["namespaced-child"], "namespaced child should be visited once")
|
||||
assert.Equal(t, 1, visitCount["cluster-child"], "cluster child should be visited once")
|
||||
}
|
||||
|
||||
func TestIterateHierarchyV2_CircularOwnerReference_NoStackOverflow(t *testing.T) {
|
||||
// Test that self-referencing resources (circular ownerReferences) don't cause stack overflow.
|
||||
// This reproduces the bug reported in https://github.com/argoproj/argo-cd/issues/26783
|
||||
// where a resource with an ownerReference pointing to itself caused infinite recursion.
|
||||
|
||||
// Create a cluster-scoped resource that owns itself (self-referencing)
|
||||
selfReferencingResource := &corev1.Namespace{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "self-referencing",
|
||||
UID: "self-ref-uid",
|
||||
ResourceVersion: "1",
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
Name: "self-referencing",
|
||||
UID: "self-ref-uid", // Points to itself
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
cluster := newCluster(t, selfReferencingResource).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
visitCount := 0
|
||||
// This should complete without stack overflow
|
||||
cluster.IterateHierarchyV2(
|
||||
[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(selfReferencingResource))},
|
||||
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
|
||||
visitCount++
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// The self-referencing resource should be visited exactly once
|
||||
assert.Equal(t, 1, visitCount, "self-referencing resource should be visited exactly once")
|
||||
}
|
||||
|
||||
func TestIterateHierarchyV2_CircularOwnerChain_NoStackOverflow(t *testing.T) {
|
||||
// Test that circular ownership chains (A -> B -> A) don't cause stack overflow.
|
||||
// This is a more complex case where two resources own each other.
|
||||
|
||||
resourceA := &corev1.Namespace{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "resource-a",
|
||||
UID: "uid-a",
|
||||
ResourceVersion: "1",
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
Name: "resource-b",
|
||||
UID: "uid-b", // A is owned by B
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
resourceB := &corev1.Namespace{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "resource-b",
|
||||
UID: "uid-b",
|
||||
ResourceVersion: "1",
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
Name: "resource-a",
|
||||
UID: "uid-a", // B is owned by A
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
cluster := newCluster(t, resourceA, resourceB).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
visitCount := make(map[string]int)
|
||||
// This should complete without stack overflow
|
||||
cluster.IterateHierarchyV2(
|
||||
[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(resourceA))},
|
||||
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
|
||||
visitCount[resource.Ref.Name]++
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// Each resource in the circular chain should be visited exactly once
|
||||
assert.Equal(t, 1, visitCount["resource-a"], "resource-a should be visited exactly once")
|
||||
assert.Equal(t, 1, visitCount["resource-b"], "resource-b should be visited exactly once")
|
||||
}
|
||||
|
||||
// BenchmarkSync_ParentToChildrenIndex measures the overhead of parent-to-children index
|
||||
// operations during sync. This benchmark was created to investigate performance regression
|
||||
// reported in https://github.com/argoproj/argo-cd/issues/26863
|
||||
//
|
||||
// The index is now maintained with O(1) operations (set-based) and updated inline
|
||||
// in setNode() for both explicit and inferred owner refs. No rebuild is needed.
|
||||
//
|
||||
// This benchmark measures sync performance with resources that have owner references
|
||||
// to quantify the index-building overhead at different scales.
|
||||
func BenchmarkSync_ParentToChildrenIndex(b *testing.B) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
totalResources int
|
||||
pctWithOwnerRefs int // Percentage of resources with owner references
|
||||
}{
|
||||
// Baseline: no owner refs (index operations are no-ops)
|
||||
{"1000res_0pctOwnerRefs", 1000, 0},
|
||||
{"5000res_0pctOwnerRefs", 5000, 0},
|
||||
{"10000res_0pctOwnerRefs", 10000, 0},
|
||||
|
||||
// Typical case: ~80% of resources have owner refs (pods owned by RS, RS owned by Deployment)
|
||||
{"1000res_80pctOwnerRefs", 1000, 80},
|
||||
{"5000res_80pctOwnerRefs", 5000, 80},
|
||||
{"10000res_80pctOwnerRefs", 10000, 80},
|
||||
|
||||
// Heavy case: all resources have owner refs
|
||||
{"1000res_100pctOwnerRefs", 1000, 100},
|
||||
{"5000res_100pctOwnerRefs", 5000, 100},
|
||||
{"10000res_100pctOwnerRefs", 10000, 100},
|
||||
|
||||
// Stress test: larger scale
|
||||
{"20000res_80pctOwnerRefs", 20000, 80},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
b.Run(tc.name, func(b *testing.B) {
|
||||
resources := make([]runtime.Object, 0, tc.totalResources)
|
||||
|
||||
// Create parent resources (deployments) - these won't have owner refs
|
||||
numParents := tc.totalResources / 10 // 10% are parents
|
||||
if numParents < 1 {
|
||||
numParents = 1
|
||||
}
|
||||
parentUIDs := make([]types.UID, numParents)
|
||||
for i := 0; i < numParents; i++ {
|
||||
uid := types.UID(fmt.Sprintf("deploy-uid-%d", i))
|
||||
parentUIDs[i] = uid
|
||||
resources = append(resources, &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("deploy-%d", i),
|
||||
Namespace: "default",
|
||||
UID: uid,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Create child resources (pods) - some with owner refs
|
||||
numChildren := tc.totalResources - numParents
|
||||
numWithOwnerRefs := (numChildren * tc.pctWithOwnerRefs) / 100
|
||||
|
||||
for i := 0; i < numChildren; i++ {
|
||||
pod := &corev1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("pod-%d", i),
|
||||
Namespace: "default",
|
||||
UID: types.UID(fmt.Sprintf("pod-uid-%d", i)),
|
||||
},
|
||||
}
|
||||
|
||||
// Add owner refs to the first numWithOwnerRefs pods
|
||||
if i < numWithOwnerRefs {
|
||||
parentIdx := i % numParents
|
||||
pod.OwnerReferences = []metav1.OwnerReference{{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Name: fmt.Sprintf("deploy-%d", parentIdx),
|
||||
UID: parentUIDs[parentIdx],
|
||||
}}
|
||||
}
|
||||
|
||||
resources = append(resources, pod)
|
||||
}
|
||||
|
||||
cluster := newCluster(b, resources...)
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
// sync() reinitializes resources, parentUIDToChildren, etc. at the start,
|
||||
// so no manual reset is needed here.
|
||||
err := cluster.sync()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkUpdateParentUIDToChildren measures the cost of incremental index updates
|
||||
// during setNode. This is called for EVERY resource during sync. The index uses
|
||||
// set-based storage so add/remove operations are O(1) regardless of children count.
|
||||
func BenchmarkUpdateParentUIDToChildren(b *testing.B) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
childrenPerParent int
|
||||
}{
|
||||
{"10children", 10},
|
||||
{"50children", 50},
|
||||
{"100children", 100},
|
||||
{"500children", 500},
|
||||
{"1000children", 1000},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
b.Run(tc.name, func(b *testing.B) {
|
||||
cluster := newCluster(b)
|
||||
err := cluster.EnsureSynced()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
parentUID := types.UID("parent-uid")
|
||||
|
||||
// Pre-populate with existing children
|
||||
childrenSet := make(map[kube.ResourceKey]struct{})
|
||||
for i := 0; i < tc.childrenPerParent; i++ {
|
||||
childKey := kube.ResourceKey{
|
||||
Group: "",
|
||||
Kind: "Pod",
|
||||
Namespace: "default",
|
||||
Name: fmt.Sprintf("existing-child-%d", i),
|
||||
}
|
||||
childrenSet[childKey] = struct{}{}
|
||||
}
|
||||
cluster.parentUIDToChildren[parentUID] = childrenSet
|
||||
|
||||
// Create a new child key to add
|
||||
newChildKey := kube.ResourceKey{
|
||||
Group: "",
|
||||
Kind: "Pod",
|
||||
Namespace: "default",
|
||||
Name: "new-child",
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
// Simulate adding a new child - O(1) set insertion
|
||||
cluster.addToParentUIDToChildren(parentUID, newChildKey)
|
||||
// Remove it so we can add it again in the next iteration
|
||||
cluster.removeFromParentUIDToChildren(parentUID, newChildKey)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkIncrementalIndexBuild measures the cost of incremental index updates
|
||||
// via addToParentUIDToChildren during sync. The index uses O(1) set-based operations.
|
||||
//
|
||||
// This benchmark was created to investigate issue #26863 and verify the fix.
|
||||
func BenchmarkIncrementalIndexBuild(b *testing.B) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
numParents int
|
||||
childrenPerParent int
|
||||
}{
|
||||
{"100parents_10children", 100, 10},
|
||||
{"100parents_50children", 100, 50},
|
||||
{"100parents_100children", 100, 100},
|
||||
{"1000parents_10children", 1000, 10},
|
||||
{"1000parents_100children", 1000, 100},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
// Benchmark incremental approach (what happens during setNode)
|
||||
b.Run(tc.name+"_incremental", func(b *testing.B) {
|
||||
cluster := newCluster(b)
|
||||
err := cluster.EnsureSynced()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Prepare parent UIDs and child keys
|
||||
type childInfo struct {
|
||||
parentUID types.UID
|
||||
childKey kube.ResourceKey
|
||||
}
|
||||
children := make([]childInfo, 0, tc.numParents*tc.childrenPerParent)
|
||||
for p := 0; p < tc.numParents; p++ {
|
||||
parentUID := types.UID(fmt.Sprintf("parent-%d", p))
|
||||
for c := 0; c < tc.childrenPerParent; c++ {
|
||||
children = append(children, childInfo{
|
||||
parentUID: parentUID,
|
||||
childKey: kube.ResourceKey{
|
||||
Kind: "Pod",
|
||||
Namespace: "default",
|
||||
Name: fmt.Sprintf("child-%d-%d", p, c),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
// Clear the index
|
||||
cluster.parentUIDToChildren = make(map[types.UID]map[kube.ResourceKey]struct{})
|
||||
|
||||
// Simulate incremental adds (O(1) set insertions)
|
||||
for _, child := range children {
|
||||
cluster.addToParentUIDToChildren(child.parentUID, child.childKey)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
16
gitops-engine/pkg/cache/resource.go
vendored
16
gitops-engine/pkg/cache/resource.go
vendored
@@ -76,16 +76,16 @@ func (r *Resource) toOwnerRef() metav1.OwnerReference {
|
||||
}
|
||||
|
||||
// iterateChildrenV2 is a depth-first traversal of the graph of resources starting from the current resource.
|
||||
func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, visited map[kube.ResourceKey]int, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
|
||||
func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, actionCallState map[kube.ResourceKey]callState, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
|
||||
key := r.ResourceKey()
|
||||
if visited[key] == 2 {
|
||||
if actionCallState[key] == completed {
|
||||
return
|
||||
}
|
||||
// this indicates that we've started processing this node's children
|
||||
visited[key] = 1
|
||||
actionCallState[key] = inProgress
|
||||
defer func() {
|
||||
// this indicates that we've finished processing this node's children
|
||||
visited[key] = 2
|
||||
actionCallState[key] = completed
|
||||
}()
|
||||
children, ok := graph[key]
|
||||
if !ok || children == nil {
|
||||
@@ -94,13 +94,13 @@ func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*R
|
||||
for _, child := range children {
|
||||
childKey := child.ResourceKey()
|
||||
// For cross-namespace relationships, child might not be in ns, so use it directly from graph
|
||||
switch visited[childKey] {
|
||||
case 1:
|
||||
switch actionCallState[childKey] {
|
||||
case inProgress:
|
||||
// Since we encountered a node that we're currently processing, we know we have a circular dependency.
|
||||
_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
|
||||
case 0:
|
||||
case notCalled:
|
||||
if action(nil, child, ns) {
|
||||
child.iterateChildrenV2(graph, ns, visited, action)
|
||||
child.iterateChildrenV2(graph, ns, actionCallState, action)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
2
go.mod
2
go.mod
@@ -102,7 +102,7 @@ require (
|
||||
golang.org/x/term v0.41.0
|
||||
golang.org/x/time v0.15.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57
|
||||
google.golang.org/grpc v1.79.2
|
||||
google.golang.org/grpc v1.79.3
|
||||
google.golang.org/protobuf v1.36.11
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
|
||||
4
go.sum
4
go.sum
@@ -1404,8 +1404,8 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU=
|
||||
google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
|
||||
google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=
|
||||
google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v3.4.0-rc4
|
||||
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v3.4.0-rc4
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
12
manifests/core-install-with-hydrator.yaml
generated
12
manifests/core-install-with-hydrator.yaml
generated
@@ -31332,7 +31332,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -31473,7 +31473,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -31601,7 +31601,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -31910,7 +31910,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -31963,7 +31963,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -32366,7 +32366,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
10
manifests/core-install.yaml
generated
10
manifests/core-install.yaml
generated
@@ -31300,7 +31300,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -31429,7 +31429,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -31738,7 +31738,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -31791,7 +31791,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -32194,7 +32194,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v3.4.0-rc4
|
||||
|
||||
@@ -12,7 +12,7 @@ patches:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v3.4.0-rc4
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/applicationset-controller
|
||||
|
||||
18
manifests/ha/install-with-hydrator.yaml
generated
18
manifests/ha/install-with-hydrator.yaml
generated
@@ -32758,7 +32758,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -32899,7 +32899,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -33057,7 +33057,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -33159,7 +33159,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -33283,7 +33283,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -33618,7 +33618,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -33671,7 +33671,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -34100,7 +34100,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -34532,7 +34532,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/ha/install.yaml
generated
16
manifests/ha/install.yaml
generated
@@ -32728,7 +32728,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -32887,7 +32887,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -32989,7 +32989,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -33113,7 +33113,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -33448,7 +33448,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -33501,7 +33501,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -33930,7 +33930,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -34362,7 +34362,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/ha/namespace-install-with-hydrator.yaml
generated
18
manifests/ha/namespace-install-with-hydrator.yaml
generated
@@ -2005,7 +2005,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -2146,7 +2146,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2304,7 +2304,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2406,7 +2406,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2530,7 +2530,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2865,7 +2865,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2918,7 +2918,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -3347,7 +3347,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3779,7 +3779,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/ha/namespace-install.yaml
generated
16
manifests/ha/namespace-install.yaml
generated
@@ -1975,7 +1975,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -2134,7 +2134,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2236,7 +2236,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2360,7 +2360,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2695,7 +2695,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2748,7 +2748,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -3177,7 +3177,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3609,7 +3609,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/install-with-hydrator.yaml
generated
18
manifests/install-with-hydrator.yaml
generated
@@ -31776,7 +31776,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -31917,7 +31917,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -32075,7 +32075,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -32177,7 +32177,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -32279,7 +32279,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -32588,7 +32588,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -32641,7 +32641,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -33068,7 +33068,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -33500,7 +33500,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/install.yaml
generated
16
manifests/install.yaml
generated
@@ -31744,7 +31744,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -31903,7 +31903,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -32005,7 +32005,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -32107,7 +32107,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -32416,7 +32416,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -32469,7 +32469,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -32896,7 +32896,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -33328,7 +33328,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/namespace-install-with-hydrator.yaml
generated
18
manifests/namespace-install-with-hydrator.yaml
generated
@@ -1023,7 +1023,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1164,7 +1164,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1322,7 +1322,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1424,7 +1424,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1526,7 +1526,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1835,7 +1835,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1888,7 +1888,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2315,7 +2315,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2747,7 +2747,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/namespace-install.yaml
generated
16
manifests/namespace-install.yaml
generated
@@ -991,7 +991,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1150,7 +1150,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1252,7 +1252,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1354,7 +1354,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1663,7 +1663,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1716,7 +1716,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2143,7 +2143,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2575,7 +2575,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.4.0-rc4
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/TomOnTime/utfutil"
|
||||
"github.com/bmatcuk/doublestar/v4"
|
||||
imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
@@ -656,6 +657,13 @@ func (s *Service) GenerateManifest(ctx context.Context, q *apiclient.ManifestReq
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Convert typed errors to gRPC status codes so callers can use status.Code()
|
||||
// rather than string matching.
|
||||
var globNoMatch *GlobNoMatchError
|
||||
if errors.As(err, &globNoMatch) {
|
||||
return nil, status.Error(codes.NotFound, err.Error())
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
@@ -1301,7 +1309,7 @@ func helmTemplate(appPath string, repoRoot string, env *v1alpha1.Env, q *apiclie
|
||||
return nil, "", fmt.Errorf("error getting helm repos: %w", err)
|
||||
}
|
||||
|
||||
h, err := helm.NewHelmApp(appPath, helmRepos, isLocal, version, proxy, q.Repo.NoProxy, passCredentials)
|
||||
h, err := helm.NewHelmApp(appPath, helmRepos, isLocal, version, proxy, q.Repo.NoProxy, passCredentials, q.Repo.Insecure)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error initializing helm app object: %w", err)
|
||||
}
|
||||
@@ -1376,19 +1384,55 @@ func getResolvedValueFiles(
|
||||
gitRepoPaths utilio.TempPaths,
|
||||
ignoreMissingValueFiles bool,
|
||||
) ([]pathutil.ResolvedFilePath, error) {
|
||||
// Pre-collect resolved paths for all explicit (non-glob) entries. This allows glob
|
||||
// expansion to skip files that also appear explicitly, so the explicit entry controls
|
||||
// the final position. For example, with ["*.yaml", "c.yaml"], c.yaml is excluded from
|
||||
// the glob expansion and placed at the end where it was explicitly listed.
|
||||
explicitPaths := make(map[pathutil.ResolvedFilePath]struct{})
|
||||
for _, rawValueFile := range rawValueFiles {
|
||||
referencedSource := getReferencedSource(rawValueFile, refSources)
|
||||
var resolved pathutil.ResolvedFilePath
|
||||
var err error
|
||||
if referencedSource != nil {
|
||||
resolved, err = getResolvedRefValueFile(rawValueFile, env, allowedValueFilesSchemas, referencedSource.Repo.Repo, gitRepoPaths)
|
||||
} else {
|
||||
resolved, _, err = pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, env.Envsubst(rawValueFile), allowedValueFilesSchemas)
|
||||
}
|
||||
if err != nil {
|
||||
continue // resolution errors will be surfaced in the main loop below
|
||||
}
|
||||
if !isGlobPath(string(resolved)) {
|
||||
explicitPaths[resolved] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
var resolvedValueFiles []pathutil.ResolvedFilePath
|
||||
seen := make(map[pathutil.ResolvedFilePath]struct{})
|
||||
appendUnique := func(p pathutil.ResolvedFilePath) {
|
||||
if _, ok := seen[p]; !ok {
|
||||
seen[p] = struct{}{}
|
||||
resolvedValueFiles = append(resolvedValueFiles, p)
|
||||
}
|
||||
}
|
||||
for _, rawValueFile := range rawValueFiles {
|
||||
isRemote := false
|
||||
var resolvedPath pathutil.ResolvedFilePath
|
||||
var err error
|
||||
|
||||
referencedSource := getReferencedSource(rawValueFile, refSources)
|
||||
// effectiveRoot is the repository root used for the symlink boundary check
|
||||
// on glob matches. For ref-source paths this is the external repo's checkout
|
||||
// directory; for local paths it is the main repo root.
|
||||
effectiveRoot := repoRoot
|
||||
if referencedSource != nil {
|
||||
// If the $-prefixed path appears to reference another source, do env substitution _after_ resolving that source.
|
||||
resolvedPath, err = getResolvedRefValueFile(rawValueFile, env, allowedValueFilesSchemas, referencedSource.Repo.Repo, gitRepoPaths)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error resolving value file path: %w", err)
|
||||
}
|
||||
if refRepoPath := gitRepoPaths.GetPathIfExists(git.NormalizeGitURL(referencedSource.Repo.Repo)); refRepoPath != "" {
|
||||
effectiveRoot = refRepoPath
|
||||
}
|
||||
} else {
|
||||
// This will resolve val to an absolute path (or a URL)
|
||||
resolvedPath, isRemote, err = pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, env.Envsubst(rawValueFile), allowedValueFilesSchemas)
|
||||
@@ -1397,6 +1441,38 @@ func getResolvedValueFiles(
|
||||
}
|
||||
}
|
||||
|
||||
// If the resolved path contains a glob pattern, expand it to all matching files.
|
||||
// doublestar.FilepathGlob is used (consistent with AppSet generators) because it supports
|
||||
// ** for recursive matching in addition to all standard glob patterns (*,?,[).
|
||||
// Matches are returned in lexical order, which determines helm's merge precedence
|
||||
// (later files override earlier ones). Glob patterns are only expanded for local files;
|
||||
// remote value file URLs (e.g. https://...) are passed through as-is.
|
||||
// If the glob matches no files and ignoreMissingValueFiles is true, skip it silently.
|
||||
// Otherwise, return an error — consistent with how missing non-glob value files are handled.
|
||||
if !isRemote && isGlobPath(string(resolvedPath)) {
|
||||
matches, err := doublestar.FilepathGlob(string(resolvedPath))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error expanding glob pattern %q: %w", rawValueFile, err)
|
||||
}
|
||||
if len(matches) == 0 {
|
||||
if ignoreMissingValueFiles {
|
||||
log.Debugf(" %s values file glob matched no files", rawValueFile)
|
||||
continue
|
||||
}
|
||||
return nil, &GlobNoMatchError{Pattern: rawValueFile}
|
||||
}
|
||||
if err := verifyGlobMatchesWithinRoot(matches, effectiveRoot); err != nil {
|
||||
return nil, fmt.Errorf("glob pattern %q: %w", rawValueFile, err)
|
||||
}
|
||||
for _, match := range matches {
|
||||
// Skip files that are also listed explicitly - they will be placed
|
||||
// at their explicit position rather than the glob's position.
|
||||
if _, isExplicit := explicitPaths[pathutil.ResolvedFilePath(match)]; !isExplicit {
|
||||
appendUnique(pathutil.ResolvedFilePath(match))
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !isRemote {
|
||||
_, err = os.Stat(string(resolvedPath))
|
||||
if os.IsNotExist(err) {
|
||||
@@ -1407,8 +1483,9 @@ func getResolvedValueFiles(
|
||||
}
|
||||
}
|
||||
|
||||
resolvedValueFiles = append(resolvedValueFiles, resolvedPath)
|
||||
appendUnique(resolvedPath)
|
||||
}
|
||||
log.Infof("resolved value files: %v", resolvedValueFiles)
|
||||
return resolvedValueFiles, nil
|
||||
}
|
||||
|
||||
@@ -1478,6 +1555,61 @@ func getRepoCredential(repoCredentials []*v1alpha1.RepoCreds, repoURL string) *v
|
||||
return nil
|
||||
}
|
||||
|
||||
// GlobNoMatchError is returned when a glob pattern in valueFiles matches no files.
// It is a runtime condition (the files may be added later), not a spec error.
// Callers can distinguish it from other failures with errors.As.
type GlobNoMatchError struct {
	// Pattern is the raw (unexpanded) value-file glob that matched nothing.
	Pattern string
}

// Error implements the error interface, quoting the offending pattern.
func (e *GlobNoMatchError) Error() string {
	return fmt.Sprintf("values file glob %q matched no files", e.Pattern)
}
|
||||
|
||||
// isGlobPath reports whether path contains any glob metacharacters
// understood by doublestar: *, ?, or [. A ** pattern is detected via
// its constituent * characters, so no separate check is needed.
func isGlobPath(path string) bool {
	return strings.IndexAny(path, "*?[") >= 0
}
|
||||
|
||||
// verifyGlobMatchesWithinRoot resolves symlinks for each glob match and verifies
|
||||
// that the resolved target is within effectiveRoot. It protects against symlinks
|
||||
// inside the repository that point to targets outside it.
|
||||
//
|
||||
// doublestar.FilepathGlob uses os.Lstat, so it returns the path of the symlink
|
||||
// itself (which lives inside the repo) rather than the symlink target. If the
|
||||
// target is outside the repo, Helm would still follow the link and read the
|
||||
// external file. This function catches that case before the paths reach Helm.
|
||||
//
|
||||
// Both effectiveRoot and each match are canonicalized via filepath.EvalSymlinks
|
||||
// so the prefix comparison is correct on systems where the working directory is
|
||||
// itself under a symlink chain (e.g. /var -> /private/var on macOS).
|
||||
func verifyGlobMatchesWithinRoot(matches []string, effectiveRoot string) error {
|
||||
absRoot, err := filepath.Abs(effectiveRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error resolving repo root: %w", err)
|
||||
}
|
||||
canonicalRoot, err := filepath.EvalSymlinks(absRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error resolving symlinks in repo root: %w", err)
|
||||
}
|
||||
requiredRootPath := canonicalRoot
|
||||
if !strings.HasSuffix(requiredRootPath, string(os.PathSeparator)) {
|
||||
requiredRootPath += string(os.PathSeparator)
|
||||
}
|
||||
for _, match := range matches {
|
||||
realMatch, err := filepath.EvalSymlinks(match)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error resolving symlink for glob match %q: %w", match, err)
|
||||
}
|
||||
// Allow the match to resolve exactly to the root (realMatch+sep == requiredRootPath)
|
||||
// or to any path beneath it (HasPrefix).
|
||||
if realMatch+string(os.PathSeparator) != requiredRootPath && !strings.HasPrefix(realMatch, requiredRootPath) {
|
||||
return fmt.Errorf("glob match %q resolved to outside repository root", match)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type (
|
||||
GenerateManifestOpt func(*generateManifestOpt)
|
||||
generateManifestOpt struct {
|
||||
@@ -2287,7 +2419,7 @@ func (s *Service) populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h, err := helm.NewHelmApp(appPath, helmRepos, false, version, q.Repo.Proxy, q.Repo.NoProxy, passCredentials)
|
||||
h, err := helm.NewHelmApp(appPath, helmRepos, false, version, q.Repo.Proxy, q.Repo.NoProxy, passCredentials, q.Repo.Insecure)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -3895,6 +3895,567 @@ func Test_getResolvedValueFiles(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Test_getResolvedValueFiles_glob covers glob expansion of Helm value-file
// paths: local and ref-sourced globs, ** recursion and its depth-first
// ordering, the ignoreMissingValueFiles behavior, and the deduplication rules
// between explicit entries and glob matches. Fixture files are created under
// a shared temp dir ("main-repo" for local paths, "repo1" for the $ref source).
func Test_getResolvedValueFiles_glob(t *testing.T) {
	t.Parallel()

	tempDir := t.TempDir()
	paths := utilio.NewRandomizedTempPaths(tempDir)
	paths.Add(git.NormalizeGitURL("https://github.com/org/repo1"), path.Join(tempDir, "repo1"))

	// main-repo files
	require.NoError(t, os.MkdirAll(path.Join(tempDir, "main-repo", "prod", "nested"), 0o755))
	require.NoError(t, os.MkdirAll(path.Join(tempDir, "main-repo", "staging"), 0o755))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "prod", "a.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "prod", "b.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "staging", "e.yaml"), []byte{}, 0o644))

	// main-repo envs: used to verify depth-order with ** (z.yaml sorts after nested/ alphabetically
	// but is still returned before nested/c.yaml because doublestar matches depth-0 files first).
	require.NoError(t, os.MkdirAll(path.Join(tempDir, "main-repo", "envs", "nested"), 0o755))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "envs", "a.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "envs", "z.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "envs", "nested", "c.yaml"), []byte{}, 0o644))

	// repo1 files
	require.NoError(t, os.MkdirAll(path.Join(tempDir, "repo1", "prod", "nested"), 0o755))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "repo1", "prod", "x.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "repo1", "prod", "y.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "repo1", "prod", "nested", "z.yaml"), []byte{}, 0o644))

	tests := []struct {
		name                    string
		rawPath                 string
		env                     *v1alpha1.Env
		refSources              map[string]*v1alpha1.RefTarget
		expectedPaths           []string
		ignoreMissingValueFiles bool
		expectedErr             bool
	}{
		{
			name:       "local glob matches multiple files",
			rawPath:    "prod/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			expectedPaths: []string{
				// the order is a.yaml before b.yaml
				// since doublestar.FilepathGlob returns lexical order
				path.Join(tempDir, "main-repo", "prod", "a.yaml"),
				path.Join(tempDir, "main-repo", "prod", "b.yaml"),
			},
		},
		{
			name:          "local glob matches no files returns error",
			rawPath:       "dev/*.yaml",
			env:           &v1alpha1.Env{},
			refSources:    map[string]*v1alpha1.RefTarget{},
			expectedPaths: nil,
			expectedErr:   true,
		},
		{
			name:                    "local glob matches no files with ignoreMissingValueFiles set to true",
			rawPath:                 "dev/*.yaml",
			env:                     &v1alpha1.Env{},
			refSources:              map[string]*v1alpha1.RefTarget{},
			ignoreMissingValueFiles: true,
			expectedPaths:           nil,
		},
		{
			name:    "referenced glob matches multiple files in external repo",
			rawPath: "$ref/prod/*.yaml",
			env:     &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": {
					Repo: v1alpha1.Repository{
						Repo: "https://github.com/org/repo1",
					},
				},
			},
			expectedPaths: []string{
				path.Join(tempDir, "repo1", "prod", "x.yaml"),
				path.Join(tempDir, "repo1", "prod", "y.yaml"),
			},
		},
		{
			name:    "ref glob with env var in path",
			rawPath: "$ref/$ENV/*.yaml",
			env: &v1alpha1.Env{
				&v1alpha1.EnvEntry{
					Name:  "ENV",
					Value: "prod",
				},
			},
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": {
					Repo: v1alpha1.Repository{
						Repo: "https://github.com/org/repo1",
					},
				},
			},
			expectedPaths: []string{
				path.Join(tempDir, "repo1", "prod", "x.yaml"),
				path.Join(tempDir, "repo1", "prod", "y.yaml"),
			},
		},
		{
			name:          "local glob single match",
			rawPath:       "prod/a*.yaml",
			env:           &v1alpha1.Env{},
			refSources:    map[string]*v1alpha1.RefTarget{},
			expectedPaths: []string{path.Join(tempDir, "main-repo", "prod", "a.yaml")},
		},
		{
			name: "recursive glob matches files at all depths under a subdirectory",
			// ** matches zero or more path segments, so prod/**/*.yaml covers both
			// prod/*.yaml (zero intermediate segments) and prod/nested/*.yaml (one segment), etc.
			rawPath:    "prod/**/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			// lexical order: prod/a.yaml, prod/b.yaml, prod/nested/c.yaml, prod/nested/d.yaml
			expectedPaths: []string{
				path.Join(tempDir, "main-repo", "prod", "a.yaml"),
				path.Join(tempDir, "main-repo", "prod", "b.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"),
			},
		},
		{
			name:       "recursive glob from repo root matches yaml files across all directories",
			rawPath:    "**/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			// doublestar traverses directories in lexical order, processing each directory's
			// own files before its subdirectories. So the order is:
			// envs/ flat files → envs/nested/ files → prod/ flat files → prod/nested/ files → staging/ files
			expectedPaths: []string{
				path.Join(tempDir, "main-repo", "envs", "a.yaml"),
				path.Join(tempDir, "main-repo", "envs", "z.yaml"),
				path.Join(tempDir, "main-repo", "envs", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "a.yaml"),
				path.Join(tempDir, "main-repo", "prod", "b.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"),
				path.Join(tempDir, "main-repo", "staging", "e.yaml"),
			},
		},
		{
			name:       "recursive glob anchored to a named subdirectory matches at any depth",
			rawPath:    "**/nested/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			expectedPaths: []string{
				path.Join(tempDir, "main-repo", "envs", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"),
			},
		},
		{
			name:                    "recursive glob with no matches and ignoreMissingValueFiles skips silently",
			rawPath:                 "**/nonexistent/*.yaml",
			env:                     &v1alpha1.Env{},
			refSources:              map[string]*v1alpha1.RefTarget{},
			ignoreMissingValueFiles: true,
			expectedPaths:           nil,
		},
		{
			name:          "recursive glob with no matches returns error",
			rawPath:       "**/nonexistent/*.yaml",
			env:           &v1alpha1.Env{},
			refSources:    map[string]*v1alpha1.RefTarget{},
			expectedPaths: nil,
			expectedErr:   true,
		},
		{
			// z.yaml sorts after "nested/" alphabetically by full path, but doublestar processes
			// each directory's own files before descending into subdirectories. So for envs/**/*.yaml:
			// envs/ flat files (a, z) come before envs/nested/ files (c), giving:
			// a.yaml, z.yaml, nested/c.yaml — not a.yaml, nested/c.yaml, z.yaml.
			name:       "** depth-order: flat files before nested even when flat file sorts after nested/ alphabetically",
			rawPath:    "envs/**/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			expectedPaths: []string{
				path.Join(tempDir, "main-repo", "envs", "a.yaml"),
				path.Join(tempDir, "main-repo", "envs", "z.yaml"),
				path.Join(tempDir, "main-repo", "envs", "nested", "c.yaml"),
			},
		},
		{
			name:    "recursive glob in external ref repo",
			rawPath: "$ref/prod/**/*.yaml",
			env:     &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": {
					Repo: v1alpha1.Repository{
						Repo: "https://github.com/org/repo1",
					},
				},
			},
			expectedPaths: []string{
				// doublestar matches zero path segments before recursing into subdirectories,
				// so flat files (x, y) come before nested ones (nested/z).
				path.Join(tempDir, "repo1", "prod", "x.yaml"),
				path.Join(tempDir, "repo1", "prod", "y.yaml"),
				path.Join(tempDir, "repo1", "prod", "nested", "z.yaml"),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			repoPath := path.Join(tempDir, "main-repo")
			resolvedPaths, err := getResolvedValueFiles(repoPath, repoPath, tt.env, []string{}, []string{tt.rawPath}, tt.refSources, paths, tt.ignoreMissingValueFiles)
			if tt.expectedErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			require.Len(t, resolvedPaths, len(tt.expectedPaths))

			for i, p := range tt.expectedPaths {
				assert.Equal(t, p, string(resolvedPaths[i]))
			}
		})
	}

	// Deduplication: first occurrence of a resolved path wins. Subsequent references to the
	// same file, whether explicit or via glob are silently dropped. This preserves the
	// merge-precedence position set by the first mention of each file.
	t.Run("glob then explicit: explicit entry placed at end, giving it highest Helm precedence", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"envs/*.yaml", // glob - z.yaml is explicit so skipped; only a.yaml added
				"envs/z.yaml", // explicit - placed last, highest precedence
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 2)
		assert.Equal(t, path.Join(tempDir, "main-repo", "envs", "a.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "envs", "z.yaml"), string(resolvedPaths[1]))
	})

	t.Run("explicit path before glob: explicit position is kept, glob re-match is dropped", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"prod/a.yaml", // explicit locks in position 0
				"prod/*.yaml", // glob - a.yaml already seen, only b.yaml is new
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 2)
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "a.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "b.yaml"), string(resolvedPaths[1]))
	})

	t.Run("glob before explicit path: explicit position wins, glob skips the explicitly listed file", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"prod/*.yaml", // glob - a.yaml is explicit so skipped; only b.yaml added (pos 0)
				"prod/a.yaml", // explicit - placed here at pos 1 (highest precedence)
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 2)
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "b.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "a.yaml"), string(resolvedPaths[1]))
	})

	t.Run("two overlapping globs: second glob only adds files not matched by first", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"prod/*.yaml",    // adds a.yaml, b.yaml
				"prod/**/*.yaml", // a.yaml, b.yaml already seen; adds nested/c.yaml, nested/d.yaml
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 4)
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "a.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "b.yaml"), string(resolvedPaths[1]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"), string(resolvedPaths[2]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"), string(resolvedPaths[3]))
	})

	t.Run("explicit paths take priority: globs skip explicitly listed files, which are placed at their explicit positions", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"prod/a.yaml",        // explicit - pos 0
				"prod/*.yaml",        // a.yaml and b.yaml are both explicit, skipped entirely
				"prod/b.yaml",        // explicit - pos 1
				"prod/**/*.yaml",     // a.yaml, b.yaml, nested/c.yaml all explicit and skipped; nested/d.yaml added - pos 2
				"prod/nested/c.yaml", // explicit - pos 3
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 4)
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "a.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "b.yaml"), string(resolvedPaths[1]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"), string(resolvedPaths[2]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"), string(resolvedPaths[3]))
	})
}
|
||||
|
||||
// Test_verifyGlobMatchesWithinRoot builds a fixture tree with in-repo files,
// single-hop and two-hop symlinks (both staying inside the repo and escaping
// it), and verifies that verifyGlobMatchesWithinRoot accepts only matches whose
// fully resolved target stays under the repo root.
func Test_verifyGlobMatchesWithinRoot(t *testing.T) {
	t.Parallel()

	tempDir := t.TempDir()
	repoDir := filepath.Join(tempDir, "repo")
	outsideDir := filepath.Join(tempDir, "outside")

	require.NoError(t, os.MkdirAll(filepath.Join(repoDir, "values", "sub"), 0o755))
	require.NoError(t, os.MkdirAll(outsideDir, 0o755))

	// Files used as symlink targets
	inRepoFile := filepath.Join(repoDir, "values", "real.yaml")
	outsideFile := filepath.Join(outsideDir, "secret.yaml")
	require.NoError(t, os.WriteFile(inRepoFile, []byte{}, 0o644))
	require.NoError(t, os.WriteFile(outsideFile, []byte("password: hunter2"), 0o644))

	// Symlink inside repo → file inside repo (safe)
	inRepoLink := filepath.Join(repoDir, "values", "inrepo-link.yaml")
	require.NoError(t, os.Symlink(inRepoFile, inRepoLink))

	// Symlink inside repo → file outside repo (escape)
	escapeLink := filepath.Join(repoDir, "values", "escape-link.yaml")
	require.NoError(t, os.Symlink(outsideFile, escapeLink))

	// Two-hop symlink: inside repo → another symlink (still inside) → file inside repo
	hop1 := filepath.Join(repoDir, "values", "hop1.yaml")
	require.NoError(t, os.Symlink(inRepoLink, hop1)) // hop1 → inRepoLink → real.yaml

	// Two-hop symlink: inside repo → another symlink (inside repo) → file outside repo
	hop2 := filepath.Join(repoDir, "values", "hop2.yaml")
	require.NoError(t, os.Symlink(escapeLink, hop2)) // hop2 → escape-link → secret.yaml

	tests := []struct {
		name        string
		matches     []string
		expectErr   bool
		errContains string
	}{
		{
			name:    "regular file inside root passes",
			matches: []string{inRepoFile},
		},
		{
			name:    "symlink inside root pointing to file inside root passes",
			matches: []string{inRepoLink},
		},
		{
			name:    "two-hop chain that stays within root passes",
			matches: []string{hop1},
		},
		{
			name:        "symlink pointing directly outside root is rejected",
			matches:     []string{escapeLink},
			expectErr:   true,
			errContains: "resolved to outside repository root",
		},
		{
			name:        "two-hop chain that escapes root is rejected",
			matches:     []string{hop2},
			expectErr:   true,
			errContains: "resolved to outside repository root",
		},
		{
			name:    "multiple matches all inside root pass",
			matches: []string{inRepoFile, inRepoLink, hop1},
		},
		{
			name:        "one bad match in a list fails the whole call",
			matches:     []string{inRepoFile, escapeLink},
			expectErr:   true,
			errContains: "resolved to outside repository root",
		},
		{
			name:    "empty matches list is a no-op",
			matches: []string{},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			err := verifyGlobMatchesWithinRoot(tt.matches, repoDir)
			if tt.expectErr {
				require.Error(t, err)
				assert.Contains(t, err.Error(), tt.errContains)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
|
||||
|
||||
// Test_getResolvedValueFiles_glob_symlink_escape is an integration-level check
// that verifyGlobMatchesWithinRoot is wired into glob expansion correctly: a
// symlink inside the repo pointing outside must cause getResolvedValueFiles to
// return an error rather than silently including the external file.
func Test_getResolvedValueFiles_glob_symlink_escape(t *testing.T) {
	t.Parallel()

	tempDir := t.TempDir()
	paths := utilio.NewRandomizedTempPaths(tempDir)

	repoDir := filepath.Join(tempDir, "repo")
	outsideDir := filepath.Join(tempDir, "outside")

	require.NoError(t, os.MkdirAll(filepath.Join(repoDir, "values"), 0o755))
	require.NoError(t, os.MkdirAll(outsideDir, 0o755))

	// base.yaml is a legitimate in-repo match; escape.yaml is a symlink whose
	// target lives outside the repo and must poison the whole glob expansion.
	require.NoError(t, os.WriteFile(filepath.Join(repoDir, "values", "base.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(outsideDir, "secret.yaml"), []byte("password: hunter2"), 0o644))
	require.NoError(t, os.Symlink(filepath.Join(outsideDir, "secret.yaml"), filepath.Join(repoDir, "values", "escape.yaml")))

	_, err := getResolvedValueFiles(repoDir, repoDir, &v1alpha1.Env{}, []string{}, []string{"values/*.yaml"}, map[string]*v1alpha1.RefTarget{}, paths, false)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "resolved to outside repository root")
}
|
||||
|
||||
// Test_isGlobPath table-tests glob-metacharacter detection: *, ?, and [ (alone
// or embedded, relative or absolute) are globs; plain paths and the empty
// string are not. Cases omitting `expected` rely on the zero value false.
func Test_isGlobPath(t *testing.T) {
	tests := []struct {
		path     string
		expected bool
	}{
		{
			path:     "prod/*.yaml",
			expected: true,
		},
		{
			path:     "prod/?.yaml",
			expected: true,
		},
		{
			path:     "prod[ab].yaml",
			expected: true,
		},
		{
			path:     "prod/**/*.yaml",
			expected: true,
		},
		{
			path: "prod/values.yaml",
		},
		{
			path: "values.yaml",
		},
		{
			path: "",
		},
		{
			path:     "/absolute/path/to/*.yaml",
			expected: true,
		},
		{
			path: "/absolute/path/to/values.yaml",
		},
		{
			path:     "*",
			expected: true,
		},
		{
			path:     "?",
			expected: true,
		},
		{
			path:     "[",
			expected: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.path, func(t *testing.T) {
			assert.Equal(t, tt.expected, isGlobPath(tt.path))
		})
	}
}
|
||||
|
||||
// Test_getReferencedSource verifies $ref-prefix lookup of value-file paths
// against the refSources map: hits for "$ref/..." and bare "$ref", and nil for
// unknown refs, empty strings, and paths without a $ prefix.
func Test_getReferencedSource(t *testing.T) {
	t.Parallel()

	refTarget := &v1alpha1.RefTarget{
		Repo: v1alpha1.Repository{
			Repo: "https://github.com/org/repo1",
		},
	}
	tests := []struct {
		name         string
		rawValueFile string
		refSources   map[string]*v1alpha1.RefTarget
		expected     *v1alpha1.RefTarget
	}{
		{
			name:         "ref with file path found in map",
			rawValueFile: "$ref/values.yaml",
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": refTarget,
			},
			expected: refTarget,
		},
		{
			name:         "ref with file path not in map",
			rawValueFile: "$ref/values.yaml",
			refSources:   map[string]*v1alpha1.RefTarget{},
			expected:     nil,
		},
		{
			name:         "bare ref without file path found in map",
			rawValueFile: "$ref",
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": refTarget,
			},
			expected: refTarget,
		},
		{
			name:         "empty string returns nil",
			rawValueFile: "",
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": refTarget,
			},
			expected: nil,
		},
		{
			name:         "no $ prefix returns nil",
			rawValueFile: "values.yaml",
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": refTarget,
			},
			expected: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			result := getReferencedSource(tt.rawValueFile, tt.refSources)
			assert.Equal(t, tt.expected, result)
		})
	}
}
|
||||
|
||||
func TestErrorGetGitDirectories(t *testing.T) {
|
||||
// test not using the cache
|
||||
root := "./testdata/git-files-dirs"
|
||||
|
||||
@@ -210,21 +210,9 @@ func (s *terminalHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if pod.Status.Phase != corev1.PodRunning {
|
||||
http.Error(w, "Pod not running", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var findContainer bool
|
||||
for _, c := range pod.Spec.Containers {
|
||||
if container == c.Name {
|
||||
findContainer = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !findContainer {
|
||||
fieldLog.Warn("terminal container not found")
|
||||
http.Error(w, "Cannot find container", http.StatusBadRequest)
|
||||
if !containerRunning(pod, container) {
|
||||
fieldLog.Warn("terminal container not running")
|
||||
http.Error(w, "container find running", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -273,6 +261,20 @@ func podExists(treeNodes []appv1.ResourceNode, podName, namespace string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// containerRunning reports whether the named container is currently running in
// the given pod, checking both regular container statuses and init-container
// statuses (so a terminal can attach to a running init container too).
func containerRunning(pod *corev1.Pod, containerName string) bool {
	return containerStatusRunning(pod.Status.ContainerStatuses, containerName) ||
		containerStatusRunning(pod.Status.InitContainerStatuses, containerName)
}
|
||||
|
||||
// containerStatusRunning reports whether the status entry named containerName
// has a non-nil Running state. Returns false when the name is absent from the
// slice. Indexing (rather than ranging by value) avoids copying each
// ContainerStatus struct.
func containerStatusRunning(statuses []corev1.ContainerStatus, containerName string) bool {
	for i := range statuses {
		if statuses[i].Name == containerName {
			// Only the first entry with a matching name is consulted.
			return statuses[i].State.Running != nil
		}
	}
	return false
}
|
||||
|
||||
const EndOfTransmission = "\u0004"
|
||||
|
||||
// PtyHandler is what remotecommand expects from a pty
|
||||
|
||||
@@ -5,9 +5,12 @@ import (
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/argo"
|
||||
@@ -79,6 +82,115 @@ func TestPodExists(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestContainerRunning table-tests containerRunning across regular and init
// containers, in running and non-running states, plus missing/empty names.
func TestContainerRunning(t *testing.T) {
	for _, tcase := range []struct {
		name           string
		pod            *corev1.Pod
		containerName  string
		expectedResult bool
	}{
		{
			name:           "empty container",
			pod:            &corev1.Pod{},
			containerName:  "",
			expectedResult: false,
		},
		{
			name:           "container not found",
			pod:            &corev1.Pod{},
			containerName:  "not-found",
			expectedResult: false,
		},
		{
			name: "container running",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					ContainerStatuses: []corev1.ContainerStatus{
						{
							Name: "test",
							State: corev1.ContainerState{
								Running: &corev1.ContainerStateRunning{
									StartedAt: metav1.NewTime(time.Now()),
								},
							},
						},
					},
				},
			},
			containerName:  "test",
			expectedResult: true,
		},
		{
			name: "init container running",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					ContainerStatuses: []corev1.ContainerStatus{
						{
							Name: "test",
							State: corev1.ContainerState{
								Running: &corev1.ContainerStateRunning{
									StartedAt: metav1.NewTime(time.Now()),
								},
							},
						},
					},
					InitContainerStatuses: []corev1.ContainerStatus{
						{
							Name: "test-init",
							State: corev1.ContainerState{
								Running: &corev1.ContainerStateRunning{
									StartedAt: metav1.NewTime(time.Now()),
								},
							},
						},
					},
				},
			},
			containerName:  "test-init",
			expectedResult: true,
		},
		{
			name: "container not running",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					ContainerStatuses: []corev1.ContainerStatus{
						{
							Name: "test",
							State: corev1.ContainerState{
								Running: nil,
							},
						},
					},
				},
			},
			containerName:  "test",
			expectedResult: false,
		},
		{
			name: "init container not running",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					InitContainerStatuses: []corev1.ContainerStatus{
						{
							Name: "test-init",
							State: corev1.ContainerState{
								Running: nil,
							},
						},
					},
				},
			},
			containerName:  "test-init",
			expectedResult: false,
		},
	} {
		t.Run(tcase.name, func(t *testing.T) {
			result := containerRunning(tcase.pod, tcase.containerName)
			assert.Equalf(t, tcase.expectedResult, result, "Expected result %v, but got %v", tcase.expectedResult, result)
		})
	}
}
|
||||
|
||||
func TestIsValidPodName(t *testing.T) {
|
||||
for _, tcase := range []struct {
|
||||
name string
|
||||
|
||||
@@ -334,8 +334,6 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts, appsetOpts Applicatio
|
||||
appsetLister := appFactory.Argoproj().V1alpha1().ApplicationSets().Lister()
|
||||
|
||||
userStateStorage := util_session.NewUserStateStorage(opts.RedisClient)
|
||||
ssoClientApp, err := oidc.NewClientApp(settings, opts.DexServerAddr, opts.DexTLSConfig, opts.BaseHRef, cacheutil.NewRedisCache(opts.RedisClient, settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
|
||||
errorsutil.CheckError(err)
|
||||
sessionMgr := util_session.NewSessionManager(settingsMgr, projLister, opts.DexServerAddr, opts.DexTLSConfig, userStateStorage)
|
||||
enf := rbac.NewEnforcer(opts.KubeClientset, opts.Namespace, common.ArgoCDRBACConfigMapName, nil)
|
||||
enf.EnableEnforce(!opts.DisableAuth)
|
||||
@@ -383,7 +381,6 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts, appsetOpts Applicatio
|
||||
a := &ArgoCDServer{
|
||||
ArgoCDServerOpts: opts,
|
||||
ApplicationSetOpts: appsetOpts,
|
||||
ssoClientApp: ssoClientApp,
|
||||
log: logger,
|
||||
settings: settings,
|
||||
sessionMgr: sessionMgr,
|
||||
@@ -586,6 +583,10 @@ func (server *ArgoCDServer) Run(ctx context.Context, listeners *Listeners) {
|
||||
if server.RedisClient != nil {
|
||||
cacheutil.CollectMetrics(server.RedisClient, metricsServ, server.userStateStorage.GetLockObject())
|
||||
}
|
||||
// OIDC config needs to be refreshed at each server restart
|
||||
ssoClientApp, err := oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
|
||||
errorsutil.CheckError(err)
|
||||
server.ssoClientApp = ssoClientApp
|
||||
|
||||
// Don't init storage until after CollectMetrics. CollectMetrics adds hooks to the Redis client, and Init
|
||||
// reads those hooks. If this is called first, there may be a data race.
|
||||
|
||||
@@ -488,6 +488,100 @@ func TestGracefulShutdown(t *testing.T) {
|
||||
assert.True(t, shutdown)
|
||||
}
|
||||
|
||||
func TestOIDCRefresh(t *testing.T) {
|
||||
port, err := test.GetFreePort()
|
||||
require.NoError(t, err)
|
||||
mockRepoClient := &mocks.Clientset{RepoServerServiceClient: &mocks.RepoServerServiceClient{}}
|
||||
cm := test.NewFakeConfigMap()
|
||||
cm.Data["oidc.config"] = `
|
||||
name: Test OIDC
|
||||
issuer: $oidc.myoidc.issuer
|
||||
clientID: $oidc.myoidc.clientId
|
||||
clientSecret: $oidc.myoidc.clientSecret
|
||||
`
|
||||
secret := test.NewFakeSecret()
|
||||
issuerURL := "http://oidc.127.0.0.1.nip.io"
|
||||
updatedIssuerURL := "http://newoidc.127.0.0.1.nip.io"
|
||||
secret.Data["oidc.myoidc.issuer"] = []byte(issuerURL)
|
||||
secret.Data["oidc.myoidc.clientId"] = []byte("myClientId")
|
||||
secret.Data["oidc.myoidc.clientSecret"] = []byte("myClientSecret")
|
||||
|
||||
kubeclientset := fake.NewSimpleClientset(cm, secret)
|
||||
redis, redisCloser := test.NewInMemoryRedis()
|
||||
defer redisCloser()
|
||||
s := NewServer(
|
||||
t.Context(),
|
||||
ArgoCDServerOpts{
|
||||
ListenPort: port,
|
||||
Namespace: test.FakeArgoCDNamespace,
|
||||
KubeClientset: kubeclientset,
|
||||
AppClientset: apps.NewSimpleClientset(),
|
||||
RepoClientset: mockRepoClient,
|
||||
RedisClient: redis,
|
||||
},
|
||||
ApplicationSetOpts{},
|
||||
)
|
||||
projInformerCancel := test.StartInformer(s.projInformer)
|
||||
defer projInformerCancel()
|
||||
appInformerCancel := test.StartInformer(s.appInformer)
|
||||
defer appInformerCancel()
|
||||
appsetInformerCancel := test.StartInformer(s.appsetInformer)
|
||||
defer appsetInformerCancel()
|
||||
clusterInformerCancel := test.StartInformer(s.clusterInformer)
|
||||
defer clusterInformerCancel()
|
||||
|
||||
shutdown := false
|
||||
|
||||
lns, err := s.Listen()
|
||||
require.NoError(t, err)
|
||||
runCtx := t.Context()
|
||||
|
||||
var wg gosync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func(shutdown *bool) {
|
||||
defer wg.Done()
|
||||
s.Run(runCtx, lns)
|
||||
*shutdown = true
|
||||
}(&shutdown)
|
||||
|
||||
for !s.available.Load() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
assert.True(t, s.available.Load())
|
||||
assert.Equal(t, issuerURL, s.ssoClientApp.IssuerURL())
|
||||
|
||||
// Update oidc config
|
||||
secret.Data["oidc.myoidc.issuer"] = []byte(updatedIssuerURL)
|
||||
secret.ResourceVersion = "12345"
|
||||
_, err = kubeclientset.CoreV1().Secrets(test.FakeArgoCDNamespace).Update(runCtx, secret, metav1.UpdateOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for graceful shutdown
|
||||
wg.Wait()
|
||||
for s.available.Load() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
assert.False(t, s.available.Load())
|
||||
|
||||
shutdown = false
|
||||
wg.Add(1)
|
||||
go func(shutdown *bool) {
|
||||
defer wg.Done()
|
||||
s.Run(runCtx, lns)
|
||||
*shutdown = true
|
||||
}(&shutdown)
|
||||
|
||||
for !s.available.Load() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
assert.True(t, s.available.Load())
|
||||
assert.Equal(t, updatedIssuerURL, s.ssoClientApp.IssuerURL())
|
||||
|
||||
s.stopCh <- syscall.SIGINT
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestAuthenticate(t *testing.T) {
|
||||
type testData struct {
|
||||
test string
|
||||
|
||||
@@ -156,6 +156,79 @@ func TestHelmIgnoreMissingValueFiles(t *testing.T) {
|
||||
Expect(ErrorRegex("Error: open .*does-not-exist-values.yaml: no such file or directory", ""))
|
||||
}
|
||||
|
||||
// TestHelmGlobValueFiles verifies that a glob pattern in valueFiles expands to all matching
|
||||
// files and that they are applied in lexical order (last file wins in helm merging).
|
||||
// envs/*.yaml expands to envs/a.yaml then envs/b.yaml - b.yaml is last, so foo = "b-value".
|
||||
func TestHelmGlobValueFiles(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "HELM")
|
||||
ctx := Given(t)
|
||||
ctx.Path("helm-glob-values").
|
||||
When().
|
||||
CreateApp().
|
||||
AppSet("--values", "envs/*.yaml").
|
||||
Sync().
|
||||
Then().
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
And(func(_ *Application) {
|
||||
val := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(),
|
||||
"get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string)
|
||||
assert.Equal(t, "b-value", val)
|
||||
})
|
||||
}
|
||||
|
||||
// TestHelmRecursiveGlobValueFiles verifies that the ** double-star pattern recursively
|
||||
// matches files at any depth. envs/**/*.yaml expands (zero-segments first) to:
|
||||
// envs/a.yaml, envs/b.yaml, envs/nested/c.yaml - c.yaml is last, so foo = "c-value".
|
||||
func TestHelmRecursiveGlobValueFiles(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "HELM")
|
||||
ctx := Given(t)
|
||||
ctx.Path("helm-glob-values").
|
||||
When().
|
||||
CreateApp().
|
||||
AppSet("--values", "envs/**/*.yaml").
|
||||
Sync().
|
||||
Then().
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
And(func(_ *Application) {
|
||||
val := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(),
|
||||
"get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string)
|
||||
assert.Equal(t, "c-value", val)
|
||||
})
|
||||
}
|
||||
|
||||
// TestHelmGlobValueFilesNoMatch verifies that a glob pattern with no matching files
|
||||
// surfaces as a comparison error on the application.
|
||||
func TestHelmGlobValueFilesNoMatch(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "HELM")
|
||||
Given(t).
|
||||
Path("helm-glob-values").
|
||||
When().
|
||||
CreateApp().
|
||||
AppSet("--values", "nonexistent/*.yaml").
|
||||
Then().
|
||||
Expect(Condition(ApplicationConditionComparisonError, `values file glob "nonexistent/*.yaml" matched no files`))
|
||||
}
|
||||
|
||||
// TestHelmGlobValueFilesIgnoreMissing verifies that a non-matching glob pattern is
|
||||
// silently skipped when ignoreMissingValueFiles is set, and the app syncs successfully.
|
||||
func TestHelmGlobValueFilesIgnoreMissing(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "HELM")
|
||||
Given(t).
|
||||
Path("helm-glob-values").
|
||||
When().
|
||||
CreateApp().
|
||||
AppSet("--values", "nonexistent/*.yaml", "--ignore-missing-value-files").
|
||||
Sync().
|
||||
Then().
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced))
|
||||
}
|
||||
|
||||
func TestHelmValuesMultipleUnset(t *testing.T) {
|
||||
Given(t).
|
||||
Path("helm").
|
||||
|
||||
3
test/e2e/testdata/helm-glob-values/Chart.yaml
vendored
Normal file
3
test/e2e/testdata/helm-glob-values/Chart.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
apiVersion: v2
|
||||
version: 1.0.0
|
||||
name: helm-glob-values
|
||||
1
test/e2e/testdata/helm-glob-values/envs/a.yaml
vendored
Normal file
1
test/e2e/testdata/helm-glob-values/envs/a.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
foo: a-value
|
||||
1
test/e2e/testdata/helm-glob-values/envs/b.yaml
vendored
Normal file
1
test/e2e/testdata/helm-glob-values/envs/b.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
foo: b-value
|
||||
1
test/e2e/testdata/helm-glob-values/envs/nested/c.yaml
vendored
Normal file
1
test/e2e/testdata/helm-glob-values/envs/nested/c.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
foo: c-value
|
||||
6
test/e2e/testdata/helm-glob-values/templates/config-map.yaml
vendored
Normal file
6
test/e2e/testdata/helm-glob-values/templates/config-map.yaml
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: my-map
|
||||
data:
|
||||
foo: {{.Values.foo}}
|
||||
1
test/e2e/testdata/helm-glob-values/values.yaml
vendored
Normal file
1
test/e2e/testdata/helm-glob-values/values.yaml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
foo: default
|
||||
0
ui/dist/app/assets/images/resources/.gitkeep
vendored
Normal file
0
ui/dist/app/assets/images/resources/.gitkeep
vendored
Normal file
@@ -5,4 +5,5 @@ import "embed"
|
||||
// Embedded contains embedded UI resources
|
||||
//
|
||||
//go:embed dist/app
|
||||
//go:embed all:dist/app/assets/images/resources
|
||||
var Embedded embed.FS
|
||||
|
||||
@@ -8,6 +8,7 @@ import {services} from '../../../shared/services';
|
||||
import {
|
||||
ApplicationSyncWindowStatusIcon,
|
||||
ComparisonStatusIcon,
|
||||
formatApplicationSetProgressiveSyncStep,
|
||||
getAppDefaultSource,
|
||||
getAppDefaultSyncRevisionExtra,
|
||||
getAppOperationState,
|
||||
@@ -134,7 +135,7 @@ const ProgressiveSyncStatus = ({application}: {application: models.Application})
|
||||
<div className='application-status-panel__item-value' style={{color: getProgressiveSyncStatusColor(appResource.status)}}>
|
||||
{getProgressiveSyncStatusIcon({status: appResource.status})} {appResource.status}
|
||||
</div>
|
||||
{appResource?.step && <div className='application-status-panel__item-value'>Wave: {appResource.step}</div>}
|
||||
{appResource?.step !== undefined && <div className='application-status-panel__item-value'>{formatApplicationSetProgressiveSyncStep(appResource.step)}</div>}
|
||||
{lastTransitionTime && (
|
||||
<div className='application-status-panel__item-name' style={{marginBottom: '0.5em'}}>
|
||||
Last Transition: <br />
|
||||
|
||||
@@ -721,7 +721,7 @@ export const ApplicationSummary = (props: ApplicationSummaryProps) => {
|
||||
selfHeal ? 'Enable Self Heal?' : 'Disable Self Heal?',
|
||||
selfHeal
|
||||
? 'If checked, application will automatically sync when changes are detected'
|
||||
: 'Are you sure you want to enable automated self healing?',
|
||||
: 'If unchecked, application will not automatically sync when changes are detected',
|
||||
automated.prune,
|
||||
selfHeal,
|
||||
automated.enabled
|
||||
|
||||
@@ -1858,6 +1858,14 @@ export function getAppUrl(app: appModels.AbstractApplication): string {
|
||||
return `${basePath}/${app.metadata.namespace}/${app.metadata.name}`;
|
||||
}
|
||||
|
||||
/** RollingSync step for display; backend uses -1 when no step matches the app's labels. */
|
||||
export function formatApplicationSetProgressiveSyncStep(step: string | undefined): string {
|
||||
if (step === '-1') {
|
||||
return 'Step: unmatched label';
|
||||
}
|
||||
return `Step: ${step ?? ''}`;
|
||||
}
|
||||
|
||||
export const getProgressiveSyncStatusIcon = ({status, isButton}: {status: string; isButton?: boolean}) => {
|
||||
const getIconProps = () => {
|
||||
switch (status) {
|
||||
|
||||
@@ -910,6 +910,12 @@ func verifyGenerateManifests(
|
||||
// and not whether it actually contains any manifests.
|
||||
_, err = repoClient.GenerateManifest(ctx, &req)
|
||||
if err != nil {
|
||||
// A glob pattern matching no files is a runtime condition, not a spec error —
|
||||
// the files may be added later. Skip adding an InvalidSpecError here and let
|
||||
// the app controller surface it as a ComparisonError during reconciliation.
|
||||
if status.Code(err) == codes.NotFound && strings.Contains(err.Error(), "matched no files") {
|
||||
continue
|
||||
}
|
||||
errMessage := fmt.Sprintf("Unable to generate manifests in %s: %s", source.Path, err)
|
||||
conditions = append(conditions, argoappv1.ApplicationCondition{
|
||||
Type: argoappv1.ApplicationConditionInvalidSpecError,
|
||||
@@ -1068,39 +1074,51 @@ type ClusterGetter interface {
|
||||
GetClusterServersByName(ctx context.Context, server string) ([]string, error)
|
||||
}
|
||||
|
||||
// GetDestinationServer resolves the cluster server URL for the given destination without
|
||||
// fetching the full Cluster object. For server based destinations the URL is returned
|
||||
// directly (normalized). For name based destinations GetClusterServersByName is called.
|
||||
// An error is returned if the name is ambiguous or missing.
|
||||
func GetDestinationServer(ctx context.Context, destination argoappv1.ApplicationDestination, db ClusterGetter) (string, error) {
|
||||
if destination.Name != "" && destination.Server != "" {
|
||||
return "", fmt.Errorf("application destination can't have both name and server defined: %s %s", destination.Name, destination.Server)
|
||||
}
|
||||
if destination.Server != "" {
|
||||
return strings.TrimRight(destination.Server, "/"), nil
|
||||
}
|
||||
if destination.Name != "" {
|
||||
clusterURLs, err := db.GetClusterServersByName(ctx, destination.Name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error getting cluster by name %q: %w", destination.Name, err)
|
||||
}
|
||||
if len(clusterURLs) == 0 {
|
||||
return "", fmt.Errorf("there are no clusters with this name: %s", destination.Name)
|
||||
}
|
||||
if len(clusterURLs) > 1 {
|
||||
return "", fmt.Errorf("there are %d clusters with the same name: [%s]", len(clusterURLs), strings.Join(clusterURLs, " "))
|
||||
}
|
||||
return clusterURLs[0], nil
|
||||
}
|
||||
// nolint:staticcheck // Error constant is very old, shouldn't lowercase the first letter.
|
||||
return "", errors.New(ErrDestinationMissing)
|
||||
}
|
||||
|
||||
// GetDestinationCluster returns the cluster object based on the destination server or name. If both are provided or
|
||||
// both are empty, an error is returned. If the destination server is provided, the cluster is fetched by the server
|
||||
// URL. If the destination name is provided, the cluster is fetched by the name. If multiple clusters have the specified
|
||||
// name, an error is returned.
|
||||
func GetDestinationCluster(ctx context.Context, destination argoappv1.ApplicationDestination, db ClusterGetter) (*argoappv1.Cluster, error) {
|
||||
if destination.Name != "" && destination.Server != "" {
|
||||
return nil, fmt.Errorf("application destination can't have both name and server defined: %s %s", destination.Name, destination.Server)
|
||||
server, err := GetDestinationServer(ctx, destination, db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if destination.Server != "" {
|
||||
cluster, err := db.GetCluster(ctx, destination.Server)
|
||||
if err != nil {
|
||||
cluster, err := db.GetCluster(ctx, server)
|
||||
if err != nil {
|
||||
if destination.Server != "" {
|
||||
return nil, fmt.Errorf("error getting cluster by server %q: %w", destination.Server, err)
|
||||
}
|
||||
return cluster, nil
|
||||
} else if destination.Name != "" {
|
||||
clusterURLs, err := db.GetClusterServersByName(ctx, destination.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting cluster by name %q: %w", destination.Name, err)
|
||||
}
|
||||
if len(clusterURLs) == 0 {
|
||||
return nil, fmt.Errorf("there are no clusters with this name: %s", destination.Name)
|
||||
}
|
||||
if len(clusterURLs) > 1 {
|
||||
return nil, fmt.Errorf("there are %d clusters with the same name: [%s]", len(clusterURLs), strings.Join(clusterURLs, " "))
|
||||
}
|
||||
cluster, err := db.GetCluster(ctx, clusterURLs[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting cluster by URL: %w", err)
|
||||
}
|
||||
return cluster, nil
|
||||
return nil, fmt.Errorf("error getting cluster by URL: %w", err)
|
||||
}
|
||||
// nolint:staticcheck // Error constant is very old, shouldn't lowercase the first letter.
|
||||
return nil, errors.New(ErrDestinationMissing)
|
||||
return cluster, nil
|
||||
}
|
||||
|
||||
func GetGlobalProjects(proj *argoappv1.AppProject, projLister applicationsv1.AppProjectLister, settingsManager *settings.SettingsManager) []*argoappv1.AppProject {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -26,7 +27,7 @@ import (
|
||||
|
||||
var (
|
||||
localCluster = appv1.Cluster{
|
||||
Name: "in-cluster",
|
||||
Name: appv1.KubernetesInClusterName,
|
||||
Server: appv1.KubernetesInternalAPIServerAddr,
|
||||
Info: appv1.ClusterInfo{
|
||||
ConnectionState: appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful},
|
||||
@@ -231,7 +232,10 @@ func (db *db) getClusterSecret(server string) (*corev1.Secret, error) {
|
||||
|
||||
// GetCluster returns a cluster from a query
|
||||
func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) {
|
||||
informer := db.settingsMgr.GetClusterInformer()
|
||||
informer, err := db.settingsMgr.GetClusterInformer()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get cluster informer: %w", err)
|
||||
}
|
||||
if server == appv1.KubernetesInternalAPIServerAddr {
|
||||
argoSettings, err := db.settingsMgr.GetSettings()
|
||||
if err != nil {
|
||||
@@ -282,19 +286,27 @@ func (db *db) GetProjectClusters(_ context.Context, project string) ([]*appv1.Cl
|
||||
}
|
||||
|
||||
func (db *db) GetClusterServersByName(_ context.Context, name string) ([]string, error) {
|
||||
argoSettings, err := db.settingsMgr.GetSettings()
|
||||
informer, err := db.settingsMgr.GetClusterInformer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get cluster informer: %w", err)
|
||||
}
|
||||
|
||||
informer := db.settingsMgr.GetClusterInformer()
|
||||
servers, err := informer.GetClusterServersByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// attempt to short circuit if the in-cluster name is not involved
|
||||
if name != appv1.KubernetesInClusterName && !slices.Contains(servers, appv1.KubernetesInternalAPIServerAddr) {
|
||||
return servers, nil
|
||||
}
|
||||
|
||||
argoSettings, err := db.settingsMgr.GetSettings()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Handle local cluster special case
|
||||
if len(servers) == 0 && name == "in-cluster" && argoSettings.InClusterEnabled {
|
||||
if len(servers) == 0 && name == appv1.KubernetesInClusterName && argoSettings.InClusterEnabled {
|
||||
return []string{appv1.KubernetesInternalAPIServerAddr}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -554,6 +554,70 @@ func TestListClusters(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetClusterServersByName_IsInClusterEnabledLazyLoad(t *testing.T) {
|
||||
argoCDSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDSecretName,
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"admin.password": nil,
|
||||
"server.secretkey": nil,
|
||||
},
|
||||
}
|
||||
prodSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-cluster-secret",
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
|
||||
Annotations: map[string]string{
|
||||
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"name": []byte("prod"),
|
||||
"server": []byte("https://prod.example.com"),
|
||||
"config": []byte("{}"),
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
clusterName string
|
||||
wantErr bool
|
||||
wantServers []string
|
||||
}{
|
||||
{
|
||||
name: "non in-cluster name does not call IsInClusterEnabled()",
|
||||
clusterName: "prod",
|
||||
wantErr: false,
|
||||
wantServers: []string{"https://prod.example.com"},
|
||||
},
|
||||
{
|
||||
name: "in-cluster name calls IsInClusterEnabled()",
|
||||
clusterName: "in-cluster",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
// argocd-cm is intentionally absent: IsInClusterEnabled() fails if called.
|
||||
kubeclientset := fake.NewClientset(argoCDSecret, prodSecret)
|
||||
db := NewDB(fakeNamespace, settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace), kubeclientset)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
servers, err := db.GetClusterServersByName(t.Context(), tt.clusterName)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.ElementsMatch(t, tt.wantServers, servers)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetClusterServersByName(t *testing.T) {
|
||||
emptyArgoCDConfigMap := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
||||
@@ -685,10 +685,11 @@ func DiscoverGitHubAppInstallationID(ctx context.Context, appId int64, privateKe
|
||||
opts.Page = resp.NextPage
|
||||
}
|
||||
|
||||
// Cache all installation IDs
|
||||
// Cache each installation under its account's key so multiple orgs do not overwrite each other.
|
||||
for _, installation := range allInstallations {
|
||||
if installation.Account != nil && installation.Account.Login != nil && installation.ID != nil {
|
||||
githubInstallationIdCache.Set(cacheKey, *installation.ID, gocache.DefaultExpiration)
|
||||
instKey := fmt.Sprintf("%s:%s:%d", strings.ToLower(*installation.Account.Login), domain, appId)
|
||||
githubInstallationIdCache.Set(instKey, *installation.ID, gocache.DefaultExpiration)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -600,6 +600,35 @@ func TestDiscoverGitHubAppInstallationID(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(98765), actualId)
|
||||
})
|
||||
|
||||
t.Run("returns correct installation ID when app is installed on multiple orgs", func(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if strings.HasSuffix(r.URL.Path, "/app/installations") {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
//nolint:errcheck
|
||||
json.NewEncoder(w).Encode([]map[string]any{
|
||||
{"id": 11111, "account": map[string]any{"login": "org-alpha"}},
|
||||
{"id": 22222, "account": map[string]any{"login": "target-org"}},
|
||||
{"id": 33333, "account": map[string]any{"login": "org-gamma"}},
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
t.Cleanup(func() {
|
||||
domain, _ := domainFromBaseURL(server.URL)
|
||||
for _, org := range []string{"org-alpha", "target-org", "org-gamma"} {
|
||||
githubInstallationIdCache.Delete(fmt.Sprintf("%s:%s:%d", org, domain, 12345))
|
||||
}
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
actualId, err := DiscoverGitHubAppInstallationID(ctx, 12345, fakeGitHubAppPrivateKey, server.URL, "target-org")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(22222), actualId, "should return the installation ID for the requested org, not the last one in the list")
|
||||
})
|
||||
}
|
||||
|
||||
func TestExtractOrgFromRepoURL(t *testing.T) {
|
||||
|
||||
@@ -373,6 +373,7 @@ func (c *nativeHelmChart) loadRepoIndex(ctx context.Context, maxIndexSize int64)
|
||||
Proxy: proxy.GetCallback(c.proxy, c.noProxy),
|
||||
TLSClientConfig: tlsConf,
|
||||
DisableKeepAlives: true,
|
||||
ForceAttemptHTTP2: true,
|
||||
}
|
||||
client := http.Client{Transport: tr}
|
||||
resp, err := client.Do(req)
|
||||
@@ -492,6 +493,7 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) ([]string, error)
|
||||
Proxy: proxy.GetCallback(c.proxy, c.noProxy),
|
||||
TLSClientConfig: tlsConf,
|
||||
DisableKeepAlives: true,
|
||||
ForceAttemptHTTP2: true,
|
||||
}
|
||||
|
||||
// Wrap transport to add User-Agent header to all requests
|
||||
|
||||
@@ -2,6 +2,7 @@ package helm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
@@ -10,6 +11,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -574,6 +576,68 @@ func TestGetTagsCaching(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetTagsUsesHTTP2(t *testing.T) {
|
||||
t.Run("should negotiate HTTP/2 when TLS is configured", func(t *testing.T) {
|
||||
var requestProtos []string
|
||||
server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
requestProtos = append(requestProtos, r.Proto)
|
||||
t.Logf("called %s with proto %s", r.URL.Path, r.Proto)
|
||||
|
||||
responseTags := fakeTagsList{
|
||||
Tags: []string{"1.0.0"},
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
require.NoError(t, json.NewEncoder(w).Encode(responseTags))
|
||||
}))
|
||||
// httptest.NewTLSServer only advertises http/1.1 in ALPN, so we must
|
||||
// configure the server to also offer h2 for HTTP/2 negotiation to work.
|
||||
server.TLS = &tls.Config{NextProtos: []string{"h2", "http/1.1"}}
|
||||
server.StartTLS()
|
||||
t.Cleanup(server.Close)
|
||||
|
||||
client := NewClient(server.URL, HelmCreds{InsecureSkipVerify: true}, true, "", "")
|
||||
|
||||
tags, err := client.GetTags("mychart", true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []string{"1.0.0"}, tags)
|
||||
|
||||
// Verify that at least one request used HTTP/2. When ForceAttemptHTTP2 is
|
||||
// not set on the Transport, Go's TLS stack won't negotiate h2 even though
|
||||
// the server supports it, because a custom TLSClientConfig disables the
|
||||
// automatic HTTP/2 setup.
|
||||
require.NotEmpty(t, requestProtos, "expected at least one request to the server")
|
||||
hasHTTP2 := slices.Contains(requestProtos, "HTTP/2.0")
|
||||
assert.True(t, hasHTTP2, "expected at least one HTTP/2 request, but got protocols: %v", requestProtos)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoadRepoIndexUsesHTTP2(t *testing.T) {
|
||||
t.Run("should negotiate HTTP/2 when fetching index", func(t *testing.T) {
|
||||
var requestProto string
|
||||
server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
requestProto = r.Proto
|
||||
t.Logf("called %s with proto %s", r.URL.Path, r.Proto)
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte(`apiVersion: v1
|
||||
entries: {}
|
||||
`))
|
||||
}))
|
||||
server.TLS = &tls.Config{NextProtos: []string{"h2", "http/1.1"}}
|
||||
server.StartTLS()
|
||||
t.Cleanup(server.Close)
|
||||
|
||||
client := NewClient(server.URL, HelmCreds{InsecureSkipVerify: true}, false, "", "")
|
||||
|
||||
_, err := client.GetIndex(false, 10000)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "HTTP/2.0", requestProto, "expected HTTP/2 request for index fetch, but got %s", requestProto)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserAgentIsSet(t *testing.T) {
|
||||
t.Run("Default User-Agent for traditional Helm repo", func(t *testing.T) {
|
||||
// Create a test server that captures the User-Agent header
|
||||
|
||||
@@ -327,8 +327,12 @@ func (c *Cmd) PullOCI(repo string, chart string, version string, destination str
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *Cmd) dependencyBuild() (string, error) {
|
||||
out, _, err := c.run(context.Background(), "dependency", "build")
|
||||
func (c *Cmd) dependencyBuild(insecure bool) (string, error) {
|
||||
args := []string{"dependency", "build"}
|
||||
if insecure {
|
||||
args = append(args, "--insecure-skip-tls-verify")
|
||||
}
|
||||
out, _, err := c.run(context.Background(), args...)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to build dependencies: %w", err)
|
||||
}
|
||||
|
||||
@@ -135,6 +135,36 @@ func TestRegistryLogin(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDependencyBuild(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
insecure bool
|
||||
expectedOut string
|
||||
}{
|
||||
{
|
||||
name: "without insecure",
|
||||
insecure: false,
|
||||
expectedOut: "helm dependency build",
|
||||
},
|
||||
{
|
||||
name: "with insecure",
|
||||
insecure: true,
|
||||
expectedOut: "helm dependency build --insecure-skip-tls-verify",
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
c, err := newCmdWithVersion(".", false, "", "", func(cmd *exec.Cmd, _ func(_ string) string) (string, error) {
|
||||
return strings.Join(cmd.Args, " "), nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
out, err := c.dependencyBuild(tc.insecure)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedOut, out)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistryLogout(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -43,20 +43,21 @@ type Helm interface {
|
||||
}
|
||||
|
||||
// NewHelmApp create a new wrapper to run commands on the `helm` command-line tool.
|
||||
func NewHelmApp(workDir string, repos []HelmRepository, isLocal bool, version string, proxy string, noProxy string, passCredentials bool) (Helm, error) {
|
||||
func NewHelmApp(workDir string, repos []HelmRepository, isLocal bool, version string, proxy string, noProxy string, passCredentials bool, insecure bool) (Helm, error) {
|
||||
cmd, err := NewCmd(workDir, version, proxy, noProxy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create new helm command: %w", err)
|
||||
}
|
||||
cmd.IsLocal = isLocal
|
||||
|
||||
return &helm{repos: repos, cmd: *cmd, passCredentials: passCredentials}, nil
|
||||
return &helm{repos: repos, cmd: *cmd, passCredentials: passCredentials, insecure: insecure}, nil
|
||||
}
|
||||
|
||||
type helm struct {
|
||||
cmd Cmd
|
||||
repos []HelmRepository
|
||||
passCredentials bool
|
||||
insecure bool
|
||||
}
|
||||
|
||||
var _ Helm = &helm{}
|
||||
@@ -108,7 +109,7 @@ func (h *helm) DependencyBuild() error {
|
||||
}
|
||||
}
|
||||
h.repos = nil
|
||||
_, err := h.cmd.dependencyBuild()
|
||||
_, err := h.cmd.dependencyBuild(h.insecure)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to build helm dependencies: %w", err)
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ func template(h Helm, opts *TemplateOpts) ([]*unstructured.Unstructured, error)
|
||||
}
|
||||
|
||||
func TestHelmTemplateParams(t *testing.T) {
|
||||
h, err := NewHelmApp("./testdata/minio", []HelmRepository{}, false, "", "", "", false)
|
||||
h, err := NewHelmApp("./testdata/minio", []HelmRepository{}, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
opts := TemplateOpts{
|
||||
Name: "test",
|
||||
@@ -58,7 +58,7 @@ func TestHelmTemplateValues(t *testing.T) {
|
||||
repoRoot := "./testdata/redis"
|
||||
repoRootAbs, err := filepath.Abs(repoRoot)
|
||||
require.NoError(t, err)
|
||||
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", "", false)
|
||||
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
|
||||
require.NoError(t, err)
|
||||
@@ -84,7 +84,7 @@ func TestHelmGetParams(t *testing.T) {
|
||||
repoRoot := "./testdata/redis"
|
||||
repoRootAbs, err := filepath.Abs(repoRoot)
|
||||
require.NoError(t, err)
|
||||
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
params, err := h.GetParameters(nil, repoRootAbs, repoRootAbs)
|
||||
require.NoError(t, err)
|
||||
@@ -97,7 +97,7 @@ func TestHelmGetParamsValueFiles(t *testing.T) {
|
||||
repoRoot := "./testdata/redis"
|
||||
repoRootAbs, err := filepath.Abs(repoRoot)
|
||||
require.NoError(t, err)
|
||||
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
|
||||
require.NoError(t, err)
|
||||
@@ -112,7 +112,7 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
|
||||
repoRoot := "./testdata/redis"
|
||||
repoRootAbs, err := filepath.Abs(repoRoot)
|
||||
require.NoError(t, err)
|
||||
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
valuesMissingPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-missing.yaml", nil)
|
||||
require.NoError(t, err)
|
||||
@@ -126,7 +126,7 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHelmTemplateReleaseNameOverwrite(t *testing.T) {
|
||||
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
objs, err := template(h, &TemplateOpts{Name: "my-release"})
|
||||
@@ -144,7 +144,7 @@ func TestHelmTemplateReleaseNameOverwrite(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHelmTemplateReleaseName(t *testing.T) {
|
||||
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
objs, err := template(h, &TemplateOpts{Name: "test"})
|
||||
require.NoError(t, err)
|
||||
@@ -206,7 +206,7 @@ func Test_flatVals(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAPIVersions(t *testing.T) {
|
||||
h, err := NewHelmApp("./testdata/api-versions", nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp("./testdata/api-versions", nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
objs, err := template(h, &TemplateOpts{})
|
||||
@@ -221,7 +221,7 @@ func TestAPIVersions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKubeVersionWithSymbol(t *testing.T) {
|
||||
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
objs, err := template(h, &TemplateOpts{KubeVersion: "1.30.11+IKS"})
|
||||
@@ -244,7 +244,7 @@ func TestKubeVersionWithSymbol(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSkipCrds(t *testing.T) {
|
||||
h, err := NewHelmApp("./testdata/crds", nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp("./testdata/crds", nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
objs, err := template(h, &TemplateOpts{SkipCrds: false})
|
||||
@@ -261,7 +261,7 @@ func TestSkipCrds(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSkipTests(t *testing.T) {
|
||||
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false)
|
||||
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
objs, err := template(h, &TemplateOpts{SkipTests: false})
|
||||
|
||||
@@ -143,6 +143,7 @@ func NewClientWithLock(repoURL string, creds Creds, repoLock sync.KeyLock, proxy
|
||||
Proxy: proxy.GetCallback(proxyURL, noProxy),
|
||||
TLSClientConfig: tlsConf,
|
||||
DisableKeepAlives: true,
|
||||
ForceAttemptHTTP2: true,
|
||||
},
|
||||
/*
|
||||
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||
|
||||
@@ -5,16 +5,22 @@ import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go"
|
||||
imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"oras.land/oras-go/v2"
|
||||
"oras.land/oras-go/v2/content"
|
||||
@@ -761,6 +767,38 @@ func Test_nativeOCIClient_ResolveRevision(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewClientUsesHTTP2(t *testing.T) {
|
||||
t.Run("should negotiate HTTP/2 when TLS is configured", func(t *testing.T) {
|
||||
var requestProtos []string
|
||||
server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
requestProtos = append(requestProtos, r.Proto)
|
||||
t.Logf("called %s with proto %s", r.URL.Path, r.Proto)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
// httptest.NewTLSServer only advertises http/1.1 in ALPN, so we must
|
||||
// configure the server to also offer h2 for HTTP/2 negotiation to work.
|
||||
server.TLS = &tls.Config{NextProtos: []string{"h2", "http/1.1"}}
|
||||
server.StartTLS()
|
||||
t.Cleanup(server.Close)
|
||||
|
||||
serverURL, err := url.Parse(server.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// NewClient expects oci://host/path format.
|
||||
repoURL := "oci://" + serverURL.Host + "/myorg/myrepo"
|
||||
client, err := NewClient(repoURL, Creds{InsecureSkipVerify: true}, "", "", nil,
|
||||
WithEventHandlers(fakeEventHandlers(t, serverURL.Host+"/myorg/myrepo")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// TestRepo pings the registry's /v2/ endpoint, exercising the transport.
|
||||
_, _ = client.TestRepo(t.Context())
|
||||
|
||||
require.NotEmpty(t, requestProtos, "expected at least one request to the server")
|
||||
hasHTTP2 := slices.Contains(requestProtos, "HTTP/2.0")
|
||||
assert.True(t, hasHTTP2, "expected at least one HTTP/2 request, but got protocols: %v", requestProtos)
|
||||
})
|
||||
}
|
||||
|
||||
func fakeEventHandlers(t *testing.T, repoURL string) EventHandlers {
|
||||
t.Helper()
|
||||
return EventHandlers{
|
||||
@@ -772,6 +810,9 @@ func fakeEventHandlers(t *testing.T, repoURL string) EventHandlers {
|
||||
OnGetTagsFail: func(repo string) func() {
|
||||
return func() { require.Equal(t, repoURL, repo) }
|
||||
},
|
||||
OnTestRepoFail: func(repo string) func() {
|
||||
return func() { require.Equal(t, repoURL, repo) }
|
||||
},
|
||||
OnExtractFail: func(repo string) func(revision string) {
|
||||
return func(_ string) { require.Equal(t, repoURL, repo) }
|
||||
},
|
||||
|
||||
@@ -1069,3 +1069,7 @@ func FormatAccessTokenCacheKey(sub string) string {
|
||||
func formatOidcTokenCacheKey(sub string, sid string) string {
|
||||
return fmt.Sprintf("%s_%s_%s", OidcTokenCachePrefix, sub, sid)
|
||||
}
|
||||
|
||||
func (a *ClientApp) IssuerURL() string {
|
||||
return a.issuerURL
|
||||
}
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
//go:build race
|
||||
|
||||
package settings
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
@@ -42,7 +41,7 @@ func TestClusterInformer_ConcurrentAccess(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(secret1)
|
||||
clientset := fake.NewClientset(secret1)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -50,16 +49,15 @@ func TestClusterInformer_ConcurrentAccess(t *testing.T) {
|
||||
cache.WaitForCacheSync(ctx.Done(), informer.HasSynced)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 100; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range 100 {
|
||||
wg.Go(func() {
|
||||
cluster, err := informer.GetClusterByURL("https://cluster1.example.com")
|
||||
assert.NoError(t, err)
|
||||
// require calls t.FailNow(), which only stops the current goroutine, not the test
|
||||
assert.NoError(t, err) //nolint:testifylint
|
||||
assert.NotNil(t, cluster)
|
||||
// Modifying returned cluster should not affect others due to DeepCopy
|
||||
cluster.Name = "modified"
|
||||
}()
|
||||
})
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
@@ -87,7 +85,7 @@ func TestClusterInformer_TransformErrors(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(badSecret)
|
||||
clientset := fake.NewClientset(badSecret)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -96,12 +94,12 @@ func TestClusterInformer_TransformErrors(t *testing.T) {
|
||||
|
||||
// GetClusterByURL should return not found since transform failed
|
||||
_, err = informer.GetClusterByURL("https://bad.example.com")
|
||||
assert.Error(t, err)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not found")
|
||||
|
||||
// ListClusters should return an error because the cache contains a secret and not a cluster
|
||||
_, err = informer.ListClusters()
|
||||
assert.Error(t, err)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "cluster cache contains unexpected type")
|
||||
}
|
||||
|
||||
@@ -140,7 +138,7 @@ func TestClusterInformer_TransformErrors_MixedSecrets(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(goodSecret, badSecret)
|
||||
clientset := fake.NewClientset(goodSecret, badSecret)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -154,7 +152,7 @@ func TestClusterInformer_TransformErrors_MixedSecrets(t *testing.T) {
|
||||
|
||||
// But ListClusters should fail because there's a bad secret in the cache
|
||||
_, err = informer.ListClusters()
|
||||
assert.Error(t, err)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "cluster cache contains unexpected type")
|
||||
}
|
||||
|
||||
@@ -177,7 +175,7 @@ func TestClusterInformer_DynamicUpdates(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(secret1)
|
||||
clientset := fake.NewClientset(secret1)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -235,7 +233,7 @@ func TestClusterInformer_URLNormalization(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(secret)
|
||||
clientset := fake.NewClientset(secret)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -290,7 +288,7 @@ func TestClusterInformer_GetClusterServersByName(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(secrets...)
|
||||
clientset := fake.NewClientset(secrets...)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -309,7 +307,7 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
var secrets []*corev1.Secret
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("cluster-%d", i),
|
||||
@@ -319,15 +317,15 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"server": []byte(fmt.Sprintf("https://cluster%d.example.com", i)),
|
||||
"name": []byte(fmt.Sprintf("cluster-%d", i)),
|
||||
"server": fmt.Appendf(nil, "https://cluster%d.example.com", i),
|
||||
"name": fmt.Appendf(nil, "cluster-%d", i),
|
||||
"config": []byte(`{"bearerToken":"token"}`),
|
||||
},
|
||||
}
|
||||
secrets = append(secrets, secret)
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset()
|
||||
clientset := fake.NewClientset()
|
||||
for _, secret := range secrets {
|
||||
_, err := clientset.CoreV1().Secrets("argocd").Create(t.Context(), secret, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -342,11 +340,11 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
var readErrors, updateErrors atomic.Int64
|
||||
|
||||
for i := 0; i < 50; i++ {
|
||||
for i := range 50 {
|
||||
wg.Add(1)
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
for j := 0; j < 100; j++ {
|
||||
for j := range 100 {
|
||||
clusterID := j % 10
|
||||
url := fmt.Sprintf("https://cluster%d.example.com", clusterID)
|
||||
|
||||
@@ -376,13 +374,13 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
|
||||
}(i)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
wg.Add(1)
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
for j := 0; j < 20; j++ {
|
||||
for j := range 20 {
|
||||
secret := secrets[id%10].DeepCopy()
|
||||
secret.Data["name"] = []byte(fmt.Sprintf("updated-%d-%d", id, j))
|
||||
secret.Data["name"] = fmt.Appendf(nil, "updated-%d-%d", id, j)
|
||||
|
||||
_, err := clientset.CoreV1().Secrets("argocd").Update(t.Context(), secret, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
@@ -393,11 +391,9 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
|
||||
}(i)
|
||||
}
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for j := 0; j < 50; j++ {
|
||||
for range 20 {
|
||||
wg.Go(func() {
|
||||
for range 50 {
|
||||
clusters, err := informer.ListClusters()
|
||||
if err != nil {
|
||||
readErrors.Add(1)
|
||||
@@ -412,7 +408,7 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
@@ -444,7 +440,7 @@ func TestClusterInformer_DeepCopyIsolation(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(secret)
|
||||
clientset := fake.NewClientset(secret)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -483,12 +479,13 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
|
||||
name: "Empty namespace - no clusters",
|
||||
secrets: []runtime.Object{},
|
||||
testFunc: func(t *testing.T, informer *ClusterInformer) {
|
||||
t.Helper()
|
||||
clusters, err := informer.ListClusters()
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, clusters)
|
||||
|
||||
_, err = informer.GetClusterByURL("https://nonexistent.example.com")
|
||||
assert.Error(t, err)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not found")
|
||||
},
|
||||
},
|
||||
@@ -511,9 +508,10 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
|
||||
},
|
||||
},
|
||||
testFunc: func(t *testing.T, informer *ClusterInformer) {
|
||||
t.Helper()
|
||||
cluster, err := informer.GetClusterByURL("https://noname.example.com")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "", cluster.Name)
|
||||
assert.Empty(t, cluster.Name)
|
||||
|
||||
servers, err := informer.GetClusterServersByName("")
|
||||
require.NoError(t, err)
|
||||
@@ -539,6 +537,7 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
|
||||
},
|
||||
},
|
||||
testFunc: func(t *testing.T, informer *ClusterInformer) {
|
||||
t.Helper()
|
||||
cluster, err := informer.GetClusterByURL("https://cluster.example.com:8443/path/")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "special", cluster.Name)
|
||||
@@ -578,6 +577,7 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
|
||||
},
|
||||
},
|
||||
testFunc: func(t *testing.T, informer *ClusterInformer) {
|
||||
t.Helper()
|
||||
cluster, err := informer.GetClusterByURL("https://duplicate.example.com")
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cluster)
|
||||
@@ -598,20 +598,21 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
|
||||
"server": []byte("https://many-ns.example.com"),
|
||||
"name": []byte("many-ns"),
|
||||
"namespaces": func() []byte {
|
||||
ns := ""
|
||||
for i := 0; i < 100; i++ {
|
||||
var sb strings.Builder
|
||||
for i := range 100 {
|
||||
if i > 0 {
|
||||
ns += ","
|
||||
sb.WriteString(",")
|
||||
}
|
||||
ns += fmt.Sprintf("namespace-%d", i)
|
||||
fmt.Fprintf(&sb, "namespace-%d", i)
|
||||
}
|
||||
return []byte(ns)
|
||||
return []byte(sb.String())
|
||||
}(),
|
||||
"config": []byte(`{}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
testFunc: func(t *testing.T, informer *ClusterInformer) {
|
||||
t.Helper()
|
||||
cluster, err := informer.GetClusterByURL("https://many-ns.example.com")
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, cluster.Namespaces, 100)
|
||||
@@ -648,6 +649,7 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
|
||||
},
|
||||
},
|
||||
testFunc: func(t *testing.T, informer *ClusterInformer) {
|
||||
t.Helper()
|
||||
cluster, err := informer.GetClusterByURL("https://annotated.example.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -676,7 +678,7 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
clientset := fake.NewSimpleClientset(tt.secrets...)
|
||||
clientset := fake.NewClientset(tt.secrets...)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -720,7 +722,7 @@ func TestClusterInformer_SecretDeletion(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(secret1, secret2)
|
||||
clientset := fake.NewClientset(secret1, secret2)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -737,7 +739,7 @@ func TestClusterInformer_SecretDeletion(t *testing.T) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
_, err = informer.GetClusterByURL("https://cluster1.example.com")
|
||||
assert.Error(t, err)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not found")
|
||||
|
||||
cluster2, err := informer.GetClusterByURL("https://cluster2.example.com")
|
||||
@@ -798,7 +800,7 @@ func TestClusterInformer_ComplexConfig(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(secret)
|
||||
clientset := fake.NewClientset(secret)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -811,8 +813,8 @@ func TestClusterInformer_ComplexConfig(t *testing.T) {
|
||||
assert.Equal(t, "admin", cluster.Config.Username)
|
||||
assert.Equal(t, "password123", cluster.Config.Password)
|
||||
assert.Equal(t, "bearer-token", cluster.Config.BearerToken)
|
||||
assert.True(t, cluster.Config.TLSClientConfig.Insecure)
|
||||
assert.Equal(t, "cluster.internal", cluster.Config.TLSClientConfig.ServerName)
|
||||
assert.True(t, cluster.Config.Insecure)
|
||||
assert.Equal(t, "cluster.internal", cluster.Config.ServerName)
|
||||
|
||||
assert.NotNil(t, cluster.Config.AWSAuthConfig)
|
||||
assert.Equal(t, "eks-cluster", cluster.Config.AWSAuthConfig.ClusterName)
|
||||
@@ -831,7 +833,7 @@ func BenchmarkClusterInformer_GetClusterByURL(b *testing.B) {
|
||||
defer cancel()
|
||||
|
||||
var secrets []runtime.Object
|
||||
for i := 0; i < 1000; i++ {
|
||||
for i := range 1000 {
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("cluster-%d", i),
|
||||
@@ -841,15 +843,15 @@ func BenchmarkClusterInformer_GetClusterByURL(b *testing.B) {
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"server": []byte(fmt.Sprintf("https://cluster%d.example.com", i)),
|
||||
"name": []byte(fmt.Sprintf("cluster-%d", i)),
|
||||
"server": fmt.Appendf(nil, "https://cluster%d.example.com", i),
|
||||
"name": fmt.Appendf(nil, "cluster-%d", i),
|
||||
"config": []byte(`{"bearerToken":"token"}`),
|
||||
},
|
||||
}
|
||||
secrets = append(secrets, secret)
|
||||
}
|
||||
|
||||
clientset := fake.NewSimpleClientset(secrets...)
|
||||
clientset := fake.NewClientset(secrets...)
|
||||
informer, err := NewClusterInformer(clientset, "argocd")
|
||||
require.NoError(b, err)
|
||||
|
||||
|
||||
@@ -661,10 +661,11 @@ func (mgr *SettingsManager) GetSecretsInformer() (cache.SharedIndexInformer, err
|
||||
}
|
||||
|
||||
// GetClusterInformer returns the cluster cache for optimized cluster lookups.
|
||||
func (mgr *SettingsManager) GetClusterInformer() *ClusterInformer {
|
||||
// Ensure the settings manager is initialized
|
||||
_ = mgr.ensureSynced(false)
|
||||
return mgr.clusterInformer
|
||||
func (mgr *SettingsManager) GetClusterInformer() (*ClusterInformer, error) {
|
||||
if err := mgr.ensureSynced(false); err != nil {
|
||||
return nil, fmt.Errorf("error ensuring that the settings manager is synced: %w", err)
|
||||
}
|
||||
return mgr.clusterInformer, nil
|
||||
}
|
||||
|
||||
func (mgr *SettingsManager) updateSecret(callback func(*corev1.Secret) error) error {
|
||||
|
||||
Reference in New Issue
Block a user