Compare commits

..

8 Commits

Author SHA1 Message Date
Mangaal Meetei
0e729cce34 feat(cli): add appset-namespace for appset command (#27022)
Signed-off-by: Mangaal <angommeeteimangaal@gmail.com>
2026-04-01 13:37:33 +03:00
Nitish Kumar
fb1b240c9e docs: add missing content for Automatic Retry with a limit section (#27092)
Signed-off-by: nitishfy <justnitish06@gmail.com>
2026-03-31 20:04:10 +02:00
Anand Francis Joseph
c52bf66380 fix(appcontroller): application controller in core mode fails to sync when server.secretkey is missing (#26793)
Signed-off-by: anandf <anjoseph@redhat.com>
2026-03-31 13:26:11 -04:00
Jaewoo Choi
e00345bff7 docs: replace resource_hooks links with sync-waves (#26187)
Signed-off-by: choejwoo <jaewoo45@gmail.com>
2026-03-31 19:37:42 +03:00
Oliver Gondža
c3c12c1cad fix(commitserver): Static analysis fixes (#27085)
Signed-off-by: Oliver Gondža <ogondza@gmail.com>
2026-03-31 15:15:04 +02:00
Dan Garfield
e96063557a fix(docs): Fix formatting and clarity about requestedScopes in Keycloak integration docs (#27019)
Signed-off-by: Dan Garfield <dan.garfield@octopus.com>
Signed-off-by: Dan Garfield <dan@codefresh.io>
2026-03-31 12:44:29 +03:00
S Kevin Joe Harris
bfe5cfb587 chore: New gif for docs (#27081)
Signed-off-by: Kevin Joe Harris <kevinjoeharris1@gmail.com>
2026-03-31 11:31:20 +03:00
Blake Pettersson
393152ddad fix: pass repo.insecure flag to helm dependency build (#27078)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
2026-03-30 15:40:21 -07:00
26 changed files with 484 additions and 105 deletions

View File

@@ -40,6 +40,10 @@ var appSetExample = templates.Examples(`
# Delete an ApplicationSet
argocd appset delete APPSETNAME (APPSETNAME...)
# Namespace precedence for --appset-namespace (-N):
# - get/delete: if the argument is namespace/name, that namespace wins; -N is ignored.
# - create/generate: metadata.namespace in the YAML wins when set; -N applies only when the manifest omits namespace.
`)
// NewAppSetCommand returns a new instance of an `argocd appset` command
@@ -64,8 +68,9 @@ func NewAppSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
// NewApplicationSetGetCommand returns a new instance of an `argocd appset get` command
func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
showParams bool
output string
showParams bool
appSetNamespace string
)
command := &cobra.Command{
Use: "get APPSETNAME",
@@ -73,6 +78,13 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
Example: templates.Examples(`
# Get ApplicationSets
argocd appset get APPSETNAME
# Get ApplicationSet in a specific namespace using qualified name (namespace/name)
argocd appset get APPSET_NAMESPACE/APPSETNAME
# Get ApplicationSet in a specific namespace using --appset-namespace flag
argocd appset get --appset-namespace=APPSET_NAMESPACE APPSETNAME
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -85,7 +97,7 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
conn, appIf := acdClient.NewApplicationSetClientOrDie()
defer utilio.Close(conn)
appSetName, appSetNs := argo.ParseFromQualifiedName(args[0], "")
appSetName, appSetNs := argo.ParseFromQualifiedName(args[0], appSetNamespace)
appSet, err := appIf.Get(ctx, &applicationset.ApplicationSetGetQuery{Name: appSetName, AppsetNamespace: appSetNs})
errors.CheckError(err)
@@ -113,6 +125,7 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
command.Flags().BoolVar(&showParams, "show-params", false, "Show ApplicationSet parameters and overrides")
command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Only get ApplicationSet from a namespace (ignored when qualified name is provided)")
return command
}
@@ -121,6 +134,7 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
var (
output string
upsert, dryRun, wait bool
appSetNamespace string
)
command := &cobra.Command{
Use: "create",
@@ -129,6 +143,9 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
# Create ApplicationSets
argocd appset create <filename or URL> (<filename or URL>...)
# Create ApplicationSet in a specific namespace using the --appset-namespace flag
argocd appset create --appset-namespace=APPSET_NAMESPACE <filename or URL> (<filename or URL>...)
# Dry-run AppSet creation to see what applications would be managed
argocd appset create --dry-run <filename or URL> -o json | jq -r '.status.resources[].name'
`),
@@ -157,6 +174,11 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
conn, appIf := argocdClient.NewApplicationSetClientOrDie()
defer utilio.Close(conn)
if appset.Namespace == "" && appSetNamespace != "" {
fmt.Printf("ApplicationSet YAML file does not have namespace; using --appset-namespace=%q.\n", appSetNamespace)
appset.Namespace = appSetNamespace
}
// Get app before creating to see if it is being updated or no change
existing, err := appIf.Get(ctx, &applicationset.ApplicationSetGetQuery{Name: appset.Name, AppsetNamespace: appset.Namespace})
if grpc.UnwrapGRPCStatus(err).Code() != codes.NotFound {
@@ -218,18 +240,23 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
command.Flags().BoolVar(&dryRun, "dry-run", false, "Allows to evaluate the ApplicationSet template on the server to get a preview of the applications that would be created")
command.Flags().BoolVar(&wait, "wait", false, "Wait until the ApplicationSet's resources are up to date. Will block indefinitely if the ApplicationSet has errors")
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Namespace where the ApplicationSet will be created in (ignored when provided YAML file has namespace set in metadata)")
return command
}
// NewApplicationSetGenerateCommand returns a new instance of an `argocd appset generate` command
func NewApplicationSetGenerateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var output string
var appSetNamespace string
command := &cobra.Command{
Use: "generate",
Short: "Generate apps of ApplicationSet rendered templates",
Example: templates.Examples(`
# Generate apps of ApplicationSet rendered templates
argocd appset generate <filename or URL> (<filename or URL>...)
# Generate apps of ApplicationSet rendered templates in a specific namespace
argocd appset generate --appset-namespace=APPSET_NAMESPACE <filename or URL> (<filename or URL>...)
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -252,6 +279,11 @@ func NewApplicationSetGenerateCommand(clientOpts *argocdclient.ClientOptions) *c
errors.Fatal(errors.ErrorGeneric, fmt.Sprintf("Error generating apps for ApplicationSet %s. ApplicationSet does not have Name field set", appset))
}
if appset.Namespace == "" && appSetNamespace != "" {
fmt.Printf("ApplicationSet YAML file does not have namespace; using --appset-namespace=%q.\n", appSetNamespace)
appset.Namespace = appSetNamespace
}
conn, appIf := argocdClient.NewApplicationSetClientOrDie()
defer utilio.Close(conn)
@@ -286,6 +318,7 @@ func NewApplicationSetGenerateCommand(clientOpts *argocdclient.ClientOptions) *c
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Namespace used for generating Applications (ignored when provided YAML file has namespace set in metadata)")
return command
}
@@ -338,8 +371,9 @@ func NewApplicationSetListCommand(clientOpts *argocdclient.ClientOptions) *cobra
// NewApplicationSetDeleteCommand returns a new instance of an `argocd appset delete` command
func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
noPrompt bool
wait bool
noPrompt bool
wait bool
appSetNamespace string
)
command := &cobra.Command{
Use: "delete",
@@ -347,6 +381,12 @@ func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cob
Example: templates.Examples(`
# Delete an applicationset
argocd appset delete APPSETNAME (APPSETNAME...)
# Delete ApplicationSet in a specific namespace using qualified name (namespace/name)
argocd appset delete APPSET_NAMESPACE/APPSETNAME
# Delete ApplicationSet in a specific namespace using --appset-namespace flag
argocd appset delete --appset-namespace=APPSET_NAMESPACE APPSETNAME
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -375,7 +415,7 @@ func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cob
promptUtil := utils.NewPrompt(isTerminal && !noPrompt)
for _, appSetQualifiedName := range args {
appSetName, appSetNs := argo.ParseFromQualifiedName(appSetQualifiedName, "")
appSetName, appSetNs := argo.ParseFromQualifiedName(appSetQualifiedName, appSetNamespace)
appsetDeleteReq := applicationset.ApplicationSetDeleteRequest{
Name: appSetName,
@@ -412,6 +452,7 @@ func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cob
}
command.Flags().BoolVarP(&noPrompt, "yes", "y", false, "Turn off prompting to confirm cascaded deletion of Application resources")
command.Flags().BoolVar(&wait, "wait", false, "Wait until deletion of the applicationset(s) completes")
command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Namespace where the ApplicationSet will be deleted from (ignored when qualified name is provided)")
return command
}

View File

@@ -102,9 +102,6 @@ func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *app
}
}
// if no manifest changes then skip commit
if !atleastOneManifestChanged {
return false, nil
}
return atleastOneManifestChanged, nil
}
@@ -140,11 +137,13 @@ func writeReadme(root *os.Root, dirPath string, metadata hydrator.HydratorCommit
if err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create README file: %w", err)
}
defer func() {
err := readmeFile.Close()
if err != nil {
log.WithError(err).Error("failed to close README file")
}
}()
err = readmeTemplate.Execute(readmeFile, metadata)
closeErr := readmeFile.Close()
if closeErr != nil {
log.WithError(closeErr).Error("failed to close README file")
}
if err != nil {
return fmt.Errorf("failed to execute readme template: %w", err)
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

After

Width:  |  Height:  |  Size: 23 MiB

View File

@@ -14,7 +14,7 @@ The Progressive Syncs feature set is intended to be light and flexible. The feat
- Progressive Syncs watch for the managed Application resources to become "Healthy" before proceeding to the next stage.
- Deployments, DaemonSets, StatefulSets, and [Argo Rollouts](https://argoproj.github.io/argo-rollouts/) are all supported, because the Application enters a "Progressing" state while pods are being rolled out. In fact, any resource with a health check that can report a "Progressing" status is supported.
- [Argo CD Resource Hooks](../../user-guide/resource_hooks.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change.
- [Argo CD Resource Hooks](../../user-guide/sync-waves.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change.
## Enabling Progressive Syncs

View File

@@ -1,21 +1,21 @@
# Keycloak
Keycloak and ArgoCD integration can be configured in two ways with Client authentication and with PKCE.
Keycloak and Argo CD integration can be configured in two ways with Client authentication and with PKCE.
If you need to authenticate with the __argo-cd command line__, you must choose the PKCE method.
* [Keycloak and ArgoCD with Client authentication](#keycloak-and-argocd-with-client-authentication)
* [Keycloak and ArgoCD with PKCE](#keycloak-and-argocd-with-pkce)
* [Keycloak and Argo CD with Client authentication](#keycloak-and-argocd-with-client-authentication)
* [Keycloak and Argo CD with PKCE](#keycloak-and-argocd-with-pkce)
## Keycloak and ArgoCD with Client authentication
## Keycloak and Argo CD with Client authentication
These instructions will take you through the entire process of getting your ArgoCD application authenticating with Keycloak.
These instructions will take you through the entire process of getting your Argo CD application to authenticate with Keycloak.
You will create a client within Keycloak and configure ArgoCD to use Keycloak for authentication, using groups set in Keycloak
Start by creating a client within Keycloak and configure Argo CD to use Keycloak for authentication, using groups set in Keycloak
to determine privileges in Argo.
### Creating a new client in Keycloak
First we need to setup a new client.
First, setup a new client.
Start by logging into your keycloak server, select the realm you want to use (`master` by default)
and then go to __Clients__ and click the __Create client__ button at the top.
@@ -37,11 +37,11 @@ but it's not recommended in production).
Make sure to click __Save__.
There should be a tab called __Credentials__. You can copy the Client Secret that we'll use in our ArgoCD configuration.
There should be a tab called __Credentials__. You can copy the Client Secret that we'll use in our Argo CD configuration.
![Keycloak client secret](../../assets/keycloak-client-secret.png "Keycloak client secret")
### Configuring ArgoCD OIDC
### Configuring Argo CD OIDC
Let's start by storing the client secret you generated earlier in the argocd secret _argocd-secret_.
@@ -68,7 +68,7 @@ data:
clientID: argocd
clientSecret: $oidc.keycloak.clientSecret
refreshTokenThreshold: 2m
requestedScopes: ["openid", "profile", "email", "groups"]
requestedScopes: ["openid", "profile", "email", "groups", "offline_access"]
```
Make sure that:
@@ -80,18 +80,18 @@ Make sure that:
- __requestedScopes__ contains the _groups_ claim if you didn't add it to the Default scopes
- __refreshTokenThreshold__ is less than the client token lifetime. If this setting is not less than the token lifetime, a new token will be obtained for every request. Keycloak sets the client token lifetime to 5 minutes by default.
## Keycloak and ArgoCD with PKCE
## Keycloak and Argo CD with PKCE
These instructions will take you through the entire process of getting your ArgoCD application authenticating with Keycloak.
These instructions will take you through the entire process of getting your Argo CD application authenticating with Keycloak.
You will create a client within Keycloak and configure ArgoCD to use Keycloak for authentication, using groups set in Keycloak
You will create a client within Keycloak and configure Argo CD to use Keycloak for authentication, using groups set in Keycloak
to determine privileges in Argo.
You will also be able to authenticate using argo-cd command line.
### Creating a new client in Keycloak
First we need to setup a new client.
First, setup a new client.
Start by logging into your keycloak server, select the realm you want to use (`master` by default)
and then go to __Clients__ and click the __Create client__ button at the top.
@@ -119,7 +119,7 @@ Now go to a tab called __Advanced__, look for parameter named __Proof Key for Co
![Keycloak configure client Step 2](../../assets/keycloak-configure-client-pkce_2.png "Keycloak configure client Step 2")
Make sure to click __Save__.
### Configuring ArgoCD OIDC
### Configuring Argo CD OIDC
Now we can configure the config map and add the oidc configuration to enable our keycloak authentication.
You can use `$ kubectl edit configmap argocd-cm`.
@@ -138,7 +138,7 @@ data:
clientID: argocd
enablePKCEAuthentication: true
refreshTokenThreshold: 2m
requestedScopes: ["openid", "profile", "email", "groups"]
requestedScopes: ["openid", "profile", "email", "groups", "offline_access"]
```
Make sure that:
@@ -146,13 +146,13 @@ Make sure that:
- __issuer__ ends with the correct realm (in this example _master_)
- __issuer__ on Keycloak releases older than version 17 the URL must include /auth (in this example /auth/realms/master)
- __clientID__ is set to the Client ID you configured in Keycloak
- __enablePKCEAuthentication__ must be set to true to enable correct ArgoCD behaviour with PKCE
- __enablePKCEAuthentication__ must be set to true to enable correct Argo CD behaviour with PKCE
- __requestedScopes__ contains the _groups_ claim if you didn't add it to the Default scopes
- __refreshTokenThreshold__ is less than the client token lifetime. If this setting is not less than the token lifetime, a new token will be obtained for every request. Keycloak sets the client token lifetime to 5 minutes by default.
## Configuring the groups claim
In order for ArgoCD to provide the groups the user is in we need to configure a groups claim that can be included in the authentication token.
In order for Argo CD to provide the groups the user is in we need to configure a groups claim that can be included in the authentication token.
To do this we'll start by creating a new __Client Scope__ called _groups_.
@@ -174,7 +174,7 @@ Go back to the client we've created earlier and go to the Tab "Client Scopes".
Click on "Add client scope", choose the _groups_ scope and add it either to the __Default__ or to the __Optional__ Client Scope.
If you put it in the Optional
category you will need to make sure that ArgoCD requests the scope in its OIDC configuration.
category you will need to make sure that Argo CD requests the scope in its OIDC configuration.
Since we will always want group information, I recommend
using the Default category.
@@ -184,7 +184,7 @@ Create a group called _ArgoCDAdmins_ and have your current user join the group.
![Keycloak user group](../../assets/keycloak-user-group.png "Keycloak user group")
## Configuring ArgoCD Policy
## Configuring Argo CD Policy
Now that we have an authentication that provides groups we want to apply a policy to these groups.
We can modify the _argocd-rbac-cm_ ConfigMap using `$ kubectl edit configmap argocd-rbac-cm`.
@@ -205,7 +205,7 @@ In this example we give the role _role:admin_ to all users in the group _ArgoCDA
You can now login using our new Keycloak OIDC authentication:
![Keycloak ArgoCD login](../../assets/keycloak-login.png "Keycloak ArgoCD login")
![Keycloak Argo CD login](../../assets/keycloak-login.png "Keycloak Argo CD login")
If you have used PKCE method, you can also authenticate using command line:
```bash
@@ -219,7 +219,7 @@ Once done, you should see
![Authentication successful!](../../assets/keycloak-authentication-successful.png "Authentication successful!")
## Troubleshoot
If ArgoCD auth returns 401 or when the login attempt leads to the loop, then restart the argocd-server pod.
If Argo CD auth returns 401 or when the login attempt leads to the loop, then restart the argocd-server pod.
```
kubectl rollout restart deployment argocd-server -n argocd
```

View File

@@ -4,7 +4,7 @@ mkdocs-github-admonitions-plugin==0.1.1
# Thus pointing to the older version of mkdocs-material.
mkdocs-material==7.1.8
markdown_include==0.8.1
pygments==2.20.0
pygments==2.19.2
jinja2==3.1.6
markdown==3.10
pymdown-extensions==10.17.1

View File

@@ -6,7 +6,7 @@
|--------------------------------------------|---------------------|---------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| argocd.argoproj.io/application-set-refresh | ApplicationSet | `"true"` | Added when an ApplicationSet is requested to be refreshed by a webhook. The ApplicationSet controller will remove this annotation at the end of reconciliation. |
| argocd.argoproj.io/compare-options | any | [see compare options docs](compare-options.md) | Configures how an app's current state is compared to its desired state. |
| argocd.argoproj.io/hook | any | [see resource hooks docs](resource_hooks.md) | Used to configure [resource hooks](resource_hooks.md). |
| argocd.argoproj.io/hook | any | [see hooks docs](sync-waves.md) | Used to configure [resource hooks](sync-waves.md). |
| argocd.argoproj.io/hook-delete-policy | any | [see sync waves docs](sync-waves.md#hook-lifecycle-and-cleanup) | Used to set a [resource hook's deletion policy](sync-waves.md#hook-lifecycle-and-cleanup). |
| argocd.argoproj.io/manifest-generate-paths | Application | [see scaling docs](../operator-manual/high_availability.md#manifest-paths-annotation) | Used to avoid unnecessary Application refreshes, especially in mono-repos. |
| argocd.argoproj.io/managed-by-url | Application | A valid http(s) URL | Specifies the URL of the Argo CD instance managing the application. Used to correctly link to applications managed by a different Argo CD instance. See [managed-by-url docs](../operator-manual/managed-by-url.md) for details. |

View File

@@ -94,6 +94,26 @@ spec:
> [!NOTE]
> Disabling self-heal does not guarantee that live cluster changes in multi-source applications will persist. Although one of the resource's sources remains unchanged, changes in another can trigger `autosync`. To handle such cases, consider disabling `autosync`.
## Automatic Retry with a limit
Argo CD can automatically retry a failed sync operation using exponential backoff. To enable, configure the `retry` field in the sync policy:
```yaml
spec:
syncPolicy:
retry:
limit: 5 # number of retries (-1 for unlimited retries)
backoff:
duration: 5s # base duration between retries
factor: 2 # exponential backoff factor
maxDuration: 3m # maximum duration between retries
```
- `limit`: number of retry attempts. Set to `-1` for unlimited retries.
- `backoff.duration`: base wait time before the first retry.
- `backoff.factor`: multiplier applied after each failed attempt.
- `backoff.maxDuration`: maximum wait time between retries, regardless of the number of attempts.
## Automatic Retry Refresh on new revisions
This feature allows users to configure their applications to refresh on new revisions when the current sync is retrying. To enable automatic refresh during sync retries, run:

View File

@@ -22,6 +22,10 @@ argocd appset [flags]
# Delete an ApplicationSet
argocd appset delete APPSETNAME (APPSETNAME...)
# Namespace precedence for --appset-namespace (-N):
# - get/delete: if the argument is namespace/name, that namespace wins; -N is ignored.
# - create/generate: metadata.namespace in the YAML wins when set; -N applies only when the manifest omits namespace.
```
### Options

View File

@@ -14,6 +14,9 @@ argocd appset create [flags]
# Create ApplicationSets
argocd appset create <filename or URL> (<filename or URL>...)
# Create ApplicationSet in a specific namespace using the --appset-namespace flag
argocd appset create --appset-namespace=APPSET_NAMESPACE <filename or URL> (<filename or URL>...)
# Dry-run AppSet creation to see what applications would be managed
argocd appset create --dry-run <filename or URL> -o json | jq -r '.status.resources[].name'
```
@@ -21,11 +24,12 @@ argocd appset create [flags]
### Options
```
--dry-run Allows to evaluate the ApplicationSet template on the server to get a preview of the applications that would be created
-h, --help help for create
-o, --output string Output format. One of: json|yaml|wide (default "wide")
--upsert Allows to override ApplicationSet with the same name even if supplied ApplicationSet spec is different from existing spec
--wait Wait until the ApplicationSet's resources are up to date. Will block indefinitely if the ApplicationSet has errors
-N, --appset-namespace string Namespace where the ApplicationSet will be created in (ignored when provided YAML file has namespace set in metadata)
--dry-run Allows to evaluate the ApplicationSet template on the server to get a preview of the applications that would be created
-h, --help help for create
-o, --output string Output format. One of: json|yaml|wide (default "wide")
--upsert Allows to override ApplicationSet with the same name even if supplied ApplicationSet spec is different from existing spec
--wait Wait until the ApplicationSet's resources are up to date. Will block indefinitely if the ApplicationSet has errors
```
### Options inherited from parent commands

View File

@@ -13,14 +13,21 @@ argocd appset delete [flags]
```
# Delete an applicationset
argocd appset delete APPSETNAME (APPSETNAME...)
# Delete ApplicationSet in a specific namespace using qualified name (namespace/name)
argocd appset delete APPSET_NAMESPACE/APPSETNAME
# Delete ApplicationSet in a specific namespace using --appset-namespace flag
argocd appset delete --appset-namespace=APPSET_NAMESPACE APPSETNAME
```
### Options
```
-h, --help help for delete
--wait Wait until deletion of the applicationset(s) completes
-y, --yes Turn off prompting to confirm cascaded deletion of Application resources
-N, --appset-namespace string Namespace where the ApplicationSet will be deleted from (ignored when qualified name is provided)
-h, --help help for delete
--wait Wait until deletion of the applicationset(s) completes
-y, --yes Turn off prompting to confirm cascaded deletion of Application resources
```
### Options inherited from parent commands

View File

@@ -13,13 +13,17 @@ argocd appset generate [flags]
```
# Generate apps of ApplicationSet rendered templates
argocd appset generate <filename or URL> (<filename or URL>...)
# Generate apps of ApplicationSet rendered templates in a specific namespace
argocd appset generate --appset-namespace=APPSET_NAMESPACE <filename or URL> (<filename or URL>...)
```
### Options
```
-h, --help help for generate
-o, --output string Output format. One of: json|yaml|wide (default "wide")
-N, --appset-namespace string Namespace used for generating Applications (ignored when provided YAML file has namespace set in metadata)
-h, --help help for generate
-o, --output string Output format. One of: json|yaml|wide (default "wide")
```
### Options inherited from parent commands

View File

@@ -13,14 +13,21 @@ argocd appset get APPSETNAME [flags]
```
# Get ApplicationSets
argocd appset get APPSETNAME
# Get ApplicationSet in a specific namespace using qualified name (namespace/name)
argocd appset get APPSET_NAMESPACE/APPSETNAME
# Get ApplicationSet in a specific namespace using --appset-namespace flag
argocd appset get --appset-namespace=APPSET_NAMESPACE APPSETNAME
```
### Options
```
-h, --help help for get
-o, --output string Output format. One of: json|yaml|wide (default "wide")
--show-params Show ApplicationSet parameters and overrides
-N, --appset-namespace string Only get ApplicationSet from a namespace (ignored when qualified name is provided)
-h, --help help for get
-o, --output string Output format. One of: json|yaml|wide (default "wide")
--show-params Show ApplicationSet parameters and overrides
```
### Options inherited from parent commands

View File

@@ -500,7 +500,7 @@ source:
## Helm Hooks
Helm hooks are similar to [Argo CD hooks](resource_hooks.md). In Helm, a hook
Helm hooks are similar to [Argo CD hooks](sync-waves.md). In Helm, a hook
is any normal Kubernetes resource annotated with the `helm.sh/hook` annotation.
Argo CD supports many (most?) Helm hooks by mapping the Helm annotations onto Argo CD's own hook annotations:
@@ -541,7 +541,7 @@ Unsupported hooks are ignored. In Argo CD, hooks are created by using `kubectl a
* Annotate `pre-install` and `post-install` with `hook-weight: "-1"`. This will make sure it runs to success before any upgrade hooks.
* Annotate `pre-upgrade` and `post-upgrade` with `hook-delete-policy: before-hook-creation` to make sure it runs on every sync.
Read more about [Argo hooks](resource_hooks.md) and [Helm hooks](https://helm.sh/docs/topics/charts_hooks/).
Read more about [Argo hooks](sync-waves.md) and [Helm hooks](https://helm.sh/docs/topics/charts_hooks/).
## Random Data

View File

@@ -7,7 +7,7 @@ A *selective sync* is one where only some resources are sync'd. You can choose w
When doing so, bear in mind that:
* Your sync is not recorded in the history, and so rollback is not possible.
* [Hooks](resource_hooks.md) are not run.
* [Hooks](sync-waves.md) are not run.
## Selective Sync Option

View File

@@ -1333,7 +1333,7 @@ func helmTemplate(appPath string, repoRoot string, env *v1alpha1.Env, q *apiclie
return nil, "", fmt.Errorf("error getting helm repos: %w", err)
}
h, err := helm.NewHelmApp(appPath, helmRepos, isLocal, version, proxy, q.Repo.NoProxy, passCredentials)
h, err := helm.NewHelmApp(appPath, helmRepos, isLocal, version, proxy, q.Repo.NoProxy, passCredentials, q.Repo.Insecure)
if err != nil {
return nil, "", fmt.Errorf("error initializing helm app object: %w", err)
}
@@ -2443,7 +2443,7 @@ func (s *Service) populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse,
if err != nil {
return err
}
h, err := helm.NewHelmApp(appPath, helmRepos, false, version, q.Repo.Proxy, q.Repo.NoProxy, passCredentials)
h, err := helm.NewHelmApp(appPath, helmRepos, false, version, q.Repo.Proxy, q.Repo.NoProxy, passCredentials, q.Repo.Insecure)
if err != nil {
return err
}

View File

@@ -494,11 +494,11 @@ func (server *ArgoCDServer) logInClusterWarnings() error {
}
if len(inClusterSecrets) > 0 {
// Don't make this call unless we actually have in-cluster secrets, to save time.
dbSettings, err := server.settingsMgr.GetSettings()
inClusterEnabled, err := server.settingsMgr.IsInClusterEnabled()
if err != nil {
return fmt.Errorf("could not get DB settings: %w", err)
return fmt.Errorf("could not check if in-cluster is enabled: %w", err)
}
if !dbSettings.InClusterEnabled {
if !inClusterEnabled {
for _, clusterName := range inClusterSecrets {
log.Warnf("cluster %q uses in-cluster server address but it's disabled in Argo CD settings", clusterName)
}

View File

@@ -24,6 +24,10 @@ import (
"github.com/argoproj/argo-cd/v3/util/settings"
)
const (
errCheckingInClusterEnabled = "%s: error checking if in-cluster is enabled: %v"
)
var (
localCluster = appv1.Cluster{
Name: "in-cluster",
@@ -68,11 +72,10 @@ func (db *db) ListClusters(_ context.Context) (*appv1.ClusterList, error) {
clusterList := appv1.ClusterList{
Items: make([]appv1.Cluster, 0),
}
settings, err := db.settingsMgr.GetSettings()
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
if err != nil {
return nil, err
log.Warnf(errCheckingInClusterEnabled, "ListClusters", err)
}
inClusterEnabled := settings.InClusterEnabled
hasInClusterCredentials := false
for _, clusterSecret := range clusterSecrets {
cluster, err := SecretToCluster(clusterSecret)
@@ -98,11 +101,11 @@ func (db *db) ListClusters(_ context.Context) (*appv1.ClusterList, error) {
// CreateCluster creates a cluster
func (db *db) CreateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Cluster, error) {
if c.Server == appv1.KubernetesInternalAPIServerAddr {
settings, err := db.settingsMgr.GetSettings()
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
if err != nil {
return nil, err
log.Warnf(errCheckingInClusterEnabled, "CreateCluster", err)
}
if !settings.InClusterEnabled {
if !inClusterEnabled {
return nil, status.Errorf(codes.InvalidArgument, "cannot register cluster: in-cluster has been disabled")
}
}
@@ -148,13 +151,12 @@ func (db *db) WatchClusters(ctx context.Context,
handleModEvent func(oldCluster *appv1.Cluster, newCluster *appv1.Cluster),
handleDeleteEvent func(clusterServer string),
) error {
argoSettings, err := db.settingsMgr.GetSettings()
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
if err != nil {
return err
log.Warnf(errCheckingInClusterEnabled, "WatchClusters", err)
}
localCls := db.getLocalCluster()
if argoSettings.InClusterEnabled {
if inClusterEnabled {
localCls, err = db.GetCluster(ctx, appv1.KubernetesInternalAPIServerAddr)
if err != nil {
return fmt.Errorf("could not get local cluster: %w", err)
@@ -173,7 +175,7 @@ func (db *db) WatchClusters(ctx context.Context,
return
}
if cluster.Server == appv1.KubernetesInternalAPIServerAddr {
if argoSettings.InClusterEnabled {
if inClusterEnabled {
// change local cluster event to modified, since it cannot be added at runtime
handleModEvent(localCls, cluster)
localCls = cluster
@@ -201,7 +203,7 @@ func (db *db) WatchClusters(ctx context.Context,
},
func(secret *corev1.Secret) {
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr && argoSettings.InClusterEnabled {
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr && inClusterEnabled {
// change local cluster event to modified, since it cannot be deleted at runtime, unless disabled.
newLocalCls := db.getLocalCluster()
handleModEvent(localCls, newLocalCls)
@@ -233,11 +235,11 @@ func (db *db) getClusterSecret(server string) (*corev1.Secret, error) {
func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) {
informer := db.settingsMgr.GetClusterInformer()
if server == appv1.KubernetesInternalAPIServerAddr {
argoSettings, err := db.settingsMgr.GetSettings()
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
if err != nil {
return nil, err
log.Warnf(errCheckingInClusterEnabled, "GetCluster", err)
}
if !argoSettings.InClusterEnabled {
if !inClusterEnabled {
return nil, status.Errorf(codes.NotFound, "cluster %q is disabled", server)
}
@@ -282,24 +284,24 @@ func (db *db) GetProjectClusters(_ context.Context, project string) ([]*appv1.Cl
}
func (db *db) GetClusterServersByName(_ context.Context, name string) ([]string, error) {
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, err
}
informer := db.settingsMgr.GetClusterInformer()
servers, err := informer.GetClusterServersByName(name)
if err != nil {
return nil, err
}
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
if err != nil {
log.Warnf(errCheckingInClusterEnabled, "GetClusterServersByName", err)
}
// Handle local cluster special case
if len(servers) == 0 && name == "in-cluster" && argoSettings.InClusterEnabled {
if len(servers) == 0 && name == "in-cluster" && inClusterEnabled {
return []string{appv1.KubernetesInternalAPIServerAddr}, nil
}
// Filter out disabled in-cluster
if !argoSettings.InClusterEnabled {
if !inClusterEnabled {
filtered := make([]string, 0, len(servers))
for _, s := range servers {
if s != appv1.KubernetesInternalAPIServerAddr {

View File

@@ -129,6 +129,44 @@ func TestWatchClusters_LocalClusterModifications(t *testing.T) {
assert.True(t, completed, "Failed due to timeout")
}
// TestWatchClusters_MissingServerSecretKey verifies that WatchClusters still
// emits the implicit in-cluster "add" event when the argocd-secret exists but
// has no server.secretkey entry, i.e. when full settings loading would fail.
func TestWatchClusters_MissingServerSecretKey(t *testing.T) {
	// !race:
	// Intermittent failure when running with -race, likely due to race condition
	// https://github.com/argoproj/argo-cd/issues/4755

	// argocd-cm with empty Data: every setting falls back to its default,
	// including cluster.inClusterEnabled (default true).
	emptyArgoCDConfigMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDConfigMapName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string]string{},
	}
	// argocd-secret that deliberately omits server.secretkey — the condition
	// under test. Only admin.password is present (and nil at that).
	argoCDSecretWithoutSecretKey := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"admin.password": nil,
		},
	}
	kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
	settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
	db := NewDB(fakeNamespace, settingsManager, kubeclientset)

	// Expect exactly one event: the implicit local cluster is "added" with the
	// internal API server address, despite the missing secret key.
	completed := runWatchTest(t, db, []func(old *v1alpha1.Cluster, new *v1alpha1.Cluster){
		func(old *v1alpha1.Cluster, new *v1alpha1.Cluster) {
			assert.Nil(t, old)
			assert.Equal(t, v1alpha1.KubernetesInternalAPIServerAddr, new.Server)
		},
	})
	assert.True(t, completed, "WatchClusters should work even when server.secretkey is missing")
}
func TestWatchClusters_LocalClusterModificationsWhenDisabled(t *testing.T) {
// !race:
// Intermittent failure when running TestWatchClusters_LocalClusterModifications with -race, likely due to race condition

View File

@@ -661,6 +661,161 @@ func TestGetClusterServersByName(t *testing.T) {
})
}
// TestCreateCluster_MissingServerSecretKey verifies that CreateCluster no
// longer hard-fails when the argocd-secret lacks server.secretkey: both
// in-cluster and external registrations succeed, while an explicit
// cluster.inClusterEnabled=false in argocd-cm is still honored.
func TestCreateCluster_MissingServerSecretKey(t *testing.T) {
	// Shared fixtures: an empty argocd-cm (all defaults) and an argocd-secret
	// that deliberately omits server.secretkey — the condition under test.
	emptyArgoCDConfigMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDConfigMapName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string]string{},
	}
	argoCDSecretWithoutSecretKey := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"admin.password": nil,
		},
	}

	t.Run("in-cluster creation succeeds when server.secretkey is missing", func(t *testing.T) {
		kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
		settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
		db := NewDB(fakeNamespace, settingsManager, kubeclientset)
		_, err := db.CreateCluster(t.Context(), &v1alpha1.Cluster{
			Server: v1alpha1.KubernetesInternalAPIServerAddr,
			Name:   "in-cluster",
		})
		require.NoError(t, err)
	})

	t.Run("external cluster creation succeeds when server.secretkey is missing", func(t *testing.T) {
		kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
		settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
		db := NewDB(fakeNamespace, settingsManager, kubeclientset)
		// External servers never go through the in-cluster enablement check.
		_, err := db.CreateCluster(t.Context(), &v1alpha1.Cluster{
			Server: "https://my-external-cluster",
			Name:   "external",
		})
		require.NoError(t, err)
	})

	t.Run("in-cluster creation rejected when explicitly disabled even with missing server.secretkey", func(t *testing.T) {
		// Explicit opt-out must still win over the lenient missing-key path.
		argoCDConfigMapWithInClusterDisabled := &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      common.ArgoCDConfigMapName,
				Namespace: fakeNamespace,
				Labels: map[string]string{
					"app.kubernetes.io/part-of": "argocd",
				},
			},
			Data: map[string]string{"cluster.inClusterEnabled": "false"},
		}
		kubeclientset := fake.NewClientset(argoCDConfigMapWithInClusterDisabled, argoCDSecretWithoutSecretKey)
		settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
		db := NewDB(fakeNamespace, settingsManager, kubeclientset)
		_, err := db.CreateCluster(t.Context(), &v1alpha1.Cluster{
			Server: v1alpha1.KubernetesInternalAPIServerAddr,
			Name:   "in-cluster",
		})
		require.Error(t, err)
		require.Contains(t, err.Error(), "in-cluster has been disabled")
	})
}
// TestListClusters_MissingServerSecretKey verifies that ListClusters still
// returns both the declared external cluster and the implicit in-cluster
// entry when the argocd-secret has no server.secretkey.
func TestListClusters_MissingServerSecretKey(t *testing.T) {
	// Empty argocd-cm: defaults apply, so in-cluster stays enabled.
	emptyArgoCDConfigMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDConfigMapName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string]string{},
	}
	// argocd-secret deliberately missing server.secretkey.
	argoCDSecretWithoutSecretKey := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"admin.password": nil,
		},
	}

	t.Run("lists clusters including implicit in-cluster when server.secretkey is missing", func(t *testing.T) {
		// One external cluster registered via a labeled cluster secret.
		externalClusterSecret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "mycluster",
				Namespace: fakeNamespace,
				Labels: map[string]string{
					common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
				},
			},
			Data: map[string][]byte{
				"server": []byte("https://my-external-cluster"),
				"name":   []byte("external"),
			},
		}
		kubeclientset := fake.NewClientset(externalClusterSecret, emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
		settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
		db := NewDB(fakeNamespace, settingsManager, kubeclientset)
		clusters, err := db.ListClusters(t.Context())
		require.NoError(t, err)
		// 2 = the external cluster secret + the implicit in-cluster entry.
		require.Len(t, clusters.Items, 2)
	})
}
// TestGetClusterServersByName_MissingServerSecretKey verifies that the
// in-cluster special case in GetClusterServersByName still resolves
// "in-cluster" to the internal API server address when the argocd-secret
// has no server.secretkey.
func TestGetClusterServersByName_MissingServerSecretKey(t *testing.T) {
	// Empty argocd-cm: in-cluster remains enabled by default.
	emptyArgoCDConfigMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDConfigMapName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string]string{},
	}
	// argocd-secret deliberately missing server.secretkey.
	argoCDSecretWithoutSecretKey := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: fakeNamespace,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"admin.password": nil,
		},
	}

	t.Run("returns in-cluster when server.secretkey is missing", func(t *testing.T) {
		kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
		settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
		db := NewDB(fakeNamespace, settingsManager, kubeclientset)
		servers, err := db.GetClusterServersByName(t.Context(), "in-cluster")
		require.NoError(t, err)
		require.ElementsMatch(t, []string{v1alpha1.KubernetesInternalAPIServerAddr}, servers)
	})
}
// TestClusterRaceConditionClusterSecrets reproduces a race condition
// on the cluster secrets. The test isn't asserting anything because
// before the fix it would cause a panic from concurrent map iteration and map write

View File

@@ -327,8 +327,12 @@ func (c *Cmd) PullOCI(repo string, chart string, version string, destination str
return out, nil
}
func (c *Cmd) dependencyBuild() (string, error) {
out, _, err := c.run(context.Background(), "dependency", "build")
func (c *Cmd) dependencyBuild(insecure bool) (string, error) {
args := []string{"dependency", "build"}
if insecure {
args = append(args, "--insecure-skip-tls-verify")
}
out, _, err := c.run(context.Background(), args...)
if err != nil {
return "", fmt.Errorf("failed to build dependencies: %w", err)
}

View File

@@ -135,6 +135,36 @@ func TestRegistryLogin(t *testing.T) {
}
}
// TestDependencyBuild checks that dependencyBuild appends
// --insecure-skip-tls-verify to the helm command line only when the
// insecure flag is set.
func TestDependencyBuild(t *testing.T) {
	// The injected runner echoes the assembled command line back instead of
	// executing helm, so each case can assert on the exact arguments.
	echoArgs := func(cmd *exec.Cmd, _ func(_ string) string) (string, error) {
		return strings.Join(cmd.Args, " "), nil
	}

	cases := map[string]struct {
		insecure bool
		want     string
	}{
		"without insecure": {insecure: false, want: "helm dependency build"},
		"with insecure":    {insecure: true, want: "helm dependency build --insecure-skip-tls-verify"},
	}

	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			c, err := newCmdWithVersion(".", false, "", "", echoArgs)
			require.NoError(t, err)

			got, err := c.dependencyBuild(tt.insecure)
			require.NoError(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}
func TestRegistryLogout(t *testing.T) {
tests := []struct {
name string

View File

@@ -43,20 +43,21 @@ type Helm interface {
}
// NewHelmApp create a new wrapper to run commands on the `helm` command-line tool.
func NewHelmApp(workDir string, repos []HelmRepository, isLocal bool, version string, proxy string, noProxy string, passCredentials bool) (Helm, error) {
func NewHelmApp(workDir string, repos []HelmRepository, isLocal bool, version string, proxy string, noProxy string, passCredentials bool, insecure bool) (Helm, error) {
cmd, err := NewCmd(workDir, version, proxy, noProxy)
if err != nil {
return nil, fmt.Errorf("failed to create new helm command: %w", err)
}
cmd.IsLocal = isLocal
return &helm{repos: repos, cmd: *cmd, passCredentials: passCredentials}, nil
return &helm{repos: repos, cmd: *cmd, passCredentials: passCredentials, insecure: insecure}, nil
}
type helm struct {
cmd Cmd
repos []HelmRepository
passCredentials bool
insecure bool
}
var _ Helm = &helm{}
@@ -108,7 +109,7 @@ func (h *helm) DependencyBuild() error {
}
}
h.repos = nil
_, err := h.cmd.dependencyBuild()
_, err := h.cmd.dependencyBuild(h.insecure)
if err != nil {
return fmt.Errorf("failed to build helm dependencies: %w", err)
}

View File

@@ -25,7 +25,7 @@ func template(h Helm, opts *TemplateOpts) ([]*unstructured.Unstructured, error)
}
func TestHelmTemplateParams(t *testing.T) {
h, err := NewHelmApp("./testdata/minio", []HelmRepository{}, false, "", "", "", false)
h, err := NewHelmApp("./testdata/minio", []HelmRepository{}, false, "", "", "", false, false)
require.NoError(t, err)
opts := TemplateOpts{
Name: "test",
@@ -58,7 +58,7 @@ func TestHelmTemplateValues(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", "", false)
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", "", false, false)
require.NoError(t, err)
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
@@ -84,7 +84,7 @@ func TestHelmGetParams(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
require.NoError(t, err)
params, err := h.GetParameters(nil, repoRootAbs, repoRootAbs)
require.NoError(t, err)
@@ -97,7 +97,7 @@ func TestHelmGetParamsValueFiles(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
require.NoError(t, err)
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
@@ -112,7 +112,7 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
require.NoError(t, err)
valuesMissingPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-missing.yaml", nil)
require.NoError(t, err)
@@ -126,7 +126,7 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
}
func TestHelmTemplateReleaseNameOverwrite(t *testing.T) {
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{Name: "my-release"})
@@ -144,7 +144,7 @@ func TestHelmTemplateReleaseNameOverwrite(t *testing.T) {
}
func TestHelmTemplateReleaseName(t *testing.T) {
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{Name: "test"})
require.NoError(t, err)
@@ -206,7 +206,7 @@ func Test_flatVals(t *testing.T) {
}
func TestAPIVersions(t *testing.T) {
h, err := NewHelmApp("./testdata/api-versions", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/api-versions", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{})
@@ -221,7 +221,7 @@ func TestAPIVersions(t *testing.T) {
}
func TestKubeVersionWithSymbol(t *testing.T) {
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{KubeVersion: "1.30.11+IKS"})
@@ -244,7 +244,7 @@ func TestKubeVersionWithSymbol(t *testing.T) {
}
func TestSkipCrds(t *testing.T) {
h, err := NewHelmApp("./testdata/crds", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/crds", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{SkipCrds: false})
@@ -261,7 +261,7 @@ func TestSkipCrds(t *testing.T) {
}
func TestSkipTests(t *testing.T) {
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{SkipTests: false})

View File

@@ -119,8 +119,6 @@ type ArgoCDSettings struct {
PasswordPattern string `json:"passwordPattern,omitempty"`
// BinaryUrls contains the URLs for downloading argocd binaries
BinaryUrls map[string]string `json:"binaryUrls,omitempty"`
// InClusterEnabled indicates whether to allow in-cluster server address
InClusterEnabled bool `json:"inClusterEnabled"`
// ServerRBACLogEnforceEnable temporary var indicates whether rbac will be enforced on logs
ServerRBACLogEnforceEnable bool `json:"serverRBACLogEnforceEnable"`
// MaxPodLogsToRender the maximum number of pod logs to render
@@ -561,6 +559,10 @@ const (
// application sync with impersonation feature is disabled by default.
defaultImpersonationEnabledFlag = false
// defaultInClusterEnabledFlag is the default value when the in-cluster setting
// cannot be read from the configmap or is not explicitly set by the user.
defaultInClusterEnabledFlag = true
)
var sourceTypeToEnableGenerationKey = map[v1alpha1.ApplicationSourceType]string{
@@ -1335,10 +1337,10 @@ func (mgr *SettingsManager) GetSettings() (*ArgoCDSettings, error) {
if err := mgr.updateSettingsFromSecret(&settings, argoCDSecret, secrets); err != nil {
errs = append(errs, err)
}
updateSettingsFromConfigMap(&settings, argoCDCM)
if len(errs) > 0 {
return &settings, errors.Join(errs...)
}
updateSettingsFromConfigMap(&settings, argoCDCM)
return &settings, nil
}
@@ -1527,7 +1529,6 @@ func updateSettingsFromConfigMap(settings *ArgoCDSettings, argoCDCM *corev1.Conf
settings.MaxPodLogsToRender = val
}
}
settings.InClusterEnabled = argoCDCM.Data[inClusterEnabledKey] != "false"
settings.ExecEnabled = argoCDCM.Data[execEnabledKey] == "true"
execShells := argoCDCM.Data[execShellsKey]
if execShells != "" {
@@ -2427,3 +2428,15 @@ func (mgr *SettingsManager) GetAllowedNodeLabels() []string {
}
return labelKeys
}
// IsInClusterEnabled reports whether the in-cluster server address is allowed.
// It reads the inClusterEnabledKey entry from the argocd-cm configmap directly
// (bypassing full settings loading, which can fail when e.g. server.secretkey
// is absent from argocd-secret). The flag is false only when explicitly set to
// "false"; any other value, or an absent key, yields true. When the configmap
// cannot be read, the default (defaultInClusterEnabledFlag, i.e. enabled) is
// returned together with a wrapped error so callers can decide whether to
// fail hard or merely log a warning.
func (mgr *SettingsManager) IsInClusterEnabled() (bool, error) {
	argoCDCM, err := mgr.getConfigMap()
	if err != nil {
		return defaultInClusterEnabledFlag, fmt.Errorf("error checking %s property in configmap: %w", inClusterEnabledKey, err)
	}
	// Treat any value other than the literal "false" as enabled.
	if inClusterEnabled, ok := argoCDCM.Data[inClusterEnabledKey]; ok {
		return inClusterEnabled != "false", nil
	}
	return defaultInClusterEnabledFlag, nil
}

View File

@@ -178,6 +178,13 @@ func TestInClusterServerAddressEnabled(t *testing.T) {
}
func TestInClusterServerAddressEnabledByDefault(t *testing.T) {
_, settingsManager := fixtures(t.Context(), map[string]string{})
enabled, err := settingsManager.IsInClusterEnabled()
require.NoError(t, err)
require.True(t, enabled)
}
func TestGetSettings_InClusterIsEnabledWithMissingServerSecretKey(t *testing.T) {
kubeClient := fake.NewClientset(
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -198,15 +205,15 @@ func TestInClusterServerAddressEnabledByDefault(t *testing.T) {
},
},
Data: map[string][]byte{
"admin.password": nil,
"server.secretkey": nil,
"admin.password": nil,
},
},
)
settingsManager := NewSettingsManager(t.Context(), kubeClient, "default")
settings, err := settingsManager.GetSettings()
// IsInClusterEnabled reads ConfigMap directly and does not depend on server.secretkey
enabled, err := settingsManager.IsInClusterEnabled()
require.NoError(t, err)
assert.True(t, settings.InClusterEnabled)
require.True(t, enabled)
}
func TestGetAppInstanceLabelKey(t *testing.T) {
@@ -2164,6 +2171,49 @@ func TestIsImpersonationEnabled(t *testing.T) {
"when user enables the flag in argocd-cm config map, IsImpersonationEnabled() must not return any error")
}
// TestIsInClusterEnabled covers the four configuration states of
// IsInClusterEnabled: missing configmap, missing key, explicit "false",
// and explicit "true".
func TestIsInClusterEnabled(t *testing.T) {
	// When there is no argocd-cm itself,
	// Then IsInClusterEnabled() must return true (default value) and an error with appropriate error message.
	kubeClient := fake.NewClientset()
	settingsManager := NewSettingsManager(t.Context(), kubeClient, "default")
	enabled, err := settingsManager.IsInClusterEnabled()
	require.True(t, enabled,
		"with no argocd-cm config map, IsInClusterEnabled() must return true (default value)")
	require.ErrorContains(t, err, "configmap \"argocd-cm\" not found",
		"with no argocd-cm config map, IsInClusterEnabled() must return an error")

	// When there is no in-cluster flag present in the argocd-cm,
	// Then IsInClusterEnabled() must return true (default value) and nil error.
	_, settingsManager = fixtures(t.Context(), map[string]string{})
	enabled, err = settingsManager.IsInClusterEnabled()
	require.True(t, enabled,
		"with empty argocd-cm config map, IsInClusterEnabled() must return true (default value)")
	require.NoError(t, err,
		"with empty argocd-cm config map, IsInClusterEnabled() must not return any error")

	// When user disables in-cluster explicitly,
	// Then IsInClusterEnabled() must return false and nil error.
	_, settingsManager = fixtures(t.Context(), map[string]string{
		"cluster.inClusterEnabled": "false",
	})
	enabled, err = settingsManager.IsInClusterEnabled()
	require.False(t, enabled,
		"when user sets the flag to false in argocd-cm config map, IsInClusterEnabled() must return false")
	require.NoError(t, err,
		"when user sets the flag to false in argocd-cm config map, IsInClusterEnabled() must not return any error")

	// When user enables in-cluster explicitly,
	// Then IsInClusterEnabled() must return true and nil error.
	_, settingsManager = fixtures(t.Context(), map[string]string{
		"cluster.inClusterEnabled": "true",
	})
	enabled, err = settingsManager.IsInClusterEnabled()
	require.True(t, enabled,
		"when user sets the flag to true in argocd-cm config map, IsInClusterEnabled() must return true")
	require.NoError(t, err,
		"when user sets the flag to true in argocd-cm config map, IsInClusterEnabled() must not return any error")
}
func TestRequireOverridePrivilegeForRevisionSyncNoConfigMap(t *testing.T) {
// When there is no argocd-cm itself,
// Then RequireOverridePrivilegeForRevisionSync() must return false (default value) and an error with appropriate error message.