Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 09:38:49 +01:00)

Compare commits: hydrator-c... → v2.13.0-rc (25 commits)
Commits in this range:

- 8340e1e43f
- 1cddb8e607
- 262c8fa529
- 97a49a24cc
- a9a8d0e45f
- 92de225ce5
- a713e5023a
- ec60abd4d8
- c6d9d50ee9
- 7244b8b40f
- 8e81bb6c80
- 3bc2e1ae4c
- 61f63f35ae
- 5eb1f9bd16
- 4149f484bf
- 0b2895977e
- 99b30a87a6
- 9fc6ec116d
- f7f553f675
- a9d9d07edd
- 0f083c9e58
- 5392ca7e79
- 243ecc2f25
- 425b4087f3
- 74a367d10e
USERS.md (1 line changed):
```diff
@@ -41,6 +41,7 @@ Currently, the following organizations are **officially** using Argo CD:
 1. [Beez Innovation Labs](https://www.beezlabs.com/)
 1. [Bedag Informatik AG](https://www.bedag.ch/)
 1. [Beleza Na Web](https://www.belezanaweb.com.br/)
+1. [Believable Bots](https://believablebots.io)
 1. [BigPanda](https://bigpanda.io)
 1. [BioBox Analytics](https://biobox.io)
 1. [BMW Group](https://www.bmwgroup.com/)
```
```diff
@@ -168,7 +168,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
 		if err != nil {
 			return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
 		}
-		return pullrequest.NewBitbucketServiceBearerToken(ctx, providerConfig.API, appToken, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
+		return pullrequest.NewBitbucketServiceBearerToken(ctx, appToken, providerConfig.API, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
 	} else if providerConfig.BasicAuth != nil {
 		password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
 		if err != nil {
```
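The hunk above is a one-line argument-order fix: the bearer token and the API URL were being passed to NewBitbucketServiceBearerToken in the wrong positions. Because both parameters are plain strings, the compiler cannot catch the swap. A standalone sketch of the failure mode (names below are hypothetical, not the pullrequest package's API):

```go
package main

import "fmt"

// newClient mimics a constructor with adjacent same-typed string parameters;
// a swapped (token, url) call compiles fine and fails only at runtime, which
// is exactly the class of bug the commit above fixes.
func newClient(token, url string) string {
	return fmt.Sprintf("POST %s with Authorization: Bearer %s", url, token)
}

func main() {
	// Correct order per the fixed call site: token first, then the API URL.
	fmt.Println(newClient("s3cr3t-token", "https://bitbucket.example.com/rest/api/1.0"))
}
```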
assets/swagger.json (generated; 2 lines changed):
```diff
@@ -6084,7 +6084,7 @@
       "properties": {
         "defaultServiceAccount": {
           "type": "string",
-          "title": "ServiceAccountName to be used for impersonation during the sync operation"
+          "title": "DefaultServiceAccount to be used for impersonation during the sync operation"
         },
         "namespace": {
           "description": "Namespace specifies the target namespace for the application's resources.",
```
```diff
@@ -163,6 +163,7 @@ func NewCommand() *cobra.Command {
 			}()
 
 			go ctrl.Run(ctx, processorsCount)
+			<-ctx.Done()
 			return nil
 		},
 	}
```
```diff
@@ -585,8 +585,8 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
 	var status string
 	var allow, deny, inactiveAllows bool
 	if windows.HasWindows() {
-		active := windows.Active()
-		if active.HasWindows() {
+		active, err := windows.Active()
+		if err == nil && active.HasWindows() {
 			for _, w := range *active {
 				if w.Kind == "deny" {
 					deny = true
@@ -595,13 +595,14 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
 			}
 		}
 	}
-	if windows.InactiveAllows().HasWindows() {
+	inactiveAllowWindows, err := windows.InactiveAllows()
+	if err == nil && inactiveAllowWindows.HasWindows() {
 		inactiveAllows = true
 	}
 
-	s := windows.CanSync(true)
 	if deny || !deny && !allow && inactiveAllows {
-		if s {
+		s, err := windows.CanSync(true)
+		if err == nil && s {
 			status = "Manual Allowed"
 		} else {
 			status = "Sync Denied"
```
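Both hunks above follow the same migration: the SyncWindows helpers (Active, InactiveAllows, CanSync) now return an error alongside their result, so an unparsable window schedule surfaces instead of being silently treated as inactive. A hedged sketch of the resulting caller-side pattern (windowStatus is illustrative, not the CLI's actual function):

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// windowStatus condenses the pattern from the hunks above: check the error
// from each sync-window helper before trusting its result.
func windowStatus(windows *v1alpha1.SyncWindows) string {
	active, err := windows.Active()
	if err != nil || !active.HasWindows() {
		return "Sync OK" // no active window, or an invalid schedule
	}
	canSync, err := windows.CanSync(true) // true = manually initiated sync
	if err != nil || !canSync {
		return "Sync Denied"
	}
	return "Manual Allowed"
}

func main() {
	w := v1alpha1.SyncWindows{}
	fmt.Println(windowStatus(&w)) // "Sync OK": no windows defined
}
```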
```diff
@@ -48,6 +48,7 @@ type forwardCacheClient struct {
 	err              error
 	redisHaProxyName string
 	redisName        string
+	redisPassword    string
 }
 
 func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error) error {
@@ -64,7 +65,7 @@ func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error)
 			return
 		}
 
-		redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort)})
+		redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort), Password: c.redisPassword})
 		c.client = cache.NewRedisCache(redisClient, time.Hour, c.compression)
 	})
 	if c.err != nil {
```
```diff
@@ -251,12 +252,12 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
 	if err != nil {
 		return fmt.Errorf("error running miniredis: %w", err)
 	}
-	appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression, redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName}), time.Hour)
+
+	redisOptions := &redis.Options{Addr: mr.Addr()}
+	if err = common.SetOptionalRedisPasswordFromKubeConfig(ctx, kubeClientset, namespace, redisOptions); err != nil {
+		log.Warnf("Failed to fetch & set redis password for namespace %s: %v", namespace, err)
+	}
+
+	appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression, redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName, redisPassword: redisOptions.Password}), time.Hour)
 	srv := server.NewServer(ctx, server.ArgoCDServerOpts{
 		EnableGZip: false,
 		Namespace:  namespace,
```
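The two changes above thread an optional Redis password into the port-forwarding cache client: MaybeStartLocalServer now looks the password up via common.SetOptionalRedisPasswordFromKubeConfig and stores it on forwardCacheClient, which passes it through redis.Options. A minimal sketch of the resulting construction (the go-redis v9 import path is assumed from the surrounding code; the port value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/redis/go-redis/v9"
)

// newForwardedRedisClient shows the option threading from the hunk above:
// an empty password preserves the old no-AUTH behavior.
func newForwardedRedisClient(redisPort int, password string) *redis.Client {
	return redis.NewClient(&redis.Options{
		Addr:     fmt.Sprintf("localhost:%d", redisPort),
		Password: password,
	})
}

func main() {
	client := newForwardedRedisClient(6379, "")
	fmt.Println(client.Options().Addr) // localhost:6379
}
```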
```diff
@@ -352,9 +352,10 @@ func printSyncWindows(proj *v1alpha1.AppProject) {
 	fmt.Fprintf(w, fmtStr, headers...)
 	if proj.Spec.SyncWindows.HasWindows() {
 		for i, window := range proj.Spec.SyncWindows {
+			isActive, _ := window.Active()
 			vals := []interface{}{
 				strconv.Itoa(i),
-				formatBoolOutput(window.Active()),
+				formatBoolOutput(isActive),
 				window.Kind,
 				window.Schedule,
 				window.Duration,
```
```diff
@@ -48,6 +48,8 @@ func AddProjFlags(command *cobra.Command, opts *ProjectOpts) {
 	command.Flags().StringArrayVar(&opts.allowedNamespacedResources, "allow-namespaced-resource", []string{}, "List of allowed namespaced resources")
 	command.Flags().StringArrayVar(&opts.deniedNamespacedResources, "deny-namespaced-resource", []string{}, "List of denied namespaced resources")
 	command.Flags().StringSliceVar(&opts.SourceNamespaces, "source-namespaces", []string{}, "List of source namespaces for applications")
+	command.Flags().StringArrayVar(&opts.destinationServiceAccounts, "dest-service-accounts", []string{},
+		"Destination server, namespace and target service account (e.g. https://192.168.99.100:8443,default,default-sa)")
 }
 
 func getGroupKindList(values []string) []v1.GroupKind {
@@ -98,8 +100,8 @@ func (opts *ProjectOpts) GetDestinationServiceAccounts() []v1alpha1.ApplicationD
 	destinationServiceAccounts := make([]v1alpha1.ApplicationDestinationServiceAccount, 0)
 	for _, destStr := range opts.destinationServiceAccounts {
 		parts := strings.Split(destStr, ",")
-		if len(parts) != 2 {
-			log.Fatalf("Expected destination of the form: server,namespace. Received: %s", destStr)
+		if len(parts) != 3 {
+			log.Fatalf("Expected destination service account of the form: server,namespace, defaultServiceAccount. Received: %s", destStr)
 		} else {
 			destinationServiceAccounts = append(destinationServiceAccounts, v1alpha1.ApplicationDestinationServiceAccount{
 				Server: parts[0],
```
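The --dest-service-accounts flag value grew from a server,namespace pair to a server,namespace,serviceAccount triple. A self-contained sketch of the three-part parse (parseDestSA is illustrative; the real code lives in GetDestinationServiceAccounts above and calls log.Fatalf instead of returning an error):

```go
package main

import (
	"fmt"
	"strings"
)

// parseDestSA splits one --dest-service-accounts value into its three parts.
func parseDestSA(destStr string) (server, namespace, sa string, err error) {
	parts := strings.Split(destStr, ",")
	if len(parts) != 3 {
		return "", "", "", fmt.Errorf("expected server,namespace,defaultServiceAccount, got %q", destStr)
	}
	return parts[0], parts[1], parts[2], nil
}

func main() {
	server, ns, sa, err := parseDestSA("https://192.168.99.100:8443,default,default-sa")
	if err != nil {
		panic(err)
	}
	fmt.Println(server, ns, sa) // https://192.168.99.100:8443 default default-sa
}
```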
```diff
@@ -5,6 +5,8 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 )
 
 func TestProjectOpts_ResourceLists(t *testing.T) {
@@ -22,3 +24,27 @@ func TestProjectOpts_ResourceLists(t *testing.T) {
 		[]v1.GroupKind{{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}}, opts.GetDeniedClusterResources(),
 	)
 }
+
+func TestProjectOpts_GetDestinationServiceAccounts(t *testing.T) {
+	opts := ProjectOpts{
+		destinationServiceAccounts: []string{
+			"https://192.168.99.100:8443,test-ns,test-sa",
+			"https://kubernetes.default.svc.local:6443,guestbook,guestbook-sa",
+		},
+	}
+
+	assert.ElementsMatch(t,
+		[]v1alpha1.ApplicationDestinationServiceAccount{
+			{
+				Server:                "https://192.168.99.100:8443",
+				Namespace:             "test-ns",
+				DefaultServiceAccount: "test-sa",
+			},
+			{
+				Server:                "https://kubernetes.default.svc.local:6443",
+				Namespace:             "guestbook",
+				DefaultServiceAccount: "guestbook-sa",
+			},
+		}, opts.GetDestinationServiceAccounts(),
+	)
+}
```
```diff
@@ -1690,7 +1690,8 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
 		app.Status.Summary = tree.GetSummary(app)
 	}
 
-	if project.Spec.SyncWindows.Matches(app).CanSync(false) {
+	canSync, _ := project.Spec.SyncWindows.Matches(app).CanSync(false)
+	if canSync {
 		syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionUpdated)
 		setOpMs = opMS
 		if syncErrCond != nil {
```
```diff
@@ -64,6 +64,7 @@ type fakeData struct {
 	metricsCacheExpiration         time.Duration
 	applicationNamespaces          []string
 	updateRevisionForPathsResponse *apiclient.UpdateRevisionForPathsResponse
+	additionalObjs                 []runtime.Object
 }
 
 type MockKubectl struct {
@@ -133,7 +134,9 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
 		},
 		Data: data.configMapData,
 	}
-	kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret)
+	runtimeObjs := []runtime.Object{&clust, &secret, &cm}
+	runtimeObjs = append(runtimeObjs, data.additionalObjs...)
+	kubeClient := fake.NewSimpleClientset(runtimeObjs...)
 	settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
 	kubectl := &MockKubectl{Kubectl: &kubetest.MockKubectlCmd{}}
 	ctrl, err := NewApplicationController(
```
controller/cache/info.go (vendored; 57 lines changed):
```diff
@@ -278,6 +278,32 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
 	res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
 }
 
+func isPodInitializedConditionTrue(status *v1.PodStatus) bool {
+	for _, condition := range status.Conditions {
+		if condition.Type != v1.PodInitialized {
+			continue
+		}
+
+		return condition.Status == v1.ConditionTrue
+	}
+	return false
+}
+
+func isRestartableInitContainer(initContainer *v1.Container) bool {
+	if initContainer == nil {
+		return false
+	}
+	if initContainer.RestartPolicy == nil {
+		return false
+	}
+
+	return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
+}
+
+func isPodPhaseTerminal(phase v1.PodPhase) bool {
+	return phase == v1.PodFailed || phase == v1.PodSucceeded
+}
+
 func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
 	pod := v1.Pod{}
 	err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
@@ -288,7 +314,8 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
 	totalContainers := len(pod.Spec.Containers)
 	readyContainers := 0
 
-	reason := string(pod.Status.Phase)
+	podPhase := pod.Status.Phase
+	reason := string(podPhase)
 	if pod.Status.Reason != "" {
 		reason = pod.Status.Reason
 	}
@@ -306,6 +333,21 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
 		res.Images = append(res.Images, image)
 	}
 
+	// If the Pod carries {type:PodScheduled, reason:SchedulingGated}, set reason to 'SchedulingGated'.
+	for _, condition := range pod.Status.Conditions {
+		if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
+			reason = v1.PodReasonSchedulingGated
+		}
+	}
+
+	initContainers := make(map[string]*v1.Container)
+	for i := range pod.Spec.InitContainers {
+		initContainers[pod.Spec.InitContainers[i].Name] = &pod.Spec.InitContainers[i]
+		if isRestartableInitContainer(&pod.Spec.InitContainers[i]) {
+			totalContainers++
+		}
+	}
+
 	initializing := false
 	for i := range pod.Status.InitContainerStatuses {
 		container := pod.Status.InitContainerStatuses[i]
@@ -313,6 +355,12 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
 		switch {
 		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
 			continue
+		case isRestartableInitContainer(initContainers[container.Name]) &&
+			container.Started != nil && *container.Started:
+			if container.Ready {
+				readyContainers++
+			}
+			continue
 		case container.State.Terminated != nil:
 			// initialization is failed
 			if len(container.State.Terminated.Reason) == 0 {
@@ -334,8 +382,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
 		}
 		break
 	}
-	if !initializing {
-		restarts = 0
+	if !initializing || isPodInitializedConditionTrue(&pod.Status) {
 		hasRunning := false
 		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
 			container := pod.Status.ContainerStatuses[i]
@@ -370,7 +417,9 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
 	// and https://github.com/kubernetes/kubernetes/issues/90358#issuecomment-617859364
 	if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
 		reason = "Unknown"
-	} else if pod.DeletionTimestamp != nil {
+		// If the pod is being deleted and the pod phase is not succeeded or failed, set the reason to "Terminating".
+		// See https://github.com/kubernetes/kubectl/issues/1595#issuecomment-2080001023
+	} else if pod.DeletionTimestamp != nil && !isPodPhaseTerminal(podPhase) {
 		reason = "Terminating"
 	}
```
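The new helpers bring the resource-tree pod summary in line with kubectl's handling of restartable ("sidecar") init containers: an init container with restartPolicy: Always counts toward the pod's container total, and counts as ready once it has started. A simplified standalone restatement of that counting rule (countContainers is illustrative, not the vendored function):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// countContainers restates the sidecar counting rule from the hunk above in
// isolation; populatePodInfo applies the same rule with more edge cases.
func countContainers(pod *v1.Pod) (ready, total int) {
	restartable := map[string]bool{}
	total = len(pod.Spec.Containers)
	for i := range pod.Spec.InitContainers {
		c := &pod.Spec.InitContainers[i]
		if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
			restartable[c.Name] = true
			total++ // sidecars count toward the pod's container total
		}
	}
	for _, s := range pod.Status.ContainerStatuses {
		if s.Ready {
			ready++
		}
	}
	for _, s := range pod.Status.InitContainerStatuses {
		// A started-and-ready sidecar counts as a ready container.
		if restartable[s.Name] && s.Started != nil && *s.Started && s.Ready {
			ready++
		}
	}
	return ready, total
}

func main() {
	fmt.Println(countContainers(&v1.Pod{})) // 0 0
}
```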
controller/cache/info_test.go (vendored; 546 lines changed):
```diff
@@ -285,6 +285,552 @@ func TestGetPodInfo(t *testing.T) {
 	assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, info.NetworkingInfo)
 }
 
+func TestGetPodWithInitialContainerInfo(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: "v1"
+kind: "Pod"
+metadata:
+  labels:
+    app: "app-with-initial-container"
+  name: "app-with-initial-container-5f46976fdb-vd6rv"
+  namespace: "default"
+  ownerReferences:
+  - apiVersion: "apps/v1"
+    kind: "ReplicaSet"
+    name: "app-with-initial-container-5f46976fdb"
+spec:
+  containers:
+  - image: "alpine:latest"
+    imagePullPolicy: "Always"
+    name: "app-with-initial-container"
+  initContainers:
+  - image: "alpine:latest"
+    imagePullPolicy: "Always"
+    name: "app-with-initial-container-logshipper"
+  nodeName: "minikube"
+status:
+  containerStatuses:
+  - image: "alpine:latest"
+    name: "app-with-initial-container"
+    ready: true
+    restartCount: 0
+    started: true
+    state:
+      running:
+        startedAt: "2024-10-08T08:44:25Z"
+  initContainerStatuses:
+  - image: "alpine:latest"
+    name: "app-with-initial-container-logshipper"
+    ready: true
+    restartCount: 0
+    started: false
+    state:
+      terminated:
+        exitCode: 0
+        reason: "Completed"
+  phase: "Running"
+`)
+
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Running"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "1/1"},
+	}, info.Info)
+}
+
+func TestGetPodInfoWithSidecar(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: app-with-sidecar
+  name: app-with-sidecar-6664cc788c-lqlrp
+  namespace: default
+  ownerReferences:
+  - apiVersion: apps/v1
+    kind: ReplicaSet
+    name: app-with-sidecar-6664cc788c
+spec:
+  containers:
+  - image: 'docker.m.daocloud.io/library/alpine:latest'
+    imagePullPolicy: Always
+    name: app-with-sidecar
+  initContainers:
+  - image: 'docker.m.daocloud.io/library/alpine:latest'
+    imagePullPolicy: Always
+    name: logshipper
+    restartPolicy: Always
+  nodeName: minikube
+status:
+  containerStatuses:
+  - image: 'docker.m.daocloud.io/library/alpine:latest'
+    name: app-with-sidecar
+    ready: true
+    restartCount: 0
+    started: true
+    state:
+      running:
+        startedAt: '2024-10-08T08:39:43Z'
+  initContainerStatuses:
+  - image: 'docker.m.daocloud.io/library/alpine:latest'
+    name: logshipper
+    ready: true
+    restartCount: 0
+    started: true
+    state:
+      running:
+        startedAt: '2024-10-08T08:39:40Z'
+  phase: Running
+`)
+
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Running"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "2/2"},
+	}, info.Info)
+}
+
+func TestGetPodInfoWithInitialContainer(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: myapp-long-exist-56b7d8794d-
+  labels:
+    app: myapp-long-exist
+  name: myapp-long-exist-56b7d8794d-pbgrd
+  namespace: linghao
+  ownerReferences:
+  - apiVersion: apps/v1
+    kind: ReplicaSet
+    name: myapp-long-exist-56b7d8794d
+spec:
+  containers:
+  - image: alpine:latest
+    imagePullPolicy: Always
+    name: myapp-long-exist
+  initContainers:
+  - image: alpine:latest
+    imagePullPolicy: Always
+    name: myapp-long-exist-logshipper
+  nodeName: minikube
+status:
+  containerStatuses:
+  - image: alpine:latest
+    name: myapp-long-exist
+    ready: false
+    restartCount: 0
+    started: false
+    state:
+      waiting:
+        reason: PodInitializing
+  initContainerStatuses:
+  - image: alpine:latest
+    name: myapp-long-exist-logshipper
+    ready: false
+    restartCount: 0
+    started: true
+    state:
+      running:
+        startedAt: '2024-10-09T08:03:45Z'
+  phase: Pending
+  startTime: '2024-10-09T08:02:39Z'
+`)
+
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Init:0/1"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/1"},
+	}, info.Info)
+}
+
+// Test pod has 2 restartable init containers, the first one running but not started.
+func TestGetPodInfoWithRestartableInitContainer(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test1
+spec:
+  initContainers:
+  - name: restartable-init-1
+    restartPolicy: Always
+  - name: restartable-init-2
+    restartPolicy: Always
+  containers:
+  - name: container
+  nodeName: minikube
+status:
+  phase: Pending
+  initContainerStatuses:
+  - name: restartable-init-1
+    ready: false
+    restartCount: 3
+    state:
+      running: {}
+    started: false
+    lastTerminationState:
+      terminated:
+        finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
+  - name: restartable-init-2
+    ready: false
+    state:
+      waiting: {}
+    started: false
+  containerStatuses:
+  - ready: false
+    restartCount: 0
+    state:
+      waiting: {}
+  conditions:
+  - type: ContainersReady
+    status: "False"
+  - type: Initialized
+    status: "False"
+`)
+
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Init:0/2"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/3"},
+		{Name: "Restart Count", Value: "3"},
+	}, info.Info)
+}
+
+// Test pod has 2 restartable init containers, the first one started and the second one running but not started.
+func TestGetPodInfoWithPartiallyStartedInitContainers(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test1
+spec:
+  initContainers:
+  - name: restartable-init-1
+    restartPolicy: Always
+  - name: restartable-init-2
+    restartPolicy: Always
+  containers:
+  - name: container
+  nodeName: minikube
+status:
+  phase: Pending
+  initContainerStatuses:
+  - name: restartable-init-1
+    ready: false
+    restartCount: 3
+    state:
+      running: {}
+    started: true
+    lastTerminationState:
+      terminated:
+        finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
+  - name: restartable-init-2
+    ready: false
+    state:
+      running: {}
+    started: false
+  containerStatuses:
+  - ready: false
+    restartCount: 0
+    state:
+      waiting: {}
+  conditions:
+  - type: ContainersReady
+    status: "False"
+  - type: Initialized
+    status: "False"
+`)
+
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Init:1/2"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/3"},
+		{Name: "Restart Count", Value: "3"},
+	}, info.Info)
+}
+
+// Test pod has 2 restartable init containers started and 1 container running
+func TestGetPodInfoWithStartedInitContainers(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test2
+spec:
+  initContainers:
+  - name: restartable-init-1
+    restartPolicy: Always
+  - name: restartable-init-2
+    restartPolicy: Always
+  containers:
+  - name: container
+  nodeName: minikube
+status:
+  phase: Running
+  initContainerStatuses:
+  - name: restartable-init-1
+    ready: false
+    restartCount: 3
+    state:
+      running: {}
+    started: true
+    lastTerminationState:
+      terminated:
+        finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
+  - name: restartable-init-2
+    ready: false
+    state:
+      running: {}
+    started: true
+  containerStatuses:
+  - ready: true
+    restartCount: 4
+    state:
+      running: {}
+    lastTerminationState:
+      terminated:
+        finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
+  conditions:
+  - type: ContainersReady
+    status: "False"
+  - type: Initialized
+    status: "True"
+`)
+
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Running"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "1/3"},
+		{Name: "Restart Count", Value: "7"},
+	}, info.Info)
+}
+
+// Test pod has 1 init container restarting and 1 container not running
+func TestGetPodInfoWithNormalInitContainer(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test7
+spec:
+  initContainers:
+  - name: init-container
+  containers:
+  - name: main-container
+  nodeName: minikube
+status:
+  phase: podPhase
+  initContainerStatuses:
+  - ready: false
+    restartCount: 3
+    state:
+      running: {}
+    lastTerminationState:
+      terminated:
+        finishedAt: "2023-10-01T00:00:00Z" # Replace with the actual time
+  containerStatuses:
+  - ready: false
+    restartCount: 0
+    state:
+      waiting: {}
+`)
+
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Init:0/1"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/1"},
+		{Name: "Restart Count", Value: "3"},
+	}, info.Info)
+}
+
+// Test pod condition succeed
+func TestPodConditionSucceeded(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test8
+spec:
+  nodeName: minikube
+  containers:
+  - name: container
+status:
+  phase: Succeeded
+  containerStatuses:
+  - ready: false
+    restartCount: 0
+    state:
+      terminated:
+        reason: Completed
+        exitCode: 0
+`)
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Completed"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/1"},
+	}, info.Info)
+}
+
+// Test pod condition failed
+func TestPodConditionFailed(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test9
+spec:
+  nodeName: minikube
+  containers:
+  - name: container
+status:
+  phase: Failed
+  containerStatuses:
+  - ready: false
+    restartCount: 0
+    state:
+      terminated:
+        reason: Error
+        exitCode: 1
+`)
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Error"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/1"},
+	}, info.Info)
+}
+
+// Test pod condition succeed with deletion
+func TestPodConditionSucceededWithDeletion(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test10
+  deletionTimestamp: "2023-10-01T00:00:00Z"
+spec:
+  nodeName: minikube
+  containers:
+  - name: container
+status:
+  phase: Succeeded
+  containerStatuses:
+  - ready: false
+    restartCount: 0
+    state:
+      terminated:
+        reason: Completed
+        exitCode: 0
+`)
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Completed"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/1"},
+	}, info.Info)
+}
+
+// Test pod condition running with deletion
+func TestPodConditionRunningWithDeletion(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test11
+  deletionTimestamp: "2023-10-01T00:00:00Z"
+spec:
+  nodeName: minikube
+  containers:
+  - name: container
+status:
+  phase: Running
+  containerStatuses:
+  - ready: false
+    restartCount: 0
+    state:
+      running: {}
+`)
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Terminating"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/1"},
+	}, info.Info)
+}
+
+// Test pod condition pending with deletion
+func TestPodConditionPendingWithDeletion(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test12
+  deletionTimestamp: "2023-10-01T00:00:00Z"
+spec:
+  nodeName: minikube
+  containers:
+  - name: container
+status:
+  phase: Pending
+`)
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "Terminating"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/1"},
+	}, info.Info)
+}
+
+// Test PodScheduled condition with reason SchedulingGated
+func TestPodScheduledWithSchedulingGated(t *testing.T) {
+	pod := strToUnstructured(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test13
+spec:
+  nodeName: minikube
+  containers:
+  - name: container1
+  - name: container2
+status:
+  phase: podPhase
+  conditions:
+  - type: PodScheduled
+    status: "False"
+    reason: SchedulingGated
+`)
+	info := &ResourceInfo{}
+	populateNodeInfo(pod, info, []string{})
+	assert.Equal(t, []v1alpha1.InfoItem{
+		{Name: "Status Reason", Value: "SchedulingGated"},
+		{Name: "Node", Value: "minikube"},
+		{Name: "Containers", Value: "0/2"},
+	}, info.Info)
+}
 
 func TestGetNodeInfo(t *testing.T) {
 	node := strToUnstructured(`
 apiVersion: v1
```
```diff
@@ -44,6 +44,10 @@ const (
 	// EnvVarSyncWaveDelay is an environment variable which controls the delay in seconds between
 	// each sync-wave
 	EnvVarSyncWaveDelay = "ARGOCD_SYNC_WAVE_DELAY"
+
+	// serviceAccountDisallowedCharSet contains the characters that are not allowed to be present
+	// in a DefaultServiceAccount configured for a DestinationServiceAccount
+	serviceAccountDisallowedCharSet = "!*[]{}\\/"
 )
 
 func (m *appStateManager) getOpenAPISchema(server string) (openapi.Resources, error) {
@@ -170,12 +174,18 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
 		state.Phase = common.OperationError
 		state.Message = fmt.Sprintf("Failed to load application project: %v", err)
 		return
-	} else if syncWindowPreventsSync(app, proj) {
-		// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
-		if state.Phase == common.OperationRunning {
-			state.Message = "Sync operation blocked by sync window"
-		} else {
+	}
+
+	isBlocked, err := syncWindowPreventsSync(app, proj)
+	if isBlocked {
+		// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
+		if state.Phase == common.OperationRunning {
+			state.Message = "Sync operation blocked by sync window"
+			if err != nil {
+				state.Message = fmt.Sprintf("%s: %v", state.Message, err)
+			}
+		}
 		return
 	}
 
 	if !isMultiSourceRevision {
@@ -287,8 +297,13 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
 	}
 	trackingMethod := argo.GetTrackingMethod(m.settingsMgr)
 
-	if m.settingsMgr.IsImpersonationEnabled() {
-		serviceAccountToImpersonate, err := deriveServiceAccountName(proj, app)
+	impersonationEnabled, err := m.settingsMgr.IsImpersonationEnabled()
+	if err != nil {
+		log.Errorf("could not get impersonation feature flag: %v", err)
+		return
+	}
+	if impersonationEnabled {
+		serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(proj, app)
 		if err != nil {
 			state.Phase = common.OperationError
 			state.Message = fmt.Sprintf("failed to find a matching service account to impersonate: %v", err)
@@ -548,18 +563,23 @@ func delayBetweenSyncWaves(phase common.SyncPhase, wave int, finalWave bool) err
 	return nil
 }
 
-func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) bool {
+func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) (bool, error) {
 	window := proj.Spec.SyncWindows.Matches(app)
 	isManual := false
 	if app.Status.OperationState != nil {
 		isManual = !app.Status.OperationState.Operation.InitiatedBy.Automated
 	}
-	return !window.CanSync(isManual)
+	canSync, err := window.CanSync(isManual)
+	if err != nil {
+		// prevents sync because sync window has an error
+		return true, err
+	}
+	return !canSync, nil
 }
 
-// deriveServiceAccountName determines the service account to be used for impersonation for the sync operation.
+// deriveServiceAccountToImpersonate determines the service account to be used for impersonation for the sync operation.
 // The returned service account will be fully qualified including namespace and the service account name in the format system:serviceaccount:<namespace>:<service_account>
-func deriveServiceAccountName(project *v1alpha1.AppProject, application *v1alpha1.Application) (string, error) {
+func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application) (string, error) {
 	// spec.Destination.Namespace is optional. If not specified, use the Application's
 	// namespace
 	serviceAccountNamespace := application.Spec.Destination.Namespace
@@ -569,10 +589,18 @@ func deriveServiceAccountName(project *v1alpha1.AppProject, application *v1alpha
 	// Loop through the destinationServiceAccounts and see if there is any destination that is a candidate.
 	// if so, return the service account specified for that destination.
 	for _, item := range project.Spec.DestinationServiceAccounts {
-		dstServerMatched := glob.Match(item.Server, application.Spec.Destination.Server)
-		dstNamespaceMatched := glob.Match(item.Namespace, application.Spec.Destination.Namespace)
+		dstServerMatched, err := glob.MatchWithError(item.Server, application.Spec.Destination.Server)
+		if err != nil {
+			return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
+		}
+		dstNamespaceMatched, err := glob.MatchWithError(item.Namespace, application.Spec.Destination.Namespace)
+		if err != nil {
+			return "", fmt.Errorf("invalid glob pattern for destination namespace: %w", err)
+		}
 		if dstServerMatched && dstNamespaceMatched {
-			if strings.Contains(item.DefaultServiceAccount, ":") {
+			if strings.Trim(item.DefaultServiceAccount, " ") == "" || strings.ContainsAny(item.DefaultServiceAccount, serviceAccountDisallowedCharSet) {
+				return "", fmt.Errorf("default service account contains invalid chars '%s'", item.DefaultServiceAccount)
+			} else if strings.Contains(item.DefaultServiceAccount, ":") {
 				// service account is specified along with its namespace.
 				return fmt.Sprintf("system:serviceaccount:%s", item.DefaultServiceAccount), nil
 			} else {
```
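Two behaviors change in deriveServiceAccountToImpersonate: glob matching now uses an error-returning variant so an invalid pattern fails the sync rather than silently not matching, and the configured default service account is validated against a disallowed character set before being qualified as system:serviceaccount:<namespace>:<name>. A standalone sketch of the qualification rule (qualifySA is illustrative, not the controller function):

```go
package main

import (
	"fmt"
	"strings"
)

// qualifySA mirrors the rule in the hunk above: a "ns:name" value is used
// as-is; otherwise the destination namespace is prepended.
func qualifySA(defaultSA, destNamespace string) (string, error) {
	const disallowed = `!*[]{}\/` // mirrors serviceAccountDisallowedCharSet
	if strings.TrimSpace(defaultSA) == "" || strings.ContainsAny(defaultSA, disallowed) {
		return "", fmt.Errorf("default service account contains invalid chars '%s'", defaultSA)
	}
	if strings.Contains(defaultSA, ":") {
		return "system:serviceaccount:" + defaultSA, nil
	}
	return fmt.Sprintf("system:serviceaccount:%s:%s", destNamespace, defaultSA), nil
}

func main() {
	fmt.Println(qualifySA("myns:test-sa", "testns")) // system:serviceaccount:myns:test-sa <nil>
	fmt.Println(qualifySA("test-sa", "testns"))      // system:serviceaccount:testns:test-sa <nil>
}
```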
```diff
@@ -2,6 +2,7 @@ package controller
 
 import (
 	"context"
+	"strconv"
 	"testing"
 
 	"github.com/argoproj/gitops-engine/pkg/sync"
@@ -9,6 +10,7 @@ import (
 	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
```
@@ -644,6 +646,771 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
type fixture struct {
|
||||
project *v1alpha1.AppProject
|
||||
application *v1alpha1.Application
|
||||
}
|
||||
|
||||
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
|
||||
project := &v1alpha1.AppProject{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd-ns",
|
||||
Name: "testProj",
|
||||
},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
DestinationServiceAccounts: destinationServiceAccounts,
|
||||
},
|
||||
}
|
||||
app := &v1alpha1.Application{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: applicationNamespace,
|
||||
Name: "testApp",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "testProj",
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: destinationServerURL,
|
||||
Namespace: destinationNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
return &fixture{
|
||||
project: project,
|
||||
application: app,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("empty destination service accounts", func(t *testing.T) {
|
||||
// given an application referring a project with no destination service accounts
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := ""
|
||||
expectedErrMsg := "no matching service account found for destination server https://kubernetes.svc.local and namespace testns"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should be an error saying no valid match was found
|
||||
assert.EqualError(t, err, expectedErrMsg)
|
||||
})
|
||||
|
||||
t.Run("exact match of destination namespace", func(t *testing.T) {
|
||||
// given an application referring a project with exactly one destination service account that matches the application destination,
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should be no error and should use the right service account for impersonation
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("exact one match with multiple destination service accounts", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts having one exact match for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook-test",
|
||||
DefaultServiceAccount: "guestbook-test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should be no error and should use the right service account for impersonation
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("first match to be used when multiple matches are available", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts having multiple match for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-3",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should be no error and it should use the first matching service account for impersonation
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("first match to be used when glob pattern is used", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with glob patterns matching the application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "test*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and should use the first matching glob pattern service account for impersonation
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("no match among a valid list", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with no matches for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "test1",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "test2",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := ""
|
||||
expectedErrMsg := "no matching service account found for destination server https://kubernetes.svc.local and namespace testns"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should be an error saying no match was found
|
||||
require.EqualError(t, err, expectedErrMsg)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("app destination namespace is empty", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with empty application destination namespace
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
}
|
||||
destinationNamespace := ""
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:argocd-ns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and the service account configured for with empty namespace should be used.
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("match done via catch all glob pattern", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts having a catch all glob pattern
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns1",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and the catch all service account should be returned
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("match done via invalid glob pattern", func(t *testing.T) {
|
||||
// given an application referring a project with a destination service account having an invalid glob pattern for namespace
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "e[[a*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := ""
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there must be an error as the glob pattern is invalid.
|
||||
require.ErrorContains(t, err, "invalid glob pattern for destination namespace")
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("sa specified with a namespace", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts having a matching service account specified with its namespace
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "myns:test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:myns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should not be any error and the service account with its namespace should be returned.
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
type fixture struct {
|
||||
project *v1alpha1.AppProject
|
||||
application *v1alpha1.Application
|
||||
}
|
||||
|
||||
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
|
||||
project := &v1alpha1.AppProject{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd-ns",
|
||||
Name: "testProj",
|
||||
},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
DestinationServiceAccounts: destinationServiceAccounts,
|
||||
},
|
||||
}
|
||||
app := &v1alpha1.Application{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: applicationNamespace,
|
||||
Name: "testApp",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "testProj",
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: destinationServerURL,
|
||||
Namespace: destinationNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
return &fixture{
|
||||
project: project,
|
||||
application: app,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("exact one match with multiple destination service accounts", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts and one exact match for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://abc.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://cde.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and the right service account must be returned.
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("first match to be used when multiple matches are available", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts and multiple matches for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and first matching service account should be used
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("first match to be used when glob pattern is used", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with a matching glob pattern and exact match
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "test*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should not be any error and the service account of the glob pattern, being the first match should be returned.
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
    t.Run("no match among a valid list", func(t *testing.T) {
        // given an application referring to a project with multiple destination service accounts and no match
        t.Parallel()
        destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
            {
                Server:                "https://kubernetes.svc.local",
                Namespace:             "testns",
                DefaultServiceAccount: "test-sa",
            },
            {
                Server:                "https://abc.svc.local",
                Namespace:             "testns",
                DefaultServiceAccount: "test-sa-2",
            },
            {
                Server:                "https://cde.svc.local",
                Namespace:             "default",
                DefaultServiceAccount: "default-sa",
            },
        }
        destinationNamespace := "testns"
        destinationServerURL := "https://xyz.svc.local"
        applicationNamespace := "argocd-ns"
        expectedSA := ""
        expectedErr := "no matching service account found for destination server https://xyz.svc.local and namespace testns"

        f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
        // when
        sa, err := deriveServiceAccountToImpersonate(f.project, f.application)

        // then, an error with an appropriate message must be returned
        require.EqualError(t, err, expectedErr)
        assert.Equal(t, expectedSA, sa)
    })

    t.Run("match done via catch all glob pattern", func(t *testing.T) {
        // given an application referring to a project with multiple destination service accounts and a matching catch-all glob pattern
        t.Parallel()
        destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
            {
                Server:                "https://kubernetes.svc.local",
                Namespace:             "testns1",
                DefaultServiceAccount: "test-sa-2",
            },
            {
                Server:                "https://kubernetes.svc.local",
                Namespace:             "default",
                DefaultServiceAccount: "default-sa",
            },
            {
                Server:                "*",
                Namespace:             "*",
                DefaultServiceAccount: "test-sa",
            },
        }
        destinationNamespace := "testns"
        destinationServerURL := "https://localhost:6443"
        applicationNamespace := "argocd-ns"
        expectedSA := "system:serviceaccount:testns:test-sa"

        f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
        // when
        sa, err := deriveServiceAccountToImpersonate(f.project, f.application)

        // then, there should not be any error and the service account of the glob pattern match must be returned.
        require.NoError(t, err)
        assert.Equal(t, expectedSA, sa)
    })

    t.Run("match done via invalid glob pattern", func(t *testing.T) {
        // given an application referring to a project with a destination service account having an invalid glob pattern for the server
        t.Parallel()
        destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
            {
                Server:                "e[[a*",
                Namespace:             "test-ns",
                DefaultServiceAccount: "test-sa",
            },
        }
        destinationNamespace := "testns"
        destinationServerURL := "https://kubernetes.svc.local"
        applicationNamespace := "argocd-ns"
        expectedSA := ""

        f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
        // when
        sa, err := deriveServiceAccountToImpersonate(f.project, f.application)

        // then, there must be an error as the glob pattern is invalid.
        require.ErrorContains(t, err, "invalid glob pattern for destination server")
        assert.Equal(t, expectedSA, sa)
    })

    t.Run("sa specified with a namespace", func(t *testing.T) {
        // given the app sync impersonation feature is enabled and the matching service account is prefixed with a namespace
        t.Parallel()
        destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
            {
                Server:                "https://abc.svc.local",
                Namespace:             "testns",
                DefaultServiceAccount: "myns:test-sa",
            },
            {
                Server:                "https://kubernetes.svc.local",
                Namespace:             "default",
                DefaultServiceAccount: "default-sa",
            },
            {
                Server:                "*",
                Namespace:             "*",
                DefaultServiceAccount: "test-sa",
            },
        }
        destinationNamespace := "testns"
        destinationServerURL := "https://abc.svc.local"
        applicationNamespace := "argocd-ns"
        expectedSA := "system:serviceaccount:myns:test-sa"

        f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
        // when
        sa, err := deriveServiceAccountToImpersonate(f.project, f.application)

        // then, there should not be any error and the service account with the given namespace prefix must be returned.
        require.NoError(t, err)
        assert.Equal(t, expectedSA, sa)
    })
}
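
The tests above pin down the selection semantics: entries are evaluated in order, server and namespace are treated as glob patterns, the first match wins, and a `<namespace>:<name>`-prefixed service account overrides the destination namespace. A minimal, self-contained sketch of that logic (not the controller's actual implementation; the helper name and types here are illustrative):

```go
package impersonate

import (
    "fmt"
    "strings"

    "github.com/gobwas/glob"
)

type destSA struct{ Server, Namespace, DefaultServiceAccount string }

// deriveSA walks the list in order and returns the fully qualified username
// for the first entry whose server and namespace globs both match.
func deriveSA(dsas []destSA, server, namespace string) (string, error) {
    for _, d := range dsas {
        srv, err := glob.Compile(d.Server)
        if err != nil {
            return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
        }
        ns, err := glob.Compile(d.Namespace)
        if err != nil {
            return "", fmt.Errorf("invalid glob pattern for destination namespace: %w", err)
        }
        if srv.Match(server) && ns.Match(namespace) {
            saNamespace, saName := namespace, d.DefaultServiceAccount
            // "myns:test-sa" pins the service account's namespace explicitly.
            if i := strings.Index(d.DefaultServiceAccount, ":"); i >= 0 {
                saNamespace, saName = d.DefaultServiceAccount[:i], d.DefaultServiceAccount[i+1:]
            }
            return fmt.Sprintf("system:serviceaccount:%s:%s", saNamespace, saName), nil
        }
    }
    return "", fmt.Errorf("no matching service account found for destination server %s and namespace %s", server, namespace)
}
```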

func TestSyncWithImpersonate(t *testing.T) {
    type fixture struct {
        project     *v1alpha1.AppProject
        application *v1alpha1.Application
        controller  *ApplicationController
    }

    setup := func(impersonationEnabled bool, destinationNamespace, serviceAccountName string) *fixture {
        app := newFakeApp()
        app.Status.OperationState = nil
        app.Status.History = nil
        project := &v1alpha1.AppProject{
            ObjectMeta: v1.ObjectMeta{
                Namespace: test.FakeArgoCDNamespace,
                Name:      "default",
            },
            Spec: v1alpha1.AppProjectSpec{
                DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
                    {
                        Server:                "https://localhost:6443",
                        Namespace:             destinationNamespace,
                        DefaultServiceAccount: serviceAccountName,
                    },
                },
            },
        }
        additionalObjs := []runtime.Object{}
        if serviceAccountName != "" {
            syncServiceAccount := &corev1.ServiceAccount{
                ObjectMeta: v1.ObjectMeta{
                    Name:      serviceAccountName,
                    Namespace: test.FakeDestNamespace,
                },
            }
            additionalObjs = append(additionalObjs, syncServiceAccount)
        }
        data := fakeData{
            apps: []runtime.Object{app, project},
            manifestResponse: &apiclient.ManifestResponse{
                Manifests: []string{},
                Namespace: test.FakeDestNamespace,
                Server:    "https://localhost:6443",
                Revision:  "abc123",
            },
            managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{},
            configMapData: map[string]string{
                "application.sync.impersonation.enabled": strconv.FormatBool(impersonationEnabled),
            },
            additionalObjs: additionalObjs,
        }
        ctrl := newFakeController(&data, nil)
        return &fixture{
            project:     project,
            application: app,
            controller:  ctrl,
        }
    }

    t.Run("sync with impersonation and no matching service account", func(t *testing.T) {
        // given the app sync impersonation feature is enabled with an application referring to a project with no matching service account
        f := setup(true, test.FakeArgoCDNamespace, "")
        opMessage := "failed to find a matching service account to impersonate: no matching service account found for destination server https://localhost:6443 and namespace fake-dest-ns"

        opState := &v1alpha1.OperationState{
            Operation: v1alpha1.Operation{
                Sync: &v1alpha1.SyncOperation{
                    Source: &v1alpha1.ApplicationSource{},
                },
            },
            Phase: common.OperationRunning,
        }
        // when
        f.controller.appStateManager.SyncAppState(f.application, opState)

        // then, app sync should fail with the expected error message in the operation state
        assert.Equal(t, common.OperationError, opState.Phase)
        assert.Contains(t, opState.Message, opMessage)
    })

    t.Run("sync with impersonation and empty service account match", func(t *testing.T) {
        // given the app sync impersonation feature is enabled with an application referring to a project whose matching service account is an empty string
        f := setup(true, test.FakeDestNamespace, "")
        opMessage := "failed to find a matching service account to impersonate: default service account contains invalid chars ''"

        opState := &v1alpha1.OperationState{
            Operation: v1alpha1.Operation{
                Sync: &v1alpha1.SyncOperation{
                    Source: &v1alpha1.ApplicationSource{},
                },
            },
            Phase: common.OperationRunning,
        }
        // when
        f.controller.appStateManager.SyncAppState(f.application, opState)

        // then, app sync should fail with the expected error message in the operation state
        assert.Equal(t, common.OperationError, opState.Phase)
        assert.Contains(t, opState.Message, opMessage)
    })

    t.Run("sync with impersonation and matching sa", func(t *testing.T) {
        // given the app sync impersonation feature is enabled with an application referring to a project with a matching service account
        f := setup(true, test.FakeDestNamespace, "test-sa")
        opMessage := "successfully synced (no more tasks)"

        opState := &v1alpha1.OperationState{
            Operation: v1alpha1.Operation{
                Sync: &v1alpha1.SyncOperation{
                    Source: &v1alpha1.ApplicationSource{},
                },
            },
            Phase: common.OperationRunning,
        }
        // when
        f.controller.appStateManager.SyncAppState(f.application, opState)

        // then, app sync should not fail
        assert.Equal(t, common.OperationSucceeded, opState.Phase)
        assert.Contains(t, opState.Message, opMessage)
    })

    t.Run("sync without impersonation", func(t *testing.T) {
        // given the app sync impersonation feature is disabled with an application referring to a project with a matching service account
        f := setup(false, test.FakeDestNamespace, "")
        opMessage := "successfully synced (no more tasks)"

        opState := &v1alpha1.OperationState{
            Operation: v1alpha1.Operation{
                Sync: &v1alpha1.SyncOperation{
                    Source: &v1alpha1.ApplicationSource{},
                },
            },
            Phase: common.OperationRunning,
        }
        // when
        f.controller.appStateManager.SyncAppState(f.application, opState)

        // then, application sync should pass using the control plane service account
        assert.Equal(t, common.OperationSucceeded, opState.Phase)
        assert.Contains(t, opState.Message, opMessage)
    })
}
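
End to end, these tests rely on client-go's native impersonation support: once a matching service account is derived, every API request for the sync can carry it via the `Impersonate-User` header. A hedged sketch of that wiring (not Argo CD's exact sync code):

```go
package impersonate

import (
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

// clientFor returns a clientset whose requests are executed as the given user,
// e.g. "system:serviceaccount:testns:test-sa".
func clientFor(base *rest.Config, user string) (*kubernetes.Clientset, error) {
    cfg := rest.CopyConfig(base) // leave the control-plane config untouched
    cfg.Impersonate = rest.ImpersonationConfig{UserName: user}
    return kubernetes.NewForConfig(cfg)
}
```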

func dig[T any](obj interface{}, path []interface{}) T {
    i := obj

@@ -32,23 +32,41 @@ function initializeVersionDropdown() {
  window[callbackName] = function(response) {
    const div = document.createElement('div');
    div.innerHTML = response.html;
    document.querySelector(".md-header__inner > .md-header__title").appendChild(div);
    const headerTitle = document.querySelector(".md-header__inner > .md-header__title");
    if (headerTitle) {
      headerTitle.appendChild(div);
    }

    const container = div.querySelector('.rst-versions');
    if (!container) return; // Exit if container not found

    // Add caret icon
    var caret = document.createElement('div');
    caret.innerHTML = "<i class='fa fa-caret-down dropdown-caret'></i>";
    caret.classList.add('dropdown-caret');
    div.querySelector('.rst-current-version').appendChild(caret);
    const currentVersionElem = div.querySelector('.rst-current-version');
    if (currentVersionElem) {
      currentVersionElem.appendChild(caret);
    }

    div.querySelector('.rst-current-version').addEventListener('click', function() {
      container.classList.toggle('shift-up');
    });
    // Add click listener to toggle dropdown
    if (currentVersionElem && container) {
      currentVersionElem.addEventListener('click', function() {
        container.classList.toggle('shift-up');
      });
    }

    // Sorting Logic
    sortVersionLinks(container);
  };

  // Load CSS
  var CSSLink = document.createElement('link');
  CSSLink.rel = 'stylesheet';
  CSSLink.href = '/assets/versions.css';
  document.getElementsByTagName('head')[0].appendChild(CSSLink);

  // Load JSONP Script
  var script = document.createElement('script');
  const currentVersion = getCurrentVersion();
  script.src = 'https://argo-cd.readthedocs.io/_/api/v2/footer_html/?' +
@@ -56,6 +74,58 @@ function initializeVersionDropdown() {
  document.getElementsByTagName('head')[0].appendChild(script);
}

// Function to sort version links
function sortVersionLinks(container) {
  // Find all <dl> elements within the container
  const dlElements = container.querySelectorAll('dl');

  dlElements.forEach(dl => {
    const dt = dl.querySelector('dt');
    if (dt && dt.textContent.trim().toLowerCase() === 'versions') {
      // Found the Versions <dl>
      const ddElements = Array.from(dl.querySelectorAll('dd'));

      // Define sorting criteria
      ddElements.sort((a, b) => {
        const aText = a.textContent.trim().toLowerCase();
        const bText = b.textContent.trim().toLowerCase();

        // Prioritize 'latest' and 'stable'
        if (aText === 'latest') return -1;
        if (bText === 'latest') return 1;
        if (aText === 'stable') return -1;
        if (bText === 'stable') return 1;

        // Extract version numbers (e.g., release-2.9)
        const aVersionMatch = aText.match(/release-(\d+(\.\d+)*)/);
        const bVersionMatch = bText.match(/release-(\d+(\.\d+)*)/);

        if (aVersionMatch && bVersionMatch) {
          const aVersion = aVersionMatch[1].split('.').map(Number);
          const bVersion = bVersionMatch[1].split('.').map(Number);

          for (let i = 0; i < Math.max(aVersion.length, bVersion.length); i++) {
            const aNum = aVersion[i] || 0;
            const bNum = bVersion[i] || 0;
            if (aNum > bNum) return -1;
            if (aNum < bNum) return 1;
          }
          return 0;
        }

        // Fallback to alphabetical order
        return aText.localeCompare(bText);
      });

      // Remove existing <dd> elements
      ddElements.forEach(dd => dl.removeChild(dd));

      // Append sorted <dd> elements
      ddElements.forEach(dd => dl.appendChild(dd));
    }
  });
}

// VERSION WARNINGS
window.addEventListener("DOMContentLoaded", function() {
  var margin = 30;

@@ -1,7 +1,7 @@
# Application Sync using impersonation

!!! warning "Alpha Feature"
    This is an experimental, alpha-quality feature that allows you to control the service account used for the sync operation. The configured service account, could have lesser privileges required for creating resources compared to the highly privileged access required for the control plane operations.
    This is an experimental, alpha-quality feature that allows you to control the service account used for the sync operation. The configured service account could have lesser privileges required for creating resources compared to the highly privileged access required for the control plane operations.

!!! warning
    Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues.
@@ -94,7 +94,7 @@ spec:
  sourceRepos:
  - '*'
  destinations:
  - *
  - '*'
  destinationServiceAccounts:
  - server: https://kubernetes.default.svc
    namespace: guestbook

@@ -329,14 +329,14 @@ data:
  # spread out the refreshes and give time to the repo-server to catch up. The jitter is the maximum duration that can be
  # added to the sync timeout. So, if the sync timeout is 3 minutes and the jitter is 1 minute, then the actual timeout will
  # be between 3 and 4 minutes. Disabled when the value is 0, defaults to 0.
  timeout.reconciliation.jitter: 0
  timeout.reconciliation.jitter: "0"
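
For illustration, the jitter arithmetic described in the comment above could look like this (a hypothetical helper, not an Argo CD function):

```go
package settings

import (
    "math/rand"
    "time"
)

// jitteredTimeout returns base plus a random offset in [0, jitter).
// With base=3m and jitter=1m the result falls between 3 and 4 minutes;
// a jitter of 0 disables the offset entirely.
func jitteredTimeout(base, jitter time.Duration) time.Duration {
    if jitter <= 0 {
        return base
    }
    return base + time.Duration(rand.Int63n(int64(jitter)))
}
```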

  # cluster.inClusterEnabled indicates whether to allow in-cluster server address. This is enabled by default.
  cluster.inClusterEnabled: "true"

  # The maximum number of pod logs to render in UI. If the application has more than this number of pods, the logs will not be rendered.
  # This is to prevent the UI from becoming unresponsive when rendering a large number of logs. Default is 10.
  server.maxPodLogsToRender: 10
  server.maxPodLogsToRender: "10"

  # Application pod logs RBAC enforcement enables control over who can and who can't view application pod logs.
  # When you enable the switch, pod logs will be visible only to admin role by default. Other roles/users will not be able to view them via cli and UI.
@@ -425,7 +425,7 @@ data:
      name: some-cluster
      server: https://some-cluster
  # The maximum size of the payload that can be sent to the webhook server.
  webhook.maxPayloadSizeMB: 1024
  webhook.maxPayloadSizeMB: "1024"

  # application.sync.impersonation.enabled indicates whether the application sync can be decoupled from control plane service account using impersonation.
  # application.sync.impersonation.enabled enables application sync to use a custom service account, via impersonation. This allows decoupling sync from the control-plane service account.
  application.sync.impersonation.enabled: "false"

@@ -98,20 +98,27 @@ data:
        return hs
    ```

In order to prevent duplication of the custom health check for potentially multiple resources, it is also possible to specify a wildcard in the resource kind, and anywhere in the resource group, like this:
In order to prevent duplication of custom health checks for potentially multiple resources, it is also possible to
specify a wildcard in the resource kind, and anywhere in the resource group, like this:

```yaml
  resource.customizations.health.ec2.aws.crossplane.io_*: |
    ...
  resource.customizations: |
    ec2.aws.crossplane.io/*:
      health.lua: |
        ...
```

```yaml
  resource.customizations.health.*.aws.crossplane.io_*: |
    ...
  # If a key _begins_ with a wildcard, please ensure that the GVK key is quoted.
  resource.customizations: |
    "*.aws.crossplane.io/*":
      health.lua: |
        ...
```

!!!important
    Please, note that there can be ambiguous resolution of wildcards, see [#16905](https://github.com/argoproj/argo-cd/issues/16905)
    Please, note that wildcards are only supported when using the `resource.customizations` key, the `resource.customizations.health.<group>_<kind>`
    style keys do not work since wildcards (`*`) are not supported in Kubernetes configmap keys.

The `obj` is a global variable which contains the resource. The script must return an object with status and optional message field.
The custom health check might return one of the following health statuses:
@@ -121,7 +128,7 @@ The custom health check might return one of the following health statuses:
* `Degraded` - the resource is degraded
* `Suspended` - the resource is suspended and waiting for some external event to resume (e.g. suspended CronJob or paused Deployment)

By default health typically returns `Progressing` status.
By default, health typically returns a `Progressing` status.

NOTE: As a security measure, access to the standard Lua libraries will be disabled by default. Admins can control access by
setting `resource.customizations.useOpenLibs.<group>_<kind>`. In the following example, standard libraries are enabled for health check of `cert-manager.io/Certificate`.

@@ -1,2 +1,5 @@
This page is populated for released Argo CD versions. Use the version selector to view this table for a specific version.

| Argo CD version | Kubernetes versions |
|-----------------|---------------------|
| 2.13 | v1.30, v1.29, v1.28, v1.27 |
| 2.12 | v1.29, v1.28, v1.27, v1.26 |
| 2.11 | v1.29, v1.28, v1.27, v1.26, v1.25 |

@@ -1,5 +1,9 @@
# v2.12 to 2.13

## Upgraded Helm Version

Note that the bundled Helm version has been upgraded from 3.15.2 to 3.15.4.

## Custom Resource Actions for Flux Resources

[`Custom Resource Actions`](../resource_actions.md#Custom-Resource-Actions) have been added for Flux Resources.

@@ -68,9 +68,9 @@ This proposal would allow ArgoCD administrators to manage the cluster permission

### Goals
- Applications may only impersonate ServiceAccounts that live in the same namespace as the destination namespace configured in the application. If the service account is created in a different namespace, then the user can provide the service account name in the format `<namespace>:<service_account_name>`. The ServiceAccount to be used for syncing each application is determined by the target destination configured in the `AppProject` associated with the `Application`.
- If the impersonation feature is enabled, and no service account name is provided in the associated `AppProject`, then the sync operation would fail with an appropriate error message. Users can configure a catch all service account matching all destinations to avoid such sync errors.
- If the impersonation feature is enabled, and no service account name is provided in the associated `AppProject`, then the default service account of the destination namespace of the `Application` should be used.
- Access restrictions implemented through properties in AppProject (if done) must have the existing behavior. From a security standpoint, any restrictions that were available before switching to a service account based approach should continue to exist even when the impersonation feature is enabled.
- The feature can be enabled/disabled only at the system level. Once enabled/disabled, it is applicable to all ArgoCD `Applications`.
- The feature can be enabled/disabled only at the system level. Once enabled/disabled, it is applicable to all Argo CD `Applications`.

### Non-Goals

@@ -82,7 +82,7 @@ As part of this proposal, it would be possible for an ArgoCD Admin to specify a

When an application gets synced, based on its destination (target cluster and namespace combination), the `defaultServiceAccount` configured in the `AppProject` will be selected and used for impersonation when executing the kubectl commands for the sync operation.

We would be introducing a new element `destinationServiceAccounts` in `AppProject.spec`. This element is used for the sole purpose of specifying the impersonation configuration. The `defaultServiceAccount` configured for the `AppProject` would be used for the sync operation for a particular destination cluster and namespace. If the impersonation feature is enabled and no specific service account is provided in the `AppProject` CR, then the sync operation will fail with an error. Users can configure a catch all service account matching all destinations to avoid such sync errors.
We would be introducing a new element `destinationServiceAccounts` in `AppProject.spec`. This element is used for the sole purpose of specifying the impersonation configuration. The `defaultServiceAccount` configured for the `AppProject` would be used for the sync operation for a particular destination cluster and namespace. If the impersonation feature is enabled and no specific service account is provided in the `AppProject` CR, then the `default` service account in the destination namespace would be used for impersonation.

```yaml
apiVersion: argoproj.io/v1alpha1
@@ -109,7 +109,7 @@ spec:
  - server: https://kubernetes.default.svc
    namespace: guestbook-stage
    defaultServiceAccount: guestbook-stage-deployer
  - server: '*
  - server: '*'
    namespace: '*'
    defaultServiceAccount: default # catch all service account to be used when all other matches fail.
```
@@ -161,7 +161,10 @@ So that, I can use a generic convention of naming service accounts and avoid ass

#### Component: ArgoCD Application Controller

- Provide a configuration in `argocd-cm` which can be modified to enable the Impersonation feature. Set `application.sync.impersonation.enabled: "true"` in the Argo CD ConfigMap. The default value of `application.sync.impersonation.enabled` would be `"false"` and the user has to explicitly override it to use this feature.
- Provide a configuration in `argocd-cm` which can be modified to enable the Impersonation feature. Set `applicationcontroller.enable.impersonation: true` in the Argo CD ConfigMap. The default value of `applicationcontroller.enable.impersonation` would be `false` and the user has to explicitly override it to use this feature.
- Provide an option to override the Impersonation feature using environment variables.
  Set `ARGOCD_APPLICATION_CONTROLLER_ENABLE_IMPERSONATION=true` in the Application controller environment variables. The default value of the environment variable must be `false` and the user has to explicitly set it to `true` to use this feature.
- Provide an option to enable this feature using a command line flag `--enable-impersonation`. This new argument option needs to be added to the Application controller args.
- Fix the Application Controller `sync.go` to set the Impersonate configuration from the AppProject CR on the `SyncContext` object (rawConfig and restConfig fields; need to understand which config is used for the actual sync and whether both configs need to be impersonated.)

#### Component: ArgoCD UI

@@ -30,6 +30,7 @@ argocd admin proj generate-spec PROJECT [flags]
      --deny-namespaced-resource stringArray   List of denied namespaced resources
      --description string                     Project description
  -d, --dest stringArray                       Permitted destination server and namespace (e.g. https://192.168.99.100:8443,default)
      --dest-service-accounts stringArray      Destination server, namespace and target service account (e.g. https://192.168.99.100:8443,default,default-sa)
  -f, --file string                            Filename or URL to Kubernetes manifests for the project
  -h, --help                                   help for generate-spec
  -i, --inline                                 If set then generated resource is written back to the file specified in --file flag

1
docs/user-guide/commands/argocd_proj_create.md
generated
@@ -27,6 +27,7 @@ argocd proj create PROJECT [flags]
      --deny-namespaced-resource stringArray   List of denied namespaced resources
      --description string                     Project description
  -d, --dest stringArray                       Permitted destination server and namespace (e.g. https://192.168.99.100:8443,default)
      --dest-service-accounts stringArray      Destination server, namespace and target service account (e.g. https://192.168.99.100:8443,default,default-sa)
  -f, --file string                            Filename or URL to Kubernetes manifests for the project
  -h, --help                                   help for create
      --orphaned-resources                     Enables orphaned resources monitoring

1
docs/user-guide/commands/argocd_proj_set.md
generated
@@ -27,6 +27,7 @@ argocd proj set PROJECT [flags]
      --deny-namespaced-resource stringArray   List of denied namespaced resources
      --description string                     Project description
  -d, --dest stringArray                       Permitted destination server and namespace (e.g. https://192.168.99.100:8443,default)
      --dest-service-accounts stringArray      Destination server, namespace and target service account (e.g. https://192.168.99.100:8443,default,default-sa)
  -h, --help                                   help for set
      --orphaned-resources                     Enables orphaned resources monitoring
      --orphaned-resources-warn                Specifies if applications should have a warning condition when orphaned resources detected

@@ -0,0 +1 @@
1bc3f354f7ce4d7fd9cfa5bcc701c1f32c88d27076d96c2792d5b5226062aee5 helm-v3.15.4-darwin-amd64.tar.gz
@@ -0,0 +1 @@
88115846a1fb58f8eb8f64fec5c343d95ca394f1be811602fa54a887c98730ac helm-v3.15.4-darwin-arm64.tar.gz
@@ -0,0 +1 @@
11400fecfc07fd6f034863e4e0c4c4445594673fd2a129e701fe41f31170cfa9 helm-v3.15.4-linux-amd64.tar.gz
@@ -0,0 +1 @@
fa419ecb139442e8a594c242343fafb7a46af3af34041c4eac1efcc49d74e626 helm-v3.15.4-linux-arm64.tar.gz
@@ -0,0 +1 @@
e4efce93723f52dd858e9046ea836c9c75f346facce1b87b8cf78c817b97e6ac helm-v3.15.4-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
c6e0cdea598196895ac7b627ce972699ef9f06b0eba51dc4db7cc21b3369f24a helm-v3.15.4-linux-s390x.tar.gz
@@ -11,7 +11,7 @@
# Use ./hack/installers/checksums/add-helm-checksums.sh and
# add-kustomize-checksums.sh to help download checksums.
###############################################################################
helm3_version=3.15.2
helm3_version=3.15.4
kubectl_version=1.17.8
kubectx_version=0.6.3
kustomize5_version=5.4.3

6
hack/update-supported-versions.sh
Normal file → Executable file
@@ -11,7 +11,11 @@ for n in 0 1 2; do
    minor_version_num=$((argocd_minor_version_num - n))
    minor_version="${argocd_major_version_num}.${minor_version_num}"
    git checkout "release-$minor_version" > /dev/null || exit 1
    line=$(yq '.jobs["test-e2e"].strategy.matrix["k3s-version"][]' .github/workflows/ci-build.yaml | \

    line=$(yq '.jobs["test-e2e"].strategy.matrix |
      # k3s-version was an array prior to 2.12. This checks for the old format first and then falls back to the new format.
      (.["k3s-version"] // (.k3s | map(.version))) |
      .[]' .github/workflows/ci-build.yaml | \
        jq --arg minor_version "$minor_version" --raw-input --slurp --raw-output \
          'split("\n")[:-1] | map(sub("\\.[0-9]+$"; "")) | join(", ") | "| \($minor_version) | \(.) |"')
    out+="$line\n"

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
  newName: quay.io/argoproj/argocd
  newTag: latest
  newTag: v2.13.0-rc4
resources:
- ./application-controller
- ./dex

@@ -15,6 +15,7 @@ rules:
      - delete # supports deletion of a live object in UI
      - get # supports viewing live object manifest in UI
      - patch # supports `argocd app patch`
      - list # supports `argocd appset generate` with cluster generator
  - apiGroups:
      - ""
    resources:

15
manifests/core-install.yaml
generated
@@ -21735,7 +21735,7 @@ spec:
                      sync operation.
                    properties:
                      defaultServiceAccount:
                        description: ServiceAccountName to be used for impersonation
                        description: DefaultServiceAccount to be used for impersonation
                          during the sync operation
                        type: string
                      namespace:
@@ -21746,6 +21746,9 @@ spec:
                        description: Server specifies the URL of the target cluster's
                          Kubernetes control plane API.
                        type: string
                    required:
                    - defaultServiceAccount
                    - server
                    type: object
                  type: array
                destinations:
@@ -22558,7 +22561,7 @@ spec:
              key: applicationsetcontroller.webhook.parallelism.limit
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -22676,7 +22679,7 @@ spec:
        - argocd
        - admin
        - redis-initial-password
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: IfNotPresent
        name: secret-init
        securityContext:
@@ -22929,7 +22932,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -22981,7 +22984,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -23253,7 +23256,7 @@ spec:
              key: controller.ignore.normalizer.jq.timeout
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
  newName: quay.io/argoproj/argocd
  newTag: latest
  newTag: v2.13.0-rc4

5
manifests/crds/appproject-crd.yaml
generated
@@ -95,7 +95,7 @@ spec:
                      sync operation.
                    properties:
                      defaultServiceAccount:
                        description: ServiceAccountName to be used for impersonation
                        description: DefaultServiceAccount to be used for impersonation
                          during the sync operation
                        type: string
                      namespace:
@@ -106,6 +106,9 @@ spec:
                        description: Server specifies the URL of the target cluster's
                          Kubernetes control plane API.
                        type: string
                    required:
                    - defaultServiceAccount
                    - server
                    type: object
                  type: array
                destinations:

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
  newName: quay.io/argoproj/argocd
  newTag: latest
  newTag: v2.13.0-rc4
resources:
- ../../base/application-controller
- ../../base/applicationset-controller

22
manifests/ha/install.yaml
generated
@@ -21735,7 +21735,7 @@ spec:
                      sync operation.
                    properties:
                      defaultServiceAccount:
                        description: ServiceAccountName to be used for impersonation
                        description: DefaultServiceAccount to be used for impersonation
                          during the sync operation
                        type: string
                      namespace:
@@ -21746,6 +21746,9 @@ spec:
                        description: Server specifies the URL of the target cluster's
                          Kubernetes control plane API.
                        type: string
                    required:
                    - defaultServiceAccount
                    - server
                    type: object
                  type: array
                destinations:
@@ -22463,6 +22466,7 @@ rules:
  - delete
  - get
  - patch
  - list
- apiGroups:
  - ""
  resources:
@@ -23901,7 +23905,7 @@ spec:
              key: applicationsetcontroller.webhook.parallelism.limit
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -24036,7 +24040,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: copyutil
        securityContext:
@@ -24124,7 +24128,7 @@ spec:
              key: notificationscontroller.repo.server.plaintext
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          tcpSocket:
@@ -24243,7 +24247,7 @@ spec:
        - argocd
        - admin
        - redis-initial-password
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: IfNotPresent
        name: secret-init
        securityContext:
@@ -24524,7 +24528,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -24576,7 +24580,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -24930,7 +24934,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
@@ -25238,7 +25242,7 @@ spec:
              key: controller.ignore.normalizer.jq.timeout
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

16
manifests/ha/namespace-install.yaml
generated
@@ -1694,7 +1694,7 @@ spec:
              key: applicationsetcontroller.webhook.parallelism.limit
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -1829,7 +1829,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: copyutil
        securityContext:
@@ -1917,7 +1917,7 @@ spec:
              key: notificationscontroller.repo.server.plaintext
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          tcpSocket:
@@ -2036,7 +2036,7 @@ spec:
        - argocd
        - admin
        - redis-initial-password
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: IfNotPresent
        name: secret-init
        securityContext:
@@ -2317,7 +2317,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -2369,7 +2369,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -2723,7 +2723,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
@@ -3031,7 +3031,7 @@ spec:
              key: controller.ignore.normalizer.jq.timeout
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

22
manifests/install.yaml
generated
@@ -21735,7 +21735,7 @@ spec:
                      sync operation.
                    properties:
                      defaultServiceAccount:
                        description: ServiceAccountName to be used for impersonation
                        description: DefaultServiceAccount to be used for impersonation
                          during the sync operation
                        type: string
                      namespace:
@@ -21746,6 +21746,9 @@ spec:
                        description: Server specifies the URL of the target cluster's
                          Kubernetes control plane API.
                        type: string
                    required:
                    - defaultServiceAccount
                    - server
                    type: object
                  type: array
                destinations:
@@ -22430,6 +22433,7 @@ rules:
  - delete
  - get
  - patch
  - list
- apiGroups:
  - ""
  resources:
@@ -23018,7 +23022,7 @@ spec:
              key: applicationsetcontroller.webhook.parallelism.limit
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -23153,7 +23157,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: copyutil
        securityContext:
@@ -23241,7 +23245,7 @@ spec:
              key: notificationscontroller.repo.server.plaintext
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          tcpSocket:
@@ -23341,7 +23345,7 @@ spec:
        - argocd
        - admin
        - redis-initial-password
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: IfNotPresent
        name: secret-init
        securityContext:
@@ -23594,7 +23598,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -23646,7 +23650,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -23998,7 +24002,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
@@ -24306,7 +24310,7 @@ spec:
              key: controller.ignore.normalizer.jq.timeout
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

16
manifests/namespace-install.yaml
generated
@@ -811,7 +811,7 @@ spec:
              key: applicationsetcontroller.webhook.parallelism.limit
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -946,7 +946,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: copyutil
        securityContext:
@@ -1034,7 +1034,7 @@ spec:
              key: notificationscontroller.repo.server.plaintext
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          tcpSocket:
@@ -1134,7 +1134,7 @@ spec:
        - argocd
        - admin
        - redis-initial-password
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: IfNotPresent
        name: secret-init
        securityContext:
@@ -1387,7 +1387,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -1439,7 +1439,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -1791,7 +1791,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
@@ -2099,7 +2099,7 @@ spec:
              key: controller.ignore.normalizer.jq.timeout
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:latest
        image: quay.io/argoproj/argocd:v2.13.0-rc4
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

@@ -6,15 +6,22 @@ import (
    "strconv"
    "strings"

    "github.com/argoproj/argo-cd/v2/util/git"
    "github.com/argoproj/argo-cd/v2/util/glob"

    globutil "github.com/gobwas/glob"
    "github.com/google/go-cmp/cmp"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"

    "github.com/argoproj/argo-cd/v2/util/git"
    "github.com/argoproj/argo-cd/v2/util/glob"
)

const (
    // serviceAccountDisallowedCharSet contains the characters that are not allowed to be present
    // in a DefaultServiceAccount configured for a DestinationServiceAccount
    serviceAccountDisallowedCharSet = "!*[]{}\\/"
)

type ErrApplicationNotAllowedToUseProject struct {
@@ -267,12 +274,27 @@ func (p *AppProject) ValidateProject() error {

    destServiceAccts := make(map[string]bool)
    for _, destServiceAcct := range p.Spec.DestinationServiceAccounts {
        if destServiceAcct.Server == "!*" {
            return status.Errorf(codes.InvalidArgument, "server has an invalid format, '!*'")
        if strings.Contains(destServiceAcct.Server, "!") {
            return status.Errorf(codes.InvalidArgument, "server has an invalid format, '%s'", destServiceAcct.Server)
        }

        if destServiceAcct.Namespace == "!*" {
            return status.Errorf(codes.InvalidArgument, "namespace has an invalid format, '!*'")
        if strings.Contains(destServiceAcct.Namespace, "!") {
            return status.Errorf(codes.InvalidArgument, "namespace has an invalid format, '%s'", destServiceAcct.Namespace)
        }

        if strings.Trim(destServiceAcct.DefaultServiceAccount, " ") == "" ||
            strings.ContainsAny(destServiceAcct.DefaultServiceAccount, serviceAccountDisallowedCharSet) {
            return status.Errorf(codes.InvalidArgument, "defaultServiceAccount has an invalid format, '%s'", destServiceAcct.DefaultServiceAccount)
        }

        _, err := globutil.Compile(destServiceAcct.Server)
        if err != nil {
            return status.Errorf(codes.InvalidArgument, "server has an invalid format, '%s'", destServiceAcct.Server)
        }

        _, err = globutil.Compile(destServiceAcct.Namespace)
        if err != nil {
            return status.Errorf(codes.InvalidArgument, "namespace has an invalid format, '%s'", destServiceAcct.Namespace)
        }

        key := fmt.Sprintf("%s/%s", destServiceAcct.Server, destServiceAcct.Namespace)
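
The hunk above tightens validation in three steps: a literal `!` anywhere in the server or namespace is rejected (not just the exact string `!*`), the default service account must be non-empty and free of glob/path characters, and both patterns must compile as globs. A compact restatement of those rules (a sketch for pre-checking entries, not the method itself):

```go
package validation

import (
    "fmt"
    "strings"

    globutil "github.com/gobwas/glob"
)

const serviceAccountDisallowedCharSet = "!*[]{}\\/"

// validateEntry mirrors the checks ValidateProject applies to each
// destinationServiceAccounts entry.
func validateEntry(server, namespace, defaultSA string) error {
    if strings.Contains(server, "!") {
        return fmt.Errorf("server has an invalid format, '%s'", server)
    }
    if strings.Contains(namespace, "!") {
        return fmt.Errorf("namespace has an invalid format, '%s'", namespace)
    }
    if strings.TrimSpace(defaultSA) == "" || strings.ContainsAny(defaultSA, serviceAccountDisallowedCharSet) {
        return fmt.Errorf("defaultServiceAccount has an invalid format, '%s'", defaultSA)
    }
    if _, err := globutil.Compile(server); err != nil {
        return fmt.Errorf("server has an invalid format, '%s'", server)
    }
    if _, err := globutil.Compile(namespace); err != nil {
        return fmt.Errorf("namespace has an invalid format, '%s'", namespace)
    }
    return nil // note: "myns:test-sa" stays valid, since ':' is not in the disallowed set
}
```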
@@ -156,7 +156,7 @@ message ApplicationDestinationServiceAccount {
    // Namespace specifies the target namespace for the application's resources.
    optional string namespace = 2;

    // ServiceAccountName to be used for impersonation during the sync operation
    // DefaultServiceAccount to be used for impersonation during the sync operation
    optional string defaultServiceAccount = 3;
}

@@ -2379,11 +2379,11 @@ func (s *SyncWindows) HasWindows() bool {
}

// Active returns a list of sync windows that are currently active
func (s *SyncWindows) Active() *SyncWindows {
func (s *SyncWindows) Active() (*SyncWindows, error) {
    return s.active(time.Now())
}

func (s *SyncWindows) active(currentTime time.Time) *SyncWindows {
func (s *SyncWindows) active(currentTime time.Time) (*SyncWindows, error) {
    // If SyncWindows.Active() is called outside of a UTC locale, it should be
    // first converted to UTC before we scan through the SyncWindows.
    currentTime = currentTime.In(time.UTC)
@@ -2392,8 +2392,14 @@ func (s *SyncWindows) active(currentTime time.Time) *SyncWindows {
        var active SyncWindows
        specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
        for _, w := range *s {
            schedule, _ := specParser.Parse(w.Schedule)
            duration, _ := time.ParseDuration(w.Duration)
            schedule, sErr := specParser.Parse(w.Schedule)
            if sErr != nil {
                return nil, fmt.Errorf("cannot parse schedule '%s': %w", w.Schedule, sErr)
            }
            duration, dErr := time.ParseDuration(w.Duration)
            if dErr != nil {
                return nil, fmt.Errorf("cannot parse duration '%s': %w", w.Duration, dErr)
            }

            // Offset the nextWindow time to consider the timeZone of the sync window
            timeZoneOffsetDuration := w.scheduleOffsetByTimeZone()
@@ -2403,20 +2409,20 @@ func (s *SyncWindows) active(currentTime time.Time) *SyncWindows {
            }
        }
        if len(active) > 0 {
            return &active
            return &active, nil
        }
    }
    return nil
    return nil, nil
}

// InactiveAllows will iterate over the SyncWindows and return all inactive allow windows
// for the current time. If the current time is in an inactive allow window, syncs will
// be denied.
func (s *SyncWindows) InactiveAllows() *SyncWindows {
func (s *SyncWindows) InactiveAllows() (*SyncWindows, error) {
    return s.inactiveAllows(time.Now())
}

func (s *SyncWindows) inactiveAllows(currentTime time.Time) *SyncWindows {
func (s *SyncWindows) inactiveAllows(currentTime time.Time) (*SyncWindows, error) {
    // If SyncWindows.InactiveAllows() is called outside of a UTC locale, it should be
    // first converted to UTC before we scan through the SyncWindows.
    currentTime = currentTime.In(time.UTC)
@@ -2427,21 +2433,27 @@ func (s *SyncWindows) inactiveAllows(currentTime time.Time) *SyncWindows {
        for _, w := range *s {
            if w.Kind == "allow" {
                schedule, sErr := specParser.Parse(w.Schedule)
                if sErr != nil {
                    return nil, fmt.Errorf("cannot parse schedule '%s': %w", w.Schedule, sErr)
                }
                duration, dErr := time.ParseDuration(w.Duration)
                if dErr != nil {
                    return nil, fmt.Errorf("cannot parse duration '%s': %w", w.Duration, dErr)
                }
                // Offset the nextWindow time to consider the timeZone of the sync window
                timeZoneOffsetDuration := w.scheduleOffsetByTimeZone()
                nextWindow := schedule.Next(currentTime.Add(timeZoneOffsetDuration - duration))

                if !nextWindow.Before(currentTime.Add(timeZoneOffsetDuration)) && sErr == nil && dErr == nil {
                if !nextWindow.Before(currentTime.Add(timeZoneOffsetDuration)) {
                    inactive = append(inactive, w)
                }
            }
        }
        if len(inactive) > 0 {
            return &inactive
            return &inactive, nil
        }
    }
    return nil
    return nil, nil
}

func (w *SyncWindow) scheduleOffsetByTimeZone() time.Duration {
@@ -2545,36 +2557,42 @@ func (w *SyncWindows) Matches(app *Application) *SyncWindows {
}

// CanSync returns true if a sync window currently allows a sync. isManual indicates whether the sync has been triggered manually.
func (w *SyncWindows) CanSync(isManual bool) bool {
func (w *SyncWindows) CanSync(isManual bool) (bool, error) {
    if !w.HasWindows() {
        return true
        return true, nil
    }

    active := w.Active()
    active, err := w.Active()
    if err != nil {
        return false, fmt.Errorf("invalid sync windows: %w", err)
    }
    hasActiveDeny, manualEnabled := active.hasDeny()

    if hasActiveDeny {
        if isManual && manualEnabled {
            return true
            return true, nil
        } else {
            return false
            return false, nil
        }
    }

    if active.hasAllow() {
        return true
        return true, nil
    }

    inactiveAllows := w.InactiveAllows()
    inactiveAllows, err := w.InactiveAllows()
    if err != nil {
        return false, fmt.Errorf("invalid sync windows: %w", err)
    }
    if inactiveAllows.HasWindows() {
        if isManual && inactiveAllows.manualEnabled() {
            return true
            return true, nil
        } else {
            return false
            return false, nil
        }
    }

    return true
    return true, nil
}
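
Since `CanSync` now returns `(bool, error)`, call sites have to distinguish "sync denied by a window" from "windows could not be evaluated". A sketch of the updated caller pattern (the surrounding names here are assumptions, not Argo CD's exact call site):

```go
package syncwindows

import "fmt"

type windows interface {
    CanSync(isManual bool) (bool, error)
}

func checkWindows(w windows, isManual bool) error {
    canSync, err := w.CanSync(isManual)
    if err != nil {
        // A malformed schedule or duration now surfaces here instead of
        // being silently swallowed by the old bool-only signature.
        return fmt.Errorf("cannot determine sync window state: %w", err)
    }
    if !canSync {
        return fmt.Errorf("sync blocked by an active deny window or an inactive allow window")
    }
    return nil
}
```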

// hasDeny will iterate over the SyncWindows and return if a deny window is found and if
@@ -2629,24 +2647,30 @@ func (w *SyncWindows) manualEnabled() bool {
}

// Active returns true if the sync window is currently active
func (w SyncWindow) Active() bool {
func (w SyncWindow) Active() (bool, error) {
    return w.active(time.Now())
}

func (w SyncWindow) active(currentTime time.Time) bool {
func (w SyncWindow) active(currentTime time.Time) (bool, error) {
    // If SyncWindow.Active() is called outside of a UTC locale, it should be
    // first converted to UTC before search
    currentTime = currentTime.UTC()

    specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
    schedule, _ := specParser.Parse(w.Schedule)
    duration, _ := time.ParseDuration(w.Duration)
    schedule, sErr := specParser.Parse(w.Schedule)
    if sErr != nil {
        return false, fmt.Errorf("cannot parse schedule '%s': %w", w.Schedule, sErr)
    }
    duration, dErr := time.ParseDuration(w.Duration)
    if dErr != nil {
        return false, fmt.Errorf("cannot parse duration '%s': %w", w.Duration, dErr)
    }

    // Offset the nextWindow time to consider the timeZone of the sync window
    timeZoneOffsetDuration := w.scheduleOffsetByTimeZone()
    nextWindow := schedule.Next(currentTime.Add(timeZoneOffsetDuration - duration))

    return nextWindow.Before(currentTime.Add(timeZoneOffsetDuration))
    return nextWindow.Before(currentTime.Add(timeZoneOffsetDuration)), nil
}
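
A worked example of the window-activity check above: rewinding `now` by the window's duration and asking the cron schedule for its next firing tells you whether a window opened within the last `duration`. A standalone sketch, assuming the same robfig/cron parser used here:

```go
package main

import (
    "fmt"
    "time"

    "github.com/robfig/cron/v3"
)

func main() {
    specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
    schedule, _ := specParser.Parse("0 10 * * *") // window opens daily at 10:00
    duration, _ := time.ParseDuration("1h")       // and stays open for one hour

    now := time.Date(2024, 10, 1, 10, 30, 0, 0, time.UTC)
    // The next firing as seen from now-1h is 10:00, which is before 10:30,
    // so the window is active; at 11:30 the next firing would be 10:00 of
    // the following day and the check would be false.
    nextWindow := schedule.Next(now.Add(-duration))
    fmt.Println(nextWindow.Before(now)) // true
}
```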
|
||||
|
||||
// Update updates a sync window's settings with the given parameter
|
||||
@@ -2767,11 +2791,11 @@ type KustomizeOptions struct {
|
||||
// ApplicationDestinationServiceAccount holds information about the service account to be impersonated for the application sync operation.
|
||||
type ApplicationDestinationServiceAccount struct {
|
||||
// Server specifies the URL of the target cluster's Kubernetes control plane API.
|
||||
Server string `json:"server,omitempty" protobuf:"bytes,1,opt,name=server"`
|
||||
Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
|
||||
// Namespace specifies the target namespace for the application's resources.
|
||||
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
|
||||
// ServiceAccountName to be used for impersonation during the sync operation
|
||||
DefaultServiceAccount string `json:"defaultServiceAccount,omitempty" protobuf:"bytes,3,opt,name=defaultServiceAccount"`
|
||||
// DefaultServiceAccount to be used for impersonation during the sync operation
|
||||
DefaultServiceAccount string `json:"defaultServiceAccount" protobuf:"bytes,3,opt,name=defaultServiceAccount"`
|
||||
}
|
||||
|
||||
// CascadedDeletion indicates if the deletion finalizer is set and controller should delete the application and it's cascaded resources

@@ -1778,7 +1778,9 @@ func TestSyncWindows_HasWindows(t *testing.T) {
func TestSyncWindows_Active(t *testing.T) {
	t.Run("WithTestProject", func(t *testing.T) {
		proj := newTestProjectWithSyncWindows()
		assert.Len(t, *proj.Spec.SyncWindows.Active(), 1)
		activeWindows, err := proj.Spec.SyncWindows.Active()
		require.NoError(t, err)
		assert.Len(t, *activeWindows, 1)
	})

	syncWindow := func(kind string, schedule string, duration string, timeZone string) *SyncWindow {
@@ -1805,6 +1807,7 @@ func TestSyncWindows_Active(t *testing.T) {
		currentTime    time.Time
		matchingIndex  int
		expectedLength int
		isErr          bool
	}{
		{
			name: "MatchFirst",
@@ -1912,11 +1915,36 @@ func TestSyncWindows_Active(t *testing.T) {
			matchingIndex:  0,
			expectedLength: 1,
		},
		{
			name: "MatchNone-InvalidSchedule",
			syncWindow: SyncWindows{
				syncWindow("allow", "* 10 * * 7", "3h", ""),
				syncWindow("allow", "* 11 * * 7", "3h", ""),
			},
			currentTime:    timeWithHour(12, time.UTC),
			expectedLength: 0,
			isErr:          true,
		},
		{
			name: "MatchNone-InvalidDuration",
			syncWindow: SyncWindows{
				syncWindow("allow", "* 10 * * *", "3a", ""),
				syncWindow("allow", "* 11 * * *", "3a", ""),
			},
			currentTime:    timeWithHour(12, time.UTC),
			expectedLength: 0,
			isErr:          true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.syncWindow.active(tt.currentTime)
			result, err := tt.syncWindow.active(tt.currentTime)
			if tt.isErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			if result == nil {
				result = &SyncWindows{}
			}
@@ -1933,7 +1961,9 @@ func TestSyncWindows_InactiveAllows(t *testing.T) {
	t.Run("WithTestProject", func(t *testing.T) {
		proj := newTestProjectWithSyncWindows()
		proj.Spec.SyncWindows[0].Schedule = "0 0 1 1 1"
		assert.Len(t, *proj.Spec.SyncWindows.InactiveAllows(), 1)
		inactiveAllowWindows, err := proj.Spec.SyncWindows.InactiveAllows()
		require.NoError(t, err)
		assert.Len(t, *inactiveAllowWindows, 1)
	})

	syncWindow := func(kind string, schedule string, duration string, timeZone string) *SyncWindow {
@@ -1960,6 +1990,7 @@ func TestSyncWindows_InactiveAllows(t *testing.T) {
		currentTime    time.Time
		matchingIndex  int
		expectedLength int
		isErr          bool
	}{
		{
			name: "MatchFirst",
@@ -2085,11 +2116,34 @@ func TestSyncWindows_InactiveAllows(t *testing.T) {
			matchingIndex:  0,
			expectedLength: 1,
		},
		{
			name: "MatchNone-InvalidSchedule",
			syncWindow: SyncWindows{
				syncWindow("allow", "* 10 * * 7", "2h", ""),
			},
			currentTime:    timeWithHour(17, time.UTC),
			expectedLength: 0,
			isErr:          true,
		},
		{
			name: "MatchNone-InvalidDuration",
			syncWindow: SyncWindows{
				syncWindow("allow", "* 10 * * *", "2a", ""),
			},
			currentTime:    timeWithHour(17, time.UTC),
			expectedLength: 0,
			isErr:          true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.syncWindow.inactiveAllows(tt.currentTime)
			result, err := tt.syncWindow.inactiveAllows(tt.currentTime)
			if tt.isErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			if result == nil {
				result = &SyncWindows{}
			}
@@ -2200,9 +2254,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
		proj := newProjectBuilder().withInactiveDenyWindow(true).build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will allow manual sync if inactive-deny-window set with manual false", func(t *testing.T) {
@@ -2211,9 +2266,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
		proj := newProjectBuilder().withInactiveDenyWindow(false).build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will deny manual sync if one inactive-allow-windows set with manual false", func(t *testing.T) {
@@ -2225,9 +2281,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will allow manual sync if on active-allow-window set with manual true", func(t *testing.T) {
@@ -2238,9 +2295,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will allow manual sync if on active-allow-window set with manual false", func(t *testing.T) {
@@ -2251,9 +2309,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will allow auto sync if on active-allow-window", func(t *testing.T) {
@@ -2264,9 +2323,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will allow manual sync active-allow and inactive-deny", func(t *testing.T) {
@@ -2278,9 +2338,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will allow auto sync active-allow and inactive-deny", func(t *testing.T) {
@@ -2292,9 +2353,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will deny manual sync inactive-allow", func(t *testing.T) {
@@ -2305,9 +2367,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will deny auto sync inactive-allow", func(t *testing.T) {
@@ -2318,9 +2381,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will allow manual sync inactive-allow with ManualSync enabled", func(t *testing.T) {
@@ -2331,9 +2395,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will deny auto sync inactive-allow with ManualSync enabled", func(t *testing.T) {
@@ -2344,9 +2409,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will deny manual sync with inactive-allow and inactive-deny", func(t *testing.T) {
@@ -2358,9 +2424,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will deny auto sync with inactive-allow and inactive-deny", func(t *testing.T) {
@@ -2372,9 +2439,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will allow auto sync with active-allow and inactive-allow", func(t *testing.T) {
@@ -2386,9 +2454,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will deny manual sync with active-deny", func(t *testing.T) {
@@ -2399,9 +2468,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will deny auto sync with active-deny", func(t *testing.T) {
@@ -2412,9 +2482,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will allow manual sync with active-deny with ManualSync enabled", func(t *testing.T) {
@@ -2425,9 +2496,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will deny auto sync with active-deny with ManualSync enabled", func(t *testing.T) {
@@ -2438,9 +2510,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will deny manual sync with many active-deny having one with ManualSync disabled", func(t *testing.T) {
@@ -2454,9 +2527,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will deny auto sync with many active-deny having one with ManualSync disabled", func(t *testing.T) {
@@ -2470,9 +2544,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will deny manual sync with active-deny and active-allow windows with ManualSync disabled", func(t *testing.T) {
@@ -2484,9 +2559,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will allow manual sync with active-deny and active-allow windows with ManualSync enabled", func(t *testing.T) {
@@ -2498,9 +2574,10 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(true)
		canSync, err := proj.Spec.SyncWindows.CanSync(true)

		// then
		require.NoError(t, err)
		assert.True(t, canSync)
	})
	t.Run("will deny auto sync with active-deny and active-allow windows with ManualSync enabled", func(t *testing.T) {
@@ -2512,9 +2589,24 @@ func TestSyncWindows_CanSync(t *testing.T) {
			build()

		// when
		canSync := proj.Spec.SyncWindows.CanSync(false)
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.NoError(t, err)
		assert.False(t, canSync)
	})
	t.Run("will deny and return error with invalid windows", func(t *testing.T) {
		// given
		t.Parallel()
		proj := newProjectBuilder().
			withInvalidWindows().
			build()

		// when
		canSync, err := proj.Spec.SyncWindows.CanSync(false)

		// then
		require.Error(t, err)
		assert.False(t, canSync)
	})
}
@@ -2564,8 +2656,9 @@ func TestSyncWindows_hasAllow(t *testing.T) {
func TestSyncWindow_Active(t *testing.T) {
	window := &SyncWindow{Schedule: "* * * * *", Duration: "1h"}
	t.Run("ActiveWindow", func(t *testing.T) {
		window.Active()
		assert.True(t, window.Active())
		isActive, err := window.Active()
		require.NoError(t, err)
		assert.True(t, isActive)
	})

	syncWindow := func(kind string, schedule string, duration string) SyncWindow {
@@ -2590,6 +2683,7 @@ func TestSyncWindow_Active(t *testing.T) {
		syncWindow     SyncWindow
		currentTime    time.Time
		expectedResult bool
		isErr          bool
	}{
		{
			name: "Allow-active",
@@ -2639,11 +2733,44 @@ func TestSyncWindow_Active(t *testing.T) {
			currentTime:    timeWithHour(13-4, utcM4Zone),
			expectedResult: false,
		},
		{
			name:           "Allow-inactive-InvalidSchedule",
			syncWindow:     syncWindow("allow", "* 10 * * 7", "2h"),
			currentTime:    timeWithHour(11, time.UTC),
			expectedResult: false,
			isErr:          true,
		},
		{
			name:           "Deny-inactive-InvalidSchedule",
			syncWindow:     syncWindow("deny", "* 10 * * 7", "2h"),
			currentTime:    timeWithHour(11, time.UTC),
			expectedResult: false,
			isErr:          true,
		},
		{
			name:           "Allow-inactive-InvalidDuration",
			syncWindow:     syncWindow("allow", "* 10 * * *", "2a"),
			currentTime:    timeWithHour(11, time.UTC),
			expectedResult: false,
			isErr:          true,
		},
		{
			name:           "Deny-inactive-InvalidDuration",
			syncWindow:     syncWindow("deny", "* 10 * * *", "2a"),
			currentTime:    timeWithHour(11, time.UTC),
			expectedResult: false,
			isErr:          true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.syncWindow.active(tt.currentTime)
			result, err := tt.syncWindow.active(tt.currentTime)
			if tt.isErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			assert.Equal(t, tt.expectedResult, result)
		})
	}
@@ -2755,6 +2882,16 @@ func (b *projectBuilder) withInactiveDenyWindow(allowManual bool) *projectBuilde
	return b
}

func (b *projectBuilder) withInvalidWindows() *projectBuilder {
	b.proj.Spec.SyncWindows = append(b.proj.Spec.SyncWindows,
		newSyncWindow("allow", "* 10 * * 7", false),
		newSyncWindow("deny", "* 10 * * 7", false),
		newSyncWindow("allow", "* 10 * * 7", true),
		newSyncWindow("deny", "* 10 * * 7", true),
	)
	return b
}

func inactiveCronSchedule() string {
	hourPlus10, _, _ := time.Now().Add(10 * time.Hour).Clock()
	return fmt.Sprintf("0 %d * * *", hourPlus10)
@@ -3959,3 +4096,158 @@ func TestApplicationTree_Merge(t *testing.T) {
		},
	}, tree)
}

func TestAppProject_ValidateDestinationServiceAccount(t *testing.T) {
	testData := []struct {
		server                string
		namespace             string
		defaultServiceAccount string
		expectedErrMsg        string
	}{
		{
			// Given, a project
			// When, a default destination service account with all valid fields is added to it,
			// Then, there is no error.
			server:                "https://192.168.99.100:8443",
			namespace:             "test-ns",
			defaultServiceAccount: "test-sa",
			expectedErrMsg:        "",
		},
		{
			// Given, a project
			// When, a default destination service account with negation glob pattern for server is added,
			// Then, there is an error with appropriate message.
			server:                "!abc",
			namespace:             "test-ns",
			defaultServiceAccount: "test-sa",
			expectedErrMsg:        "server has an invalid format, '!abc'",
		},
		{
			// Given, a project
			// When, a default destination service account with empty namespace is added to it,
			// Then, there is no error.
			server:                "https://192.168.99.100:8443",
			namespace:             "",
			defaultServiceAccount: "test-sa",
			expectedErrMsg:        "",
		},
		{
			// Given, a project,
			// When, a default destination service account with negation glob pattern for server is added,
			// Then, there is an error with appropriate message.
			server:                "!*",
			namespace:             "test-ns",
			defaultServiceAccount: "test-sa",
			expectedErrMsg:        "server has an invalid format, '!*'",
		},
		{
			// Given, a project,
			// When, a default destination service account with negation glob pattern for namespace is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "!*",
			defaultServiceAccount: "test-sa",
			expectedErrMsg:        "namespace has an invalid format, '!*'",
		},
		{
			// Given, a project,
			// When, a default destination service account with negation glob pattern for namespace is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "!abc",
			defaultServiceAccount: "test-sa",
			expectedErrMsg:        "namespace has an invalid format, '!abc'",
		},
		{
			// Given, a project,
			// When, a default destination service account with empty service account is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "test-ns",
			defaultServiceAccount: "",
			expectedErrMsg:        "defaultServiceAccount has an invalid format, ''",
		},
		{
			// Given, a project,
			// When, a default destination service account with service account having just white spaces is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "test-ns",
			defaultServiceAccount: " ",
			expectedErrMsg:        "defaultServiceAccount has an invalid format, ' '",
		},
		{
			// Given, a project,
			// When, a default destination service account with service account having backwards slash char is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "test-ns",
			defaultServiceAccount: "test\\sa",
			expectedErrMsg:        "defaultServiceAccount has an invalid format, 'test\\sa'",
		},
		{
			// Given, a project,
			// When, a default destination service account with service account having forward slash char is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "test-ns",
			defaultServiceAccount: "test/sa",
			expectedErrMsg:        "defaultServiceAccount has an invalid format, 'test/sa'",
		},
		{
			// Given, a project,
			// When, a default destination service account with service account having square braces char is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "test-ns",
			defaultServiceAccount: "[test-sa]",
			expectedErrMsg:        "defaultServiceAccount has an invalid format, '[test-sa]'",
		},
		{
			// Given, a project,
			// When, a default destination service account with service account having curly braces char is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "test-ns",
			defaultServiceAccount: "{test-sa}",
			expectedErrMsg:        "defaultServiceAccount has an invalid format, '{test-sa}'",
		},
		{
			// Given, a project,
			// When, a default destination service account with an invalid glob pattern for server is added,
			// Then, there is an error with appropriate message.
			server:                "[[ech*",
			namespace:             "test-ns",
			defaultServiceAccount: "test-sa",
			expectedErrMsg:        "server has an invalid format, '[[ech*'",
		},
		{
			// Given, a project,
			// When, a default destination service account with an invalid glob pattern for namespace is added,
			// Then, there is an error with appropriate message.
			server:                "https://192.168.99.100:8443",
			namespace:             "[[ech*",
			defaultServiceAccount: "test-sa",
			expectedErrMsg:        "namespace has an invalid format, '[[ech*'",
		},
	}
	for _, data := range testData {
		proj := AppProject{
			Spec: AppProjectSpec{
				DestinationServiceAccounts: []ApplicationDestinationServiceAccount{
					{
						Server:                data.server,
						Namespace:             data.namespace,
						DefaultServiceAccount: data.defaultServiceAccount,
					},
				},
			},
		}
		err := proj.ValidateProject()
		if data.expectedErrMsg == "" {
			require.NoError(t, err)
		} else {
			require.ErrorContains(t, err, data.expectedErrMsg)
		}
	}
}

@@ -14,10 +14,10 @@ if obj.status.status ~= nil then
  -- "root" policy
  for i, entry in ipairs(obj.status.status) do
    if entry.compliant ~= "Compliant" then
      noncompliants[i] = entry.clustername
      table.insert(noncompliants, entry.clustername)
    end
  end
  if table.getn(noncompliants) == 0 then
  if #noncompliants == 0 then
    hs.message = "All clusters are compliant"
  else
    hs.message = "NonCompliant clusters: " .. table.concat(noncompliants, ", ")
@@ -26,10 +26,10 @@ elseif obj.status.details ~= nil then
  -- "replicated" policy
  for i, entry in ipairs(obj.status.details) do
    if entry.compliant ~= "Compliant" then
      noncompliants[i] = entry.templateMeta.name
      table.insert(noncompliants, entry.templateMeta.name)
    end
  end
  if table.getn(noncompliants) == 0 then
  if #noncompliants == 0 then
    hs.message = "All templates are compliant"
  else
    hs.message = "NonCompliant templates: " .. table.concat(noncompliants, ", ")

@@ -0,0 +1,88 @@
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
  name: open-cluster-management-global-set.argo-example
  namespace: local-cluster
  labels:
    policy.open-cluster-management.io/cluster-name: local-cluster
    policy.open-cluster-management.io/cluster-namespace: local-cluster
    policy.open-cluster-management.io/root-policy: open-cluster-management-global-set.argo-example
spec:
  disabled: false
  policy-templates:
    - objectDefinition:
        apiVersion: policy.open-cluster-management.io/v1
        kind: ConfigurationPolicy
        metadata:
          name: example-namespace
        spec:
          object-templates:
            - complianceType: musthave
              objectDefinition:
                apiVersion: v1
                kind: Namespace
                metadata:
                  name: example
          remediationAction: inform
          severity: low
    - objectDefinition:
        apiVersion: policy.open-cluster-management.io/v1
        kind: ConfigurationPolicy
        metadata:
          name: example-pod
        spec:
          namespaceSelector:
            exclude:
              - kube-*
            include:
              - default
          object-templates:
            - complianceType: musthave
              objectDefinition:
                apiVersion: v1
                kind: Pod
                metadata:
                  name: foobar
                spec:
                  containers:
                    - image: 'registry.redhat.io/rhel9/httpd-24:latest'
                      name: httpd
                      securityContext:
                        allowPrivilegeEscalation: false
                        capabilities:
                          drop:
                            - ALL
                        privileged: false
                        runAsNonRoot: true
          remediationAction: enforce
          severity: low
status:
  compliant: NonCompliant
  details:
    - compliant: Compliant
      history:
        - eventName: open-cluster-management-global-set.argo-example.17e7034c879045a3
          lastTimestamp: '2024-07-30T14:16:49Z'
          message: 'Compliant; notification - pods [foobar] was created successfully in namespace default'
      templateMeta:
        creationTimestamp: null
        name: example-foo
    - compliant: NonCompliant
      history:
        - eventName: open-cluster-management-global-set.argo-example.17e701cc5101e3a4
          lastTimestamp: '2024-07-30T13:49:19Z'
          message: 'NonCompliant; violation - namespaces [example] not found'
      templateMeta:
        creationTimestamp: null
        name: example-namespace
    - compliant: Compliant
      history:
        - eventName: open-cluster-management-global-set.argo-example.17e7034c879045a3
          lastTimestamp: '2024-07-30T14:16:49Z'
          message: 'Compliant; notification - pods [foobar] was created successfully in namespace default'
        - eventName: open-cluster-management-global-set.argo-example.17e7020b47782ddc
          lastTimestamp: '2024-07-30T13:53:49Z'
          message: 'NonCompliant; violation - pods [foobar] not found in namespace default'
      templateMeta:
        creationTimestamp: null
        name: example-pod

@@ -1,50 +1,59 @@
-- isInferenceServiceInRawDeploymentMode determines if the inference service is deployed in RawDeployment mode
-- KServe v12 and above supports Rawdeployment for Inference graphs. For Inference services, KServe has supported the RawDeployment mode since [v0.7.0](https://github.com/kserve/kserve/releases/tag/v0.7.0).
function isInferenceServiceInRawDeploymentMode(obj)
  if obj.metadata.annotations == nil then
    return false
  end
  local deploymentMode = obj.metadata.annotations["serving.kserve.io/deploymentMode"]
  return deploymentMode ~= nil and deploymentMode == "RawDeployment"
end

local health_status = {}

health_status.status = "Progressing"
health_status.message = "Waiting for status update."
if obj.status ~= nil and obj.status.conditions ~= nil then
  local status_true = 0
health_status.message = "Waiting for InferenceService to report status..."

if obj.status ~= nil then

  local progressing = false
  local degraded = false
  local status_false = 0
  local status_unknown = 0
  health_status.message = ""
  for i, condition in pairs(obj.status.conditions) do
    if condition.status == "True" and (condition.type == "IngressReady" or condition.type == "PredictorConfigurationReady" or condition.type == "PredictorReady" or condition.type == "PredictorRouteReady" or condition.type == "Ready") then
      status_true = status_true + 1
    elseif condition.status == "False" or condition.status == "Unknown" then
      msg = condition.type .. " is " .. condition.status
      if condition.reason ~= nil and condition.reason ~= "" then
        msg = msg .. ", since " .. condition.reason .. "."
      end
      if condition.message ~= nil and condition.message ~= "" then
        msg = msg .. " " .. condition.message
      end
      health_status.message = health_status.message .. msg .. "\n"
      if condition.status == "False" then
        status_false = status_false + 1
  local msg = ""

  if obj.status.modelStatus ~= nil then
    if obj.status.modelStatus.transitionStatus ~= "UpToDate" then
      if obj.status.modelStatus.transitionStatus == "InProgress" then
        progressing = true
      else
        status_unknown = status_unknown + 1
        degraded = true
      end
      msg = msg .. "0: transitionStatus | " .. obj.status.modelStatus.transitionStatus
    end
  end
  if ((isInferenceServiceInRawDeploymentMode(obj) and status_true == 3) or status_true == 5) and status_false == 0 and status_unknown == 0 then
    health_status.message = "Inference Service is healthy."
    health_status.status = "Healthy"
    return health_status
  elseif status_false > 0 then
    health_status.status = "Degraded"
    return health_status
  else
    health_status.status = "Progressing"
    return health_status

  if obj.status.conditions ~= nil then
    for i, condition in pairs(obj.status.conditions) do

      if condition.status == "Unknown" then
        status_unknown = status_unknown + 1
      elseif condition.status == "False" then
        status_false = status_false + 1
      end

      if condition.status ~= "True" then
        msg = msg .. " | " .. i .. ": " .. condition.type .. " | " .. condition.status
        if condition.reason ~= nil and condition.reason ~= "" then
          msg = msg .. " | " .. condition.reason
        end
        if condition.message ~= nil and condition.message ~= "" then
          msg = msg .. " | " .. condition.message
        end
      end

    end

    if progressing == false and degraded == false and status_unknown == 0 and status_false == 0 then
      health_status.status = "Healthy"
      msg = "InferenceService is healthy."
    elseif degraded == false and status_unknown >= 0 then
      health_status.status = "Progressing"
    else
      health_status.status = "Degraded"
    end

    health_status.message = msg
  end
end
return health_status

return health_status

@@ -1,17 +1,41 @@
tests:
- healthStatus:
    status: Progressing
    message: "PredictorConfigurationReady is Unknown\nPredictorReady is Unknown, since RevisionMissing. Configuration \"hello-world-predictor-default\" is waiting for a Revision to become ready.\nPredictorRouteReady is Unknown, since RevisionMissing. Configuration \"hello-world-predictor-default\" is waiting for a Revision to become ready.\nReady is Unknown, since RevisionMissing. Configuration \"hello-world-predictor-default\" is waiting for a Revision to become ready.\n"
    message: ' | 1: PredictorConfigurationReady | Unknown | 2: PredictorReady | Unknown | RevisionMissing | Configuration "hello-world-predictor-default" is waiting for a Revision to become ready. | 3: PredictorRouteReady | Unknown | RevisionMissing | Configuration "hello-world-predictor-default" is waiting for a Revision to become ready. | 4: Ready | Unknown | RevisionMissing | Configuration "hello-world-predictor-default" is waiting for a Revision to become ready.'
  inputPath: testdata/progressing.yaml
- healthStatus:
    status: Progressing
    message: '0: transitionStatus | InProgress | 1: LatestDeploymentReady | Unknown | PredictorConfigurationReady not ready | 2: PredictorConfigurationReady | Unknown | 3: PredictorReady | Unknown | RevisionMissing | Configuration "helloworld-predictor" is waiting for a Revision to become ready. | 4: PredictorRouteReady | Unknown | RevisionMissing | Configuration "helloworld-predictor" is waiting for a Revision to become ready. | 5: Ready | Unknown | RevisionMissing | Configuration "helloworld-predictor" is waiting for a Revision to become ready. | 6: RoutesReady | Unknown | PredictorRouteReady not ready'
  inputPath: testdata/progressing_ocp.yaml
- healthStatus:
    status: Progressing
    message: "0: transitionStatus | InProgress | 1: PredictorReady | False | 2: Ready | False"
  inputPath: testdata/progressing_modelmesh.yaml
- healthStatus:
    status: Degraded
    message: "IngressReady is False, since Predictor ingress not created.\nPredictorConfigurationReady is False, since RevisionFailed. Revision \"helloworld-00002\" failed with message: Container failed with: container exited with no error.\nPredictorReady is False, since RevisionFailed. Revision \"helloworld-00002\" failed with message: Container failed with: container exited with no error.\nReady is False, since Predictor ingress not created.\n"
    message: '0: transitionStatus | BlockedByFailedLoad | 1: IngressReady | False | Predictor ingress not created | 2: PredictorConfigurationReady | False | RevisionFailed | Revision "helloworld-00002" failed with message: Container failed with: container exited with no error. | 3: PredictorReady | False | RevisionFailed | Revision "helloworld-00002" failed with message: Container failed with: container exited with no error. | 5: Ready | False | Predictor ingress not created'
  inputPath: testdata/degraded.yaml
- healthStatus:
    status: Degraded
    message: '0: transitionStatus | BlockedByFailedLoad | 1: LatestDeploymentReady | False | PredictorConfigurationReady not ready | 2: PredictorConfigurationReady | False | RevisionFailed | Revision "helloworld-predictor-00002" failed with message: . | 3: PredictorReady | False | RevisionMissing | Configuration "helloworld-predictor" does not have any ready Revision. | 4: PredictorRouteReady | False | RevisionMissing | Configuration "helloworld-predictor" does not have any ready Revision. | 5: Ready | False | RevisionMissing | Configuration "helloworld-predictor" does not have any ready Revision. | 6: RoutesReady | False | PredictorRouteReady not ready'
  inputPath: testdata/degraded_ocp.yaml
- healthStatus:
    status: Degraded
    message: "0: transitionStatus | BlockedByFailedLoad"
  inputPath: testdata/degraded_modelmesh.yaml
- healthStatus:
    status: Healthy
    message: Inference Service is healthy.
    message: InferenceService is healthy.
  inputPath: testdata/healthy.yaml
- healthStatus:
    status: Healthy
    message: Inference Service is healthy.
    message: InferenceService is healthy.
  inputPath: testdata/healthy_ocp.yaml
- healthStatus:
    status: Healthy
    message: InferenceService is healthy.
  inputPath: testdata/healthy_modelmesh.yaml
- healthStatus:
    status: Healthy
    message: InferenceService is healthy.
  inputPath: testdata/healthy_raw.yaml

@@ -28,3 +28,5 @@ status:
    reason: Predictor ingress not created
    status: "False"
    type: Ready
  modelStatus:
    transitionStatus: BlockedByFailedLoad
16 resource_customizations/serving.kserve.io/InferenceService/testdata/degraded_modelmesh.yaml vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  name: helloworld
  namespace: default
spec: {}
status:
  conditions:
  - lastTransitionTime: '2024-05-30T22:43:16Z'
    status: 'True'
    type: PredictorReady
  - lastTransitionTime: '2024-05-30T22:43:16Z'
    status: 'True'
    type: Ready
  modelStatus:
    transitionStatus: BlockedByFailedLoad

42 resource_customizations/serving.kserve.io/InferenceService/testdata/degraded_ocp.yaml vendored Normal file
@@ -0,0 +1,42 @@
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  name: helloworld
  namespace: default
spec: {}
status:
  conditions:
  - lastTransitionTime: '2024-05-30T23:03:45Z'
    reason: PredictorConfigurationReady not ready
    severity: Info
    status: 'False'
    type: LatestDeploymentReady
  - lastTransitionTime: '2024-05-30T23:03:45Z'
    message: 'Revision "helloworld-predictor-00002" failed with message: .'
    reason: RevisionFailed
    severity: Info
    status: 'False'
    type: PredictorConfigurationReady
  - lastTransitionTime: '2024-05-30T23:03:45Z'
    message: Configuration "helloworld-predictor" does not have any ready Revision.
    reason: RevisionMissing
    status: 'False'
    type: PredictorReady
  - lastTransitionTime: '2024-05-30T23:03:45Z'
    message: Configuration "helloworld-predictor" does not have any ready Revision.
    reason: RevisionMissing
    severity: Info
    status: 'False'
    type: PredictorRouteReady
  - lastTransitionTime: '2024-05-30T23:03:45Z'
    message: Configuration "helloworld-predictor" does not have any ready Revision.
    reason: RevisionMissing
    status: 'False'
    type: Ready
  - lastTransitionTime: '2024-05-30T23:03:45Z'
    reason: PredictorRouteReady not ready
    severity: Info
    status: 'False'
    type: RoutesReady
  modelStatus:
    transitionStatus: BlockedByFailedLoad

16 resource_customizations/serving.kserve.io/InferenceService/testdata/healthy_modelmesh.yaml vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  name: helloworld
  namespace: default
spec: {}
status:
  conditions:
  - lastTransitionTime: '2024-05-30T22:43:16Z'
    status: 'True'
    type: PredictorReady
  - lastTransitionTime: '2024-05-30T22:43:16Z'
    status: 'True'
    type: Ready
  modelStatus:
    transitionStatus: UpToDate

35 resource_customizations/serving.kserve.io/InferenceService/testdata/healthy_ocp.yaml vendored Normal file
@@ -0,0 +1,35 @@
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  name: helloworld
  namespace: default
spec: {}
status:
  conditions:
  - lastTransitionTime: '2024-05-30T22:14:31Z'
    status: 'True'
    type: IngressReady
  - lastTransitionTime: '2024-05-30T22:14:30Z'
    severity: Info
    status: 'True'
    type: LatestDeploymentReady
  - lastTransitionTime: '2024-05-30T22:14:30Z'
    severity: Info
    status: 'True'
    type: PredictorConfigurationReady
  - lastTransitionTime: '2024-05-30T22:14:31Z'
    status: 'True'
    type: PredictorReady
  - lastTransitionTime: '2024-05-30T22:14:31Z'
    severity: Info
    status: 'True'
    type: PredictorRouteReady
  - lastTransitionTime: '2024-05-30T22:14:31Z'
    status: 'True'
    type: Ready
  - lastTransitionTime: '2024-05-30T22:14:31Z'
    severity: Info
    status: 'True'
    type: RoutesReady
  modelStatus:
    transitionStatus: UpToDate

16 resource_customizations/serving.kserve.io/InferenceService/testdata/progressing_modelmesh.yaml vendored Normal file
@@ -0,0 +1,16 @@
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  name: helloworld
  namespace: default
spec: {}
status:
  conditions:
  - lastTransitionTime: '2024-05-30T22:43:16Z'
    status: 'False'
    type: PredictorReady
  - lastTransitionTime: '2024-05-30T22:43:16Z'
    status: 'False'
    type: Ready
  modelStatus:
    transitionStatus: InProgress

40 resource_customizations/serving.kserve.io/InferenceService/testdata/progressing_ocp.yaml vendored Normal file
@@ -0,0 +1,40 @@
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  name: helloworld
  namespace: default
spec: {}
status:
  conditions:
  - lastTransitionTime: '2024-05-30T22:29:46Z'
    reason: PredictorConfigurationReady not ready
    severity: Info
    status: Unknown
    type: LatestDeploymentReady
  - lastTransitionTime: '2024-05-30T22:29:46Z'
    severity: Info
    status: Unknown
    type: PredictorConfigurationReady
  - lastTransitionTime: '2024-05-30T22:29:46Z'
    message: Configuration "helloworld-predictor" is waiting for a Revision to become ready.
    reason: RevisionMissing
    status: Unknown
    type: PredictorReady
  - lastTransitionTime: '2024-05-30T22:29:46Z'
    message: Configuration "helloworld-predictor" is waiting for a Revision to become ready.
    reason: RevisionMissing
    severity: Info
    status: Unknown
    type: PredictorRouteReady
  - lastTransitionTime: '2024-05-30T22:29:46Z'
    message: Configuration "helloworld-predictor" is waiting for a Revision to become ready.
    reason: RevisionMissing
    status: Unknown
    type: Ready
  - lastTransitionTime: '2024-05-30T22:29:46Z'
    reason: PredictorRouteReady not ready
    severity: Info
    status: Unknown
    type: RoutesReady
  modelStatus:
    transitionStatus: InProgress

@@ -1886,7 +1886,11 @@ func (s *Server) Sync(ctx context.Context, syncReq *application.ApplicationSyncR

	s.inferResourcesStatusHealth(a)

	if !proj.Spec.SyncWindows.Matches(a).CanSync(true) {
	canSync, err := proj.Spec.SyncWindows.Matches(a).CanSync(true)
	if err != nil {
		return a, status.Errorf(codes.PermissionDenied, "cannot sync: invalid sync window: %v", err)
	}
	if !canSync {
		return a, status.Errorf(codes.PermissionDenied, "cannot sync: blocked by sync window")
	}

@@ -2603,10 +2607,17 @@ func (s *Server) GetApplicationSyncWindows(ctx context.Context, q *application.A
	}

	windows := proj.Spec.SyncWindows.Matches(a)
	sync := windows.CanSync(true)
	sync, err := windows.CanSync(true)
	if err != nil {
		return nil, fmt.Errorf("invalid sync windows: %w", err)
	}

	activeWindows, err := windows.Active()
	if err != nil {
		return nil, fmt.Errorf("invalid sync windows: %w", err)
	}
	res := &application.ApplicationSyncWindowsResponse{
		ActiveWindows:   convertSyncWindows(windows.Active()),
		ActiveWindows:   convertSyncWindows(activeWindows),
		AssignedWindows: convertSyncWindows(windows),
		CanSync:         &sync,
	}

@@ -144,8 +144,8 @@ func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) c

		_ = send(true)

		close(merged)
		ticker.Stop()
		close(merged)
	}()
	return merged
}

@@ -33,6 +33,12 @@ const (
	DefaultIdleConnectionTimeout = 60 * time.Second
	DefaultMaxIdleConnections    = 30

	// HeaderArgoCDNamespace defines the namespace of the
	// argo control plane to be passed to the extension handler.
	// Example:
	// Argocd-Namespace: "namespace"
	HeaderArgoCDNamespace = "Argocd-Namespace"

	// HeaderArgoCDApplicationName defines the name of the
	// expected application header to be passed to the extension
	// handler. The header value must follow the format:
@@ -333,6 +339,7 @@ type RbacEnforcer interface {
// and handling proxy extensions.
type Manager struct {
	log         *log.Entry
	namespace   string
	settings    SettingsGetter
	application ApplicationGetter
	project     ProjectGetter
@@ -355,9 +362,10 @@ type ExtensionMetricsRegistry interface {
}

// NewManager will initialize a new manager.
func NewManager(log *log.Entry, sg SettingsGetter, ag ApplicationGetter, pg ProjectGetter, rbac RbacEnforcer, ug UserGetter) *Manager {
func NewManager(log *log.Entry, namespace string, sg SettingsGetter, ag ApplicationGetter, pg ProjectGetter, rbac RbacEnforcer, ug UserGetter) *Manager {
	return &Manager{
		log:         log,
		namespace:   namespace,
		settings:    sg,
		application: ag,
		project:     pg,
@@ -740,7 +748,7 @@ func (m *Manager) CallExtension() func(http.ResponseWriter, *http.Request) {

		user := m.userGetter.GetUser(r.Context())
		groups := m.userGetter.GetGroups(r.Context())
		prepareRequest(r, extName, app, user, groups)
		prepareRequest(r, m.namespace, extName, app, user, groups)
		m.log.Debugf("proxying request for extension %q", extName)
		// httpsnoop package is used to properly wrap the responseWriter
		// and avoid optional interfaces issue:
@@ -763,11 +771,13 @@ func registerMetrics(extName string, metrics httpsnoop.Metrics, extensionMetrics
// the Argo CD extension API section from it. It provides additional information to
// the backend service appending them in the outgoing request headers. The appended
// headers are:
// - Control plane namespace
// - Cluster destination name
// - Cluster destination server
// - Argo CD authenticated username
func prepareRequest(r *http.Request, extName string, app *v1alpha1.Application, username string, groups []string) {
func prepareRequest(r *http.Request, namespace string, extName string, app *v1alpha1.Application, username string, groups []string) {
	r.URL.Path = strings.TrimPrefix(r.URL.Path, fmt.Sprintf("%s/%s", URLPrefix, extName))
	r.Header.Set(HeaderArgoCDNamespace, namespace)
	if app.Spec.Destination.Name != "" {
		r.Header.Set(HeaderArgoCDTargetClusterName, app.Spec.Destination.Name)
	}
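Note: an extension backend can now read the control-plane namespace from the proxied request. A minimal sketch of such a backend (the handler itself is illustrative; only the Argocd-Namespace header name comes from this diff):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		ns := r.Header.Get("Argocd-Namespace") // set by prepareRequest above
		fmt.Fprintf(w, "proxied from Argo CD control plane namespace %q\n", ns)
	})
	_ = http.ListenAndServe(":8080", nil)
}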

@@ -150,7 +150,7 @@ func TestRegisterExtensions(t *testing.T) {

	logger, _ := test.NewNullLogger()
	logEntry := logger.WithContext(context.Background())
	m := extension.NewManager(logEntry, settMock, nil, nil, nil, nil)
	m := extension.NewManager(logEntry, "", settMock, nil, nil, nil, nil)

	return &fixture{
		settingsGetterMock: settMock,
@@ -248,6 +248,7 @@ func TestCallExtension(t *testing.T) {
		userMock *mocks.UserGetter
		manager  *extension.Manager
	}
	defaultServerNamespace := "control-plane-ns"
	defaultProjectName := "project-name"

	setup := func() *fixture {
@@ -260,7 +261,7 @@ func TestCallExtension(t *testing.T) {

		logger, _ := test.NewNullLogger()
		logEntry := logger.WithContext(context.Background())
		m := extension.NewManager(logEntry, settMock, appMock, projMock, rbacMock, userMock)
		m := extension.NewManager(logEntry, defaultServerNamespace, settMock, appMock, projMock, rbacMock, userMock)
		m.AddMetricsRegistry(metricsMock)

		mux := http.NewServeMux()
@@ -444,6 +445,7 @@ func TestCallExtension(t *testing.T) {
		require.NoError(t, err)
		actual := strings.TrimSuffix(string(body), "\n")
		assert.Equal(t, backendResponse, actual)
		assert.Equal(t, defaultServerNamespace, resp.Header.Get(extension.HeaderArgoCDNamespace))
		assert.Equal(t, clusterURL, resp.Header.Get(extension.HeaderArgoCDTargetClusterURL))
		assert.Equal(t, "Bearer some-bearer-token", resp.Header.Get("Authorization"))
		assert.Equal(t, "some-user", resp.Header.Get(extension.HeaderArgoCDUsername))

@@ -525,7 +525,10 @@ func (s *Server) GetSyncWindowsState(ctx context.Context, q *project.SyncWindows

	res := &project.SyncWindowsResponse{}

	windows := proj.Spec.SyncWindows.Active()
	windows, err := proj.Spec.SyncWindows.Active()
	if err != nil {
		return nil, err
	}
	if windows.HasWindows() {
		res.Windows = *windows
	} else {

@@ -327,7 +327,7 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts, appsetOpts Applicatio
	ag := extension.NewDefaultApplicationGetter(appLister)
	pg := extension.NewDefaultProjectGetter(projLister, dbInstance)
	ug := extension.NewDefaultUserGetter(policyEnf)
	em := extension.NewManager(logger, sg, ag, pg, enf, ug)
	em := extension.NewManager(logger, opts.Namespace, sg, ag, pg, enf, ug)

	a := &ArgoCDServer{
		ArgoCDServerOpts: opts,

@@ -42,6 +42,31 @@ func (c *Consequences) Expect(e Expectation) *Consequences {
	return c
}

// ExpectConsistently will continuously evaluate a condition, and it must be true each time it is evaluated, otherwise the test is failed. The condition will be repeatedly evaluated until 'expirationDuration' is met, waiting 'waitDuration' after each success.
func (c *Consequences) ExpectConsistently(e Expectation, waitDuration time.Duration, expirationDuration time.Duration) *Consequences {
	// this invocation makes sure this func is not reported as the cause of the failure - we are a "test helper"
	c.context.t.Helper()

	expiration := time.Now().Add(expirationDuration)
	for time.Now().Before(expiration) {
		state, message := e(c)
		switch state {
		case succeeded:
			log.Infof("expectation succeeded: %s", message)
		case failed:
			c.context.t.Fatalf("failed expectation: %s", message)
			return c
		}

		// On condition success: wait, then retry
		log.Infof("Expectation '%s' passes, repeating to ensure consistency", message)
		time.Sleep(waitDuration)
	}

	// If the condition never failed before expiring, it is a pass.
	return c
}
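Note: a usage sketch for the new helper, following the e2e fixture's Given/When/Then style (the expectation and the durations are illustrative, not taken from this diff):

// Re-evaluate the expectation every 5s and require it to hold for 30s
// straight; a single failure fails the test immediately.
Given(t).
	Path("guestbook").
	When().
	CreateApp().
	Then().
	ExpectConsistently(SyncStatusIs(SyncStatusCodeOutOfSync), 5*time.Second, 30*time.Second)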
|
||||
|
||||
func (c *Consequences) And(block func(app *Application)) *Consequences {
|
||||
c.context.t.Helper()
|
||||
block(c.app())
|
||||
|
||||
@@ -619,3 +619,225 @@ func TestGetVirtualProjectMatch(t *testing.T) {
|
||||
_, err = fixture.RunCli("app", "sync", fixture.Name(), "--resource", ":Service:guestbook-ui", "--timeout", fmt.Sprintf("%v", 10))
|
||||
assert.Contains(t, err.Error(), "blocked by sync window")
|
||||
}
|
||||
|
||||
func TestAddProjectDestinationServiceAccount(t *testing.T) {
|
||||
fixture.EnsureCleanState(t)
|
||||
|
||||
projectName := "proj-" + strconv.FormatInt(time.Now().Unix(), 10)
|
||||
_, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.TestNamespace()).Create(
|
||||
context.Background(), &v1alpha1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: projectName}}, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to create project %v", err)
|
||||
}
|
||||
|
||||
// Given, an existing project
|
||||
// When, a default destination service account with all valid fields is added to it,
|
||||
// Then, there is no error.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
"test-sa",
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to add project destination service account %v", err)
|
||||
}
|
||||
|
||||
// Given, an existing project
|
||||
// When, a default destination service account with empty namespace is added to it,
|
||||
// Then, there is no error.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"",
|
||||
"test-sa",
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to add project destination service account %v", err)
|
||||
}
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account is added with a custom service account namespace,
|
||||
// Then, there is no error.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns1",
|
||||
"test-sa",
|
||||
"--service-account-namespace",
|
||||
"default",
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to add project destination service account %v", err)
|
||||
}
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a duplicate default destination service account is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
"test-sa",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already defined")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a duplicate default destination service account is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
"asdf",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already added")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with negation glob pattern for server is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"!*",
|
||||
"test-ns",
|
||||
"test-sa",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "server has an invalid format, '!*'")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with negation glob pattern for server is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"!abc",
|
||||
"test-ns",
|
||||
"test-sa",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "server has an invalid format, '!abc'")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with negation glob pattern for namespace is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"!*",
|
||||
"test-sa",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "namespace has an invalid format, '!*'")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with negation glob pattern for namespace is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"!abc",
|
||||
"test-sa",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "namespace has an invalid format, '!abc'")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with empty service account is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
"",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "defaultServiceAccount has an invalid format, ''")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with service account having just white spaces is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
" ",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "defaultServiceAccount has an invalid format, ' '")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with service account having backwards slash char is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
"test\\sa",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "defaultServiceAccount has an invalid format, 'test\\\\sa'")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with service account having forward slash char is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
"test/sa",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "defaultServiceAccount has an invalid format, 'test/sa'")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with service account having square braces char is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
"[test-sa]",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "defaultServiceAccount has an invalid format, '[test-sa]'")
|
||||
|
||||
// Given, an existing project,
|
||||
// When, a default destination service account with service account having curly braces char is added,
|
||||
// Then, there is an error with appropriate message.
|
||||
_, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
|
||||
"https://192.168.99.100:8443",
|
||||
"test-ns",
|
||||
"{test-sa}",
|
||||
)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "defaultServiceAccount has an invalid format, '{test-sa}'")
|
||||
|
||||
// Given, an existing project,
|
||||
    // When, a default destination service account with an invalid glob pattern for server is added,
    // Then, there is an error with appropriate message.
    _, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
        "[[ech*",
        "test-ns",
        "test-sa",
    )
    require.Error(t, err)
    assert.Contains(t, err.Error(), "server has an invalid format, '[[ech*'")

    // Given, an existing project,
    // When, a default destination service account with an invalid glob pattern for namespace is added,
    // Then, there is an error with appropriate message.
    _, err = fixture.RunCli("proj", "add-destination-service-account", projectName,
        "https://192.168.99.100:8443",
        "[[ech*",
        "test-sa",
    )
    require.Error(t, err)
    assert.Contains(t, err.Error(), "namespace has an invalid format, '[[ech*'")

    proj, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.TestNamespace()).Get(context.Background(), projectName, metav1.GetOptions{})
    require.NoError(t, err)
    assert.Equal(t, projectName, proj.Name)
    assert.Len(t, proj.Spec.DestinationServiceAccounts, 3)

    assert.Equal(t, "https://192.168.99.100:8443", proj.Spec.DestinationServiceAccounts[0].Server)
    assert.Equal(t, "test-ns", proj.Spec.DestinationServiceAccounts[0].Namespace)
    assert.Equal(t, "test-sa", proj.Spec.DestinationServiceAccounts[0].DefaultServiceAccount)

    assert.Equal(t, "https://192.168.99.100:8443", proj.Spec.DestinationServiceAccounts[1].Server)
    assert.Equal(t, "", proj.Spec.DestinationServiceAccounts[1].Namespace)
    assert.Equal(t, "test-sa", proj.Spec.DestinationServiceAccounts[1].DefaultServiceAccount)

    assert.Equal(t, "https://192.168.99.100:8443", proj.Spec.DestinationServiceAccounts[2].Server)
    assert.Equal(t, "test-ns1", proj.Spec.DestinationServiceAccounts[2].Namespace)
    assert.Equal(t, "default:test-sa", proj.Spec.DestinationServiceAccounts[2].DefaultServiceAccount)

    assertProjHasEvent(t, proj, "update", argo.EventReasonResourceUpdated)
}

@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    v1 "k8s.io/api/core/v1"
@@ -16,7 +17,12 @@ import (
    . "github.com/argoproj/argo-cd/v2/test/e2e/fixture/app"
)

func TestSyncWithImpersonateDisable(t *testing.T) {
const (
    WaitDuration    = time.Second
    TimeoutDuration = time.Second * 3
)

func TestSyncWithFeatureDisabled(t *testing.T) {
    Given(t).
        Path("guestbook").
        When().
@@ -25,10 +31,13 @@ func TestSyncWithImpersonateDisable(t *testing.T) {
            app.Spec.SyncPolicy = &v1alpha1.SyncPolicy{Automated: &v1alpha1.SyncPolicyAutomated{}}
        }).
        Then().
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeSynced))
        // With the impersonation feature disabled, Application sync should continue to use
        // the control plane service account for the sync operation and the sync should succeed.
        ExpectConsistently(SyncStatusIs(v1alpha1.SyncStatusCodeSynced), WaitDuration, TimeoutDuration).
        Expect(OperationMessageContains("successfully synced"))
}

func TestSyncWithImpersonateDefaultNamespaceServiceAccountNoRBAC(t *testing.T) {
func TestSyncWithNoDestinationServiceAccountsInProject(t *testing.T) {
    Given(t).
        Path("guestbook").
        When().
@@ -37,37 +46,10 @@ func TestSyncWithImpersonateDefaultNamespaceServiceAccountNoRBAC(t *testing.T) {
            app.Spec.SyncPolicy = &v1alpha1.SyncPolicy{Automated: &v1alpha1.SyncPolicyAutomated{}}
        }).
        Then().
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeOutOfSync))
}

func TestSyncWithImpersonateDefaultNamespaceServiceAccountWithRBAC(t *testing.T) {
    roleName := "default-sa-role"
    Given(t).
        Path("guestbook").
        When().
        SetParamInSettingConfigMap("application.sync.impersonation.enabled", "true").
        CreateFromFile(func(app *v1alpha1.Application) {
            app.Spec.SyncPolicy = &v1alpha1.SyncPolicy{Automated: &v1alpha1.SyncPolicyAutomated{}}
        }).
        And(func() {
            err := createTestRole(roleName, fixture.DeploymentNamespace(), []rbac.PolicyRule{
                {
                    APIGroups: []string{"apps", ""},
                    Resources: []string{"deployments"},
                    Verbs:     []string{"*"},
                },
                {
                    APIGroups: []string{""},
                    Resources: []string{"services"},
                    Verbs:     []string{"*"},
                },
            })
            require.NoError(t, err)
            err = createTestRoleBinding(roleName, "default", fixture.DeploymentNamespace())
            require.NoError(t, err)
        }).
        Then().
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeOutOfSync))
        // With the impersonation feature enabled, Application sync must fail
        // when there are no destination service accounts configured in AppProject
        ExpectConsistently(SyncStatusIs(v1alpha1.SyncStatusCodeOutOfSync), WaitDuration, TimeoutDuration).
        Expect(OperationMessageContains("failed to find a matching service account to impersonate"))
}

func TestSyncWithImpersonateWithSyncServiceAccount(t *testing.T) {
@@ -89,7 +71,7 @@ func TestSyncWithImpersonateWithSyncServiceAccount(t *testing.T) {
            {
                Server:                "*",
                Namespace:             fixture.DeploymentNamespace(),
                DefaultServiceAccount: "false-serviceAccount",
                DefaultServiceAccount: "missing-serviceAccount",
            },
        }
        err := createTestServiceAccount(serviceAccountName, fixture.DeploymentNamespace())
@@ -118,10 +100,13 @@ func TestSyncWithImpersonateWithSyncServiceAccount(t *testing.T) {
            app.Spec.Project = projectName
        }).
        Then().
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeSynced))
        // With the impersonation feature enabled, Application sync should succeed
        // as there is a valid match found in the available destination service accounts configured in AppProject
        ExpectConsistently(SyncStatusIs(v1alpha1.SyncStatusCodeSynced), WaitDuration, TimeoutDuration).
        Expect(OperationMessageContains("successfully synced"))
}

func TestSyncWithImpersonateWithFalseServiceAccount(t *testing.T) {
func TestSyncWithMissingServiceAccount(t *testing.T) {
    projectName := "false-test-project"
    serviceAccountName := "test-account"
    roleName := "test-account-sa-role"
@@ -135,7 +120,7 @@ func TestSyncWithImpersonateWithFalseServiceAccount(t *testing.T) {
            {
                Server:                "*",
                Namespace:             fixture.DeploymentNamespace(),
                DefaultServiceAccount: "false-serviceAccount",
                DefaultServiceAccount: "missing-serviceAccount",
            },
            {
                Server: "*",
@@ -169,11 +154,15 @@ func TestSyncWithImpersonateWithFalseServiceAccount(t *testing.T) {
            app.Spec.Project = projectName
        }).
        Then().
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeOutOfSync))
        // With the impersonation feature enabled, Application sync must fail
        // when there is a valid match found in the available destination service accounts configured in AppProject,
        // but the matching service account is missing.
        ExpectConsistently(SyncStatusIs(v1alpha1.SyncStatusCodeOutOfSync), WaitDuration, TimeoutDuration).
        Expect(OperationMessageContains("one or more objects failed to apply"))
}

func TestSyncWithNegationApplicationDestinationNamespace(t *testing.T) {
    projectName := "nagation-test-project"
func TestSyncWithValidSAButDisallowedDestination(t *testing.T) {
    projectName := "negation-test-project"
    serviceAccountName := "test-account"
    roleName := "test-account-sa-role"
    Given(t).
@@ -217,6 +206,7 @@ func TestSyncWithNegationApplicationDestinationNamespace(t *testing.T) {
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeSynced)).
        When().
        And(func() {
            // Patch destination to disallow target destination namespace
            patch := []byte(fmt.Sprintf(`{"spec": {"destinations": [{"namespace": "%s"}]}}`, "!"+fixture.DeploymentNamespace()))

            _, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.TestNamespace()).Patch(context.Background(), projectName, types.MergePatchType, patch, metav1.PatchOptions{})
@@ -224,7 +214,10 @@ func TestSyncWithNegationApplicationDestinationNamespace(t *testing.T) {
        }).
        Refresh(v1alpha1.RefreshTypeNormal).
        Then().
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeUnknown))
        // With the impersonation feature enabled, Application sync must fail
        // as there is a valid match found in the available destination service accounts configured in AppProject
        // but the destination namespace is now disallowed.
        ExpectConsistently(SyncStatusIs(v1alpha1.SyncStatusCodeUnknown), WaitDuration, TimeoutDuration)
}

// createTestAppProject creates a test AppProject resource.

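The impersonation tests above all hinge on the AppProject's destination service account list. As a rough, minimal sketch of the shape being exercised (the element type name `ApplicationDestinationServiceAccount` and the import paths are assumed from the upstream v1alpha1 API, since this diff only shows the field names `Server`, `Namespace`, and `DefaultServiceAccount`):

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// With application.sync.impersonation.enabled set to "true", the sync operation
	// is performed as the first entry whose server and namespace globs match the
	// application's destination, instead of the control plane service account.
	proj := v1alpha1.AppProject{
		ObjectMeta: metav1.ObjectMeta{Name: "sync-test-project"},
		Spec: v1alpha1.AppProjectSpec{
			// Type name assumed from the upstream API; the diff shows only the fields.
			DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
				{
					Server:                "*",
					Namespace:             "guestbook",
					DefaultServiceAccount: "test-account",
				},
			},
		},
	}
	fmt.Println(proj.Spec.DestinationServiceAccounts[0].DefaultServiceAccount)
}
```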
@@ -0,0 +1,125 @@
import * as moment from 'moment';
import * as React from 'react';
import * as models from '../../../shared/models';
import './application-deployment-history.scss';
import {DataLoader} from 'argo-ui';
import {Revision} from '../../../shared/components';
import {services} from '../../../shared/services';
import {ApplicationParameters} from '../application-parameters/application-parameters';
import {RevisionMetadataRows} from './revision-metadata-rows';

type props = {
    app: models.Application;
    info: models.RevisionHistory;
    index: number;
};

export const ApplicationDeploymentHistoryDetails = ({app, info, index}: props) => {
    const deployments = (app.status.history || []).slice().reverse();
    const recentDeployments = deployments.map((info, i) => {
        const nextDeployedAt = i === 0 ? null : deployments[i - 1].deployedAt;
        const runEnd = nextDeployedAt ? moment(nextDeployedAt) : moment();
        return {...info, nextDeployedAt, durationMs: runEnd.diff(moment(info.deployedAt)) / 1000};
    });

    const [showParameterDetails, setShowParameterDetails] = React.useState(Boolean);

    return (
        <>
            {info.sources === undefined ? (
                <React.Fragment>
                    <div>
                        <div className='row'>
                            <div className='columns small-3'>Revision:</div>
                            <div className='columns small-9'>
                                <Revision repoUrl={info.source.repoURL} revision={info.revision} />
                            </div>
                        </div>
                    </div>
                    <RevisionMetadataRows
                        applicationName={app.metadata.name}
                        applicationNamespace={app.metadata.namespace}
                        source={{...recentDeployments[index].source, targetRevision: recentDeployments[index].revision}}
                        index={0}
                        versionId={recentDeployments[index].id}
                    />
                    <button
                        type='button'
                        className='argo-button argo-button--base application-deployment-history__show-parameter-details'
                        onClick={() => setShowParameterDetails(!showParameterDetails)}>
                        {showParameterDetails ? 'Hide details' : 'Show details'}
                    </button>

                    {showParameterDetails && (
                        <DataLoader
                            input={{...recentDeployments[index].source, targetRevision: recentDeployments[index].revision, appName: app.metadata.name}}
                            load={src => services.repos.appDetails(src, src.appName, app.spec.project, 0, recentDeployments[index].id)}>
                            {(details: models.RepoAppDetails) => (
                                <div>
                                    <ApplicationParameters
                                        application={{
                                            ...app,
                                            spec: {...app.spec, source: recentDeployments[index].source}
                                        }}
                                        details={details}
                                    />
                                </div>
                            )}
                        </DataLoader>
                    )}
                </React.Fragment>
            ) : (
                info.sources.map((source, i) => (
                    <React.Fragment key={`${index}_${i}`}>
                        {i > 0 ? <div className='separator' /> : null}
                        <div>
                            <div className='row'>
                                <div className='columns small-3'>Revision:</div>
                                <div className='columns small-9'>
                                    <Revision repoUrl={source.repoURL} revision={info.revisions[i]} />
                                </div>
                            </div>
                        </div>
                        <RevisionMetadataRows
                            applicationName={app.metadata.name}
                            applicationNamespace={app.metadata.namespace}
                            source={{...source, targetRevision: recentDeployments[index].revisions[i]}}
                            index={i}
                            versionId={recentDeployments[index].id}
                        />
                        <button
                            type='button'
                            className='argo-button argo-button--base application-deployment-history__show-parameter-details'
                            onClick={() => setShowParameterDetails(!showParameterDetails)}>
                            {showParameterDetails ? 'Hide details' : 'Show details'}
                        </button>

                        {showParameterDetails && (
                            <DataLoader
                                input={{
                                    ...source,
                                    targetRevision: recentDeployments[index].revisions[i],
                                    index: i,
                                    versionId: recentDeployments[index].id,
                                    appName: app.metadata.name
                                }}
                                load={src => services.repos.appDetails(src, src.appName, app.spec.project, i, recentDeployments[index].id)}>
                                {(details: models.RepoAppDetails) => (
                                    <div>
                                        <ApplicationParameters
                                            application={{
                                                ...app,
                                                spec: {...app.spec, source}
                                            }}
                                            details={details}
                                        />
                                    </div>
                                )}
                            </DataLoader>
                        )}
                    </React.Fragment>
                ))
            )}
        </>
    );
};
@@ -1,22 +1,18 @@
import {DataLoader, DropDownMenu, Duration} from 'argo-ui';
import {DropDownMenu, Duration} from 'argo-ui';
import {InitiatedBy} from './initiated-by';
import * as moment from 'moment';
import * as React from 'react';
import {Revision, Timestamp} from '../../../shared/components';
import {Timestamp} from '../../../shared/components';
import * as models from '../../../shared/models';
import {services} from '../../../shared/services';
import {ApplicationParameters} from '../application-parameters/application-parameters';
import {RevisionMetadataRows} from './revision-metadata-rows';
import './application-deployment-history.scss';
import {ApplicationDeploymentHistoryDetails} from './application-deployment-history-details';

export const ApplicationDeploymentHistory = ({
    app,
    rollbackApp,
    selectedRollbackDeploymentIndex,
    selectDeployment
}: {
    app: models.Application;
    selectedRollbackDeploymentIndex: number;
    rollbackApp: (info: models.RevisionHistory) => any;
    selectDeployment: (index: number) => any;
}) => {
@@ -27,8 +23,6 @@ export const ApplicationDeploymentHistory = ({
        return {...info, nextDeployedAt, durationMs: runEnd.diff(moment(info.deployedAt)) / 1000};
    });

    const [showParameterDetails, setShowParameterDetails] = React.useState(Boolean);

    return (
        <div className='application-deployment-history'>
            {recentDeployments.map((info, index) => (
@@ -78,104 +72,8 @@ export const ApplicationDeploymentHistory = ({
                        </div>
                    </div>
                </div>
                {selectedRollbackDeploymentIndex === index ? (
                    info.sources === undefined ? (
                        <React.Fragment>
                            <div>
                                <div className='row'>
                                    <div className='columns small-3'>Revision:</div>
                                    <div className='columns small-9'>
                                        <Revision repoUrl={info.source.repoURL} revision={info.revision} />
                                    </div>
                                </div>
                            </div>
                            <RevisionMetadataRows
                                applicationName={app.metadata.name}
                                applicationNamespace={app.metadata.namespace}
                                source={{...recentDeployments[index].source, targetRevision: recentDeployments[index].revision}}
                                index={0}
                                versionId={recentDeployments[index].id}
                            />
                            <button
                                type='button'
                                className='argo-button argo-button--base application-deployment-history__show-parameter-details'
                                onClick={() => setShowParameterDetails(!showParameterDetails)}>
                                {showParameterDetails ? 'Hide details' : 'Show details'}
                            </button>

                            {showParameterDetails && (
                                <DataLoader
                                    input={{...recentDeployments[index].source, targetRevision: recentDeployments[index].revision, appName: app.metadata.name}}
                                    load={src => services.repos.appDetails(src, src.appName, app.spec.project, 0, recentDeployments[index].id)}>
                                    {(details: models.RepoAppDetails) => (
                                        <div>
                                            <ApplicationParameters
                                                application={{
                                                    ...app,
                                                    spec: {...app.spec, source: recentDeployments[index].source}
                                                }}
                                                details={details}
                                            />
                                        </div>
                                    )}
                                </DataLoader>
                            )}
                        </React.Fragment>
                    ) : (
                        info.sources.map((source, i) => (
                            <React.Fragment key={`${index}_${i}`}>
                                {i > 0 ? <div className='separator' /> : null}
                                <div>
                                    <div className='row'>
                                        <div className='columns small-3'>Revision:</div>
                                        <div className='columns small-9'>
                                            <Revision repoUrl={source.repoURL} revision={info.revisions[i]} />
                                        </div>
                                    </div>
                                </div>
                                <RevisionMetadataRows
                                    applicationName={app.metadata.name}
                                    applicationNamespace={app.metadata.namespace}
                                    source={{...source, targetRevision: recentDeployments[index].revisions[i]}}
                                    index={i}
                                    versionId={recentDeployments[index].id}
                                />
                                <button
                                    type='button'
                                    className='argo-button argo-button--base application-deployment-history__show-parameter-details'
                                    onClick={() => setShowParameterDetails(!showParameterDetails)}>
                                    {showParameterDetails ? 'Hide details' : 'Show details'}
                                </button>

                                {showParameterDetails && (
                                    <DataLoader
                                        input={{
                                            ...source,
                                            targetRevision: recentDeployments[index].revisions[i],
                                            index: i,
                                            versionId: recentDeployments[index].id,
                                            appName: app.metadata.name
                                        }}
                                        load={src => services.repos.appDetails(src, src.appName, app.spec.project, i, recentDeployments[index].id)}>
                                        {(details: models.RepoAppDetails) => (
                                            <div>
                                                <ApplicationParameters
                                                    application={{
                                                        ...app,
                                                        spec: {...app.spec, source}
                                                    }}
                                                    details={details}
                                                />
                                            </div>
                                        )}
                                    </DataLoader>
                                )}
                            </React.Fragment>
                        ))
                    )
                ) : (
                    <p>Click to see source details.</p>
                )}
                <ApplicationDeploymentHistoryDetails index={index} info={info} app={app} />
            </div>
        </div>
    ))}

@@ -5,7 +5,7 @@ import {ApplicationSource, RevisionMetadata, ChartDetails} from '../../../shared
import {services} from '../../../shared/services';

export const RevisionMetadataRows = (props: {applicationName: string; applicationNamespace: string; source: ApplicationSource; index: number; versionId: number}) => {
    if (props.source.chart) {
    if (props?.source?.chart) {
        return (
            <DataLoader
                input={props}

@@ -855,7 +855,6 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{app
                {this.selectedRollbackDeploymentIndex > -1 && (
                    <ApplicationDeploymentHistory
                        app={application}
                        selectedRollbackDeploymentIndex={this.selectedRollbackDeploymentIndex}
                        rollbackApp={info => this.rollbackApplication(info, application)}
                        selectDeployment={i => this.setRollbackPanelVisible(i)}
                    />
@@ -920,18 +919,20 @@ export class ApplicationDetails extends React.Component<RouteComponentProps<{app
            {
                iconClassName: 'fa fa-info-circle',
                title: <ActionMenuItem actionLabel='Details' />,
                action: () => this.selectNode(fullName)
                action: () => this.selectNode(fullName),
                disabled: !app.spec.source
            },
            {
                iconClassName: 'fa fa-file-medical',
                title: <ActionMenuItem actionLabel='Diff' />,
                action: () => this.selectNode(fullName, 0, 'diff'),
                disabled: app.status.sync.status === appModels.SyncStatuses.Synced
                disabled: app.status.sync.status === appModels.SyncStatuses.Synced || !app.spec.source
            },
            {
                iconClassName: 'fa fa-sync',
                title: <ActionMenuItem actionLabel='Sync' />,
                action: () => AppUtils.showDeploy('all', null, this.appContext.apis)
                action: () => AppUtils.showDeploy('all', null, this.appContext.apis),
                disabled: !app.spec.source
            },
            {
                iconClassName: 'fa fa-info-circle',

@@ -644,23 +644,24 @@ function gatherCoreSourceDetails(i: number, attributes: EditablePanelItem[], sou
        )
    });
} else {
    const targetRevision = source ? source.targetRevision || 'HEAD' : 'Unknown';
    attributes.push({
        title: 'TARGET REVISION',
        view: <Revision repoUrl={source.repoURL} revision={source.targetRevision || 'HEAD'} />,
        edit: (formApi: FormApi) => <RevisionFormField helpIconTop={'0'} hideLabel={true} formApi={formApi} repoURL={source.repoURL} fieldValue={revisionField} />
        view: <Revision repoUrl={source?.repoURL} revision={targetRevision} />,
        edit: (formApi: FormApi) => <RevisionFormField helpIconTop={'0'} hideLabel={true} formApi={formApi} repoURL={source?.repoURL} fieldValue={revisionField} />
    });
    attributes.push({
        title: 'PATH',
        view: (
            <Revision repoUrl={source.repoURL} revision={source.targetRevision || 'HEAD'} path={source.path} isForPath={true}>
                {processPath(source.path)}
            <Revision repoUrl={source?.repoURL} revision={targetRevision} path={source?.path} isForPath={true}>
                {processPath(source?.path)}
            </Revision>
        ),
        edit: (formApi: FormApi) => <FormField formApi={formApi} field={sourcesPathField} component={Text} />
    });
    attributes.push({
        title: 'REF',
        view: <span>{source.ref}</span>,
        view: <span>{source?.ref}</span>,
        edit: (formApi: FormApi) => <FormField formApi={formApi} field={refField} component={Text} />
    });
}

@@ -104,37 +104,37 @@ export const SourcePanel = (props: {
            }
        });
    }
    if (a.spec.source.repoURL && a.spec.source.chart) {
    if (a.spec?.source?.repoURL && a.spec?.source?.chart) {
        props.appCurrent.spec.sources.forEach(source => {
            if (
                source.repoURL === a.spec.source.repoURL &&
                source.chart === a.spec.source.chart &&
                source.targetRevision === a.spec.source.targetRevision
                source?.repoURL === a.spec?.source?.repoURL &&
                source?.chart === a.spec?.source?.chart &&
                source?.targetRevision === a.spec?.source?.targetRevision
            ) {
                sameChartVersion = true;
                chartError =
                    'Version ' +
                    source.targetRevision +
                    source?.targetRevision +
                    ' of chart ' +
                    source.chart +
                    source?.chart +
                    ' from the selected repository was already added to this multi-source application';
            }
        });
    }
    if (!samePath) {
        if (!a.spec.source.path && !a.spec.source.chart && !a.spec.source.ref) {
        if (!a.spec?.source?.path && !a.spec?.source?.chart && !a.spec?.source?.ref) {
            pathError = 'Path or Ref is required';
        }
    }
    if (!sameChartVersion) {
        if (!a.spec.source.chart && !a.spec.source.path && !a.spec.source.ref) {
        if (!a.spec?.source?.chart && !a.spec?.source?.path && !a.spec?.source?.ref) {
            chartError = 'Chart is required';
        }
    }
    return {
        'spec.source.repoURL': !a.spec.source.repoURL && 'Repository URL is required',
        'spec.source.repoURL': !a.spec?.source?.repoURL && 'Repository URL is required',
        // eslint-disable-next-line no-prototype-builtins
        'spec.source.targetRevision': !a.spec.source.targetRevision && a.spec.source.hasOwnProperty('chart') && 'Version is required',
        'spec.source.targetRevision': !a.spec?.source?.targetRevision && a.spec?.source?.hasOwnProperty('chart') && 'Version is required',
        'spec.source.path': pathError,
        'spec.source.chart': chartError
    };
@@ -157,8 +157,8 @@ export const SourcePanel = (props: {
    getApi={props.getFormApi}>
    {api => {
        // eslint-disable-next-line no-prototype-builtins
        const repoType = (api.getFormState().values.spec.source.hasOwnProperty('chart') && 'helm') || 'git';
        const repoInfo = reposInfo.find(info => info.repo === api.getFormState().values.spec.source.repoURL);
        const repoType = (api.getFormState().values.spec?.source?.hasOwnProperty('chart') && 'helm') || 'git';
        const repoInfo = reposInfo.find(info => info.repo === api.getFormState().values.spec?.source?.repoURL);
        if (repoInfo) {
            normalizeAppSource(appInEdit, repoInfo.type || 'git');
        }
@@ -206,12 +206,12 @@ export const SourcePanel = (props: {
        </div>
        {(repoType === 'git' && (
            <React.Fragment>
                <RevisionFormField formApi={api} helpIconTop={'2.5em'} repoURL={api.getFormState().values.spec.source.repoURL} />
                <RevisionFormField formApi={api} helpIconTop={'2.5em'} repoURL={api.getFormState().values.spec?.source?.repoURL} />
                <div className='argo-form-row'>
                    <DataLoader
                        input={{
                            repoURL: api.getFormState().values.spec.source.repoURL,
                            revision: api.getFormState().values.spec.source.targetRevision
                            repoURL: api.getFormState().values.spec?.source?.repoURL,
                            revision: api.getFormState().values.spec?.source?.targetRevision
                        }}
                        load={async src =>
                            (src.repoURL &&
@@ -247,7 +247,7 @@ export const SourcePanel = (props: {
                            new Array<models.HelmChart>()
                        }>
                        {(charts: models.HelmChart[]) => {
                            const selectedChart = charts.find(chart => chart.name === api.getFormState().values.spec.source.chart);
                            const selectedChart = charts.find(chart => chart.name === api.getFormState().values.spec?.source?.chart);
                            return (
                                <div className='row argo-form-row'>
                                    <div className='columns small-10'>
@@ -284,15 +284,15 @@ export const SourcePanel = (props: {
    const typePanel = () => (
        <DataLoader
            input={{
                repoURL: appInEdit.spec.source.repoURL,
                path: appInEdit.spec.source.path,
                chart: appInEdit.spec.source.chart,
                targetRevision: appInEdit.spec.source.targetRevision,
                repoURL: appInEdit.spec?.source?.repoURL,
                path: appInEdit.spec?.source?.path,
                chart: appInEdit.spec?.source?.chart,
                targetRevision: appInEdit.spec?.source?.targetRevision,
                appName: appInEdit.metadata.name
            }}
            load={async src => {
                if (src.repoURL && src.targetRevision && (src.path || src.chart)) {
                    return services.repos.appDetails(src, src.appName, props.appCurrent.spec.project, 0, 0).catch(() => ({
                if (src?.repoURL && src?.targetRevision && (src?.path || src?.chart)) {
                    return services.repos.appDetails(src, src?.appName, props.appCurrent.spec?.project, 0, 0).catch(() => ({
                        type: 'Directory',
                        details: {}
                    }));
@@ -304,7 +304,7 @@ export const SourcePanel = (props: {
                }
            }}>
            {(details: models.RepoAppDetails) => {
                const type = (explicitPathType && explicitPathType.path === appInEdit.spec.source.path && explicitPathType.type) || details.type;
                const type = (explicitPathType && explicitPathType.path === appInEdit.spec?.source?.path && explicitPathType.type) || details.type;
                if (details.type !== type) {
                    switch (type) {
                        case 'Helm':
@@ -337,7 +337,7 @@ export const SourcePanel = (props: {
    items={appTypes.map(item => ({
        title: item.type,
        action: () => {
            setExplicitPathType({type: item.type, path: appInEdit.spec.source.path});
            setExplicitPathType({type: item.type, path: appInEdit.spec?.source?.path});
            normalizeTypeFields(api, item.type);
        }
    }))}

@@ -112,7 +112,7 @@ export const ApplicationStatusPanel = ({application, showDiff, showOperation, sh
    application.status.sync &&
    (hasMultipleSources
        ? application.status.sync.revisions && application.status.sync.revisions[0] && application.spec.sources && !application.spec.sources[0].chart
        : application.status.sync.revision && !application.spec.source.chart) && (
        : application.status.sync.revision && !application.spec?.source?.chart) && (
        <div className='application-status-panel__item-name'>
            <RevisionMetadataPanel
                appName={application.metadata.name}
@@ -160,7 +160,7 @@ export const ApplicationStatusPanel = ({application, showDiff, showOperation, sh
            <RevisionMetadataPanel
                appName={application.metadata.name}
                appNamespace={application.metadata.namespace}
                type={source.chart && 'helm'}
                type={source?.chart && 'helm'}
                revision={operationStateRevision}
                versionId={utils.getAppCurrentVersion(application)}
            />

@@ -172,7 +172,7 @@ export const ApplicationSummary = (props: ApplicationSummaryProps) => {
        },
        !hasMultipleSources && {
            title: 'REPO URL',
            view: <Repo url={source.repoURL} />,
            view: <Repo url={source?.repoURL} />,
            edit: (formApi: FormApi) => <FormField formApi={formApi} field='spec.source.repoURL' component={Text} />
        },
        ...(!hasMultipleSources
@@ -180,11 +180,7 @@ export const ApplicationSummary = (props: ApplicationSummaryProps) => {
            ? [
                {
                    title: 'CHART',
                    view: (
                        <span>
                            {source.chart}:{source.targetRevision}
                        </span>
                    ),
                    view: <span>{source && `${source.chart}:${source.targetRevision}`}</span>,
                    edit: (formApi: FormApi) =>
                        hasMultipleSources ? (
                            helpTip('CHART is not editable for applications with multiple sources. You can edit them in the "Manifest" tab.')

@@ -5,7 +5,7 @@ import {ApplicationSource as ApplicationSourceType} from '../../../shared/models
import './applications-source.scss';

export const ApplicationsSource = ({source}: {source: ApplicationSourceType}) => {
    const sourceString = `${source.repoURL}/${source.path || source.chart}`;
    const sourceString = source ? `${source.repoURL}/${source.path || source.chart}` : '';
    return (
        <Tooltip content={sourceString}>
            <div className='application-source'>{sourceString}</div>

@@ -108,6 +108,7 @@ export const ApplicationTiles = ({applications, syncApplication, refreshApplicat
        <div className='applications-tiles argo-table-list argo-table-list--clickable' ref={appContainerRef}>
            {applications.map((app, i) => {
                const source = getAppDefaultSource(app);
                const targetRevision = source ? source.targetRevision || 'HEAD' : 'Unknown';
                return (
                    <div
                        key={AppUtils.appInstanceName(app)}
@@ -126,7 +127,7 @@ export const ApplicationTiles = ({applications, syncApplication, refreshApplicat
                        )} applications-tiles__item`}>
                        <div className='row '>
                            <div className={app.status.summary.externalURLs?.length > 0 ? 'columns small-10' : 'columns small-11'}>
                                <i className={'icon argo-icon-' + (source.chart != null ? 'helm' : 'git')} />
                                <i className={'icon argo-icon-' + (source?.chart != null ? 'helm' : 'git')} />
                                <Tooltip content={AppUtils.appInstanceName(app)}>
                                    <span className='applications-list__title'>
                                        {AppUtils.appQualifiedName(app, useAuthSettingsCtx?.appsInAnyNamespaceEnabled)}
@@ -208,8 +209,8 @@ export const ApplicationTiles = ({applications, syncApplication, refreshApplicat
                                    Repository:
                                </div>
                                <div className='columns small-9'>
                                    <Tooltip content={source.repoURL} zIndex={4}>
                                        <span>{source.repoURL}</span>
                                    <Tooltip content={source?.repoURL} zIndex={4}>
                                        <span>{source?.repoURL}</span>
                                    </Tooltip>
                                </div>
                            </div>
@@ -217,22 +218,22 @@ export const ApplicationTiles = ({applications, syncApplication, refreshApplicat
                                <div className='columns small-3' title='Target Revision:'>
                                    Target Revision:
                                </div>
                                <div className='columns small-9'>{source.targetRevision || 'HEAD'}</div>
                                <div className='columns small-9'>{targetRevision}</div>
                            </div>
                            {source.path && (
                            {source?.path && (
                                <div className='row'>
                                    <div className='columns small-3' title='Path:'>
                                        Path:
                                    </div>
                                    <div className='columns small-9'>{source.path}</div>
                                    <div className='columns small-9'>{source?.path}</div>
                                </div>
                            )}
                            {source.chart && (
                            {source?.chart && (
                                <div className='row'>
                                    <div className='columns small-3' title='Chart:'>
                                        Chart:
                                    </div>
                                    <div className='columns small-9'>{source.chart}</div>
                                    <div className='columns small-9'>{source?.chart}</div>
                                </div>
                            )}
                            <div className='row'>

@@ -746,10 +746,10 @@ export function renderResourceButtons(
export function syncStatusMessage(app: appModels.Application) {
    const source = getAppDefaultSource(app);
    const revision = getAppDefaultSyncRevision(app);
    const rev = app.status.sync.revision || source.targetRevision || 'HEAD';
    let message = source.targetRevision || 'HEAD';
    const rev = app.status.sync.revision || (source ? source.targetRevision || 'HEAD' : 'Unknown');
    let message = source ? source?.targetRevision || 'HEAD' : 'Unknown';

    if (revision) {
    if (revision && source) {
        if (source.chart) {
            message += ' (' + revision + ')';
        } else if (revision.length >= 7 && !revision.startsWith(source.targetRevision)) {
@@ -993,23 +993,59 @@ export const OperationState = ({app, quiet}: {app: appModels.Application; quiet?
    );
};

function isPodInitializedConditionTrue(status: any): boolean {
    if (!status?.conditions) {
        return false;
    }

    for (const condition of status.conditions) {
        if (condition.type !== 'Initialized') {
            continue;
        }
        return condition.status === 'True';
    }

    return false;
}

// isPodPhaseTerminal returns true if the pod's phase is terminal.
function isPodPhaseTerminal(phase: appModels.PodPhase): boolean {
    return phase === appModels.PodPhase.PodFailed || phase === appModels.PodPhase.PodSucceeded;
}

export function getPodStateReason(pod: appModels.State): {message: string; reason: string; netContainerStatuses: any[]} {
    let reason = pod.status.phase;
    const podPhase = pod.status.phase;
    let reason = podPhase;
    let message = '';
    if (pod.status.reason) {
        reason = pod.status.reason;
    }

    let initializing = false;

    let netContainerStatuses = pod.status.initContainerStatuses || [];
    netContainerStatuses = netContainerStatuses.concat(pod.status.containerStatuses || []);

    for (const condition of pod.status.conditions || []) {
        if (condition.type === 'PodScheduled' && condition.reason === 'SchedulingGated') {
            reason = 'SchedulingGated';
        }
    }

    const initContainers: Record<string, any> = {};

    for (const container of pod.spec.initContainers ?? []) {
        initContainers[container.name] = container;
    }

    let initializing = false;
    for (const container of (pod.status.initContainerStatuses || []).slice().reverse()) {
        if (container.state.terminated && container.state.terminated.exitCode === 0) {
            continue;
        }

        if (container.started && initContainers[container.name].restartPolicy === 'Always') {
            continue;
        }

        if (container.state.terminated) {
            if (container.state.terminated.reason) {
                reason = `Init:ExitCode:${container.state.terminated.exitCode}`;
@@ -1027,7 +1063,7 @@ export function getPodStateReason(pod: appModels.State): {message: string; reaso
        break;
    }

    if (!initializing) {
    if (!initializing || isPodInitializedConditionTrue(pod.status)) {
        let hasRunning = false;
        for (const container of pod.status.containerStatuses || []) {
            if (container.state.waiting && container.state.waiting.reason) {
@@ -1059,7 +1095,7 @@ export function getPodStateReason(pod: appModels.State): {message: string; reaso
    if ((pod as any).metadata.deletionTimestamp && pod.status.reason === 'NodeLost') {
        reason = 'Unknown';
        message = '';
    } else if ((pod as any).metadata.deletionTimestamp) {
    } else if ((pod as any).metadata.deletionTimestamp && !isPodPhaseTerminal(podPhase)) {
        reason = 'Terminating';
        message = '';
    }
@@ -1084,7 +1120,7 @@ export const getPodReadinessGatesState = (pod: appModels.State): {nonExistingCon
    for (const condition of podStatusConditions) {
        existingConditions.set(condition.type, true);
        // priority order of conditions
        // eg. if there are multiple conditions set with same name then the one which comes first is evaluated
        // e.g. if there are multiple conditions set with same name then the one which comes first is evaluated
        if (podConditions.has(condition.type)) {
            continue;
        }
@@ -1131,10 +1167,10 @@ export function isAppNode(node: appModels.ResourceNode) {

export function getAppOverridesCount(app: appModels.Application) {
    const source = getAppDefaultSource(app);
    if (source.kustomize && source.kustomize.images) {
    if (source?.kustomize?.images) {
        return source.kustomize.images.length;
    }
    if (source.helm && source.helm.parameters) {
    if (source?.helm?.parameters) {
        return source.helm.parameters.length;
    }
    return 0;

14
ui/yarn.lock
14
ui/yarn.lock
@@ -4181,9 +4181,9 @@ domhandler@^4.0.0, domhandler@^4.2.0:
    domelementtype "^2.2.0"

dompurify@^2.2.8:
  version "2.3.6"
  resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-2.3.6.tgz#2e019d7d7617aacac07cbbe3d88ae3ad354cf875"
  integrity sha512-OFP2u/3T1R5CEgWCEONuJ1a5+MFKnOYpkywpUSxv/dj1LeBT1erK+JwM7zK0ROy2BRhqVCf0LRw/kHqKuMkVGg==
  version "2.5.6"
  resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-2.5.6.tgz#8402b501611eaa7fb3786072297fcbe2787f8592"
  integrity sha512-zUTaUBO8pY4+iJMPE1B9XlO2tXVYIcEA4SNGtvDELzTSCQO7RzH+j7S180BmhmJId78lqGU2z19vgVx2Sxs/PQ==

domutils@^2.5.2, domutils@^2.6.0:
  version "2.7.0"
@@ -6121,7 +6121,7 @@ is-wsl@^2.2.0:
isarray@0.0.1:
  version "0.0.1"
  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
  integrity sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=
  integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==

isarray@^2.0.5:
  version "2.0.5"
@@ -7642,9 +7642,9 @@ path-to-regexp@0.1.10:
  integrity sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==

path-to-regexp@^1.7.0:
  version "1.8.0"
  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.8.0.tgz#887b3ba9d84393e87a0a0b9f4cb756198b53548a"
  integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
  version "1.9.0"
  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.9.0.tgz#5dc0753acbf8521ca2e0f137b4578b917b10cf24"
  integrity sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==
  dependencies:
    isarray "0.0.1"

@@ -173,9 +173,12 @@ func cmpSupports(ctx context.Context, pluginSockFilePath, appPath, repoPath, fil
        return nil, nil, false
    }

    // if discovery is not configured, return the client without further checks
    if !cfg.IsDiscoveryConfigured {
        return conn, cmpClient, true
        // If discovery isn't configured but the plugin is named, then the plugin supports the repo.
        if namedPlugin {
            return conn, cmpClient, true
        }
        return nil, nil, false
    }

    isSupported, isDiscoveryEnabled, err := matchRepositoryCMP(ctx, appPath, repoPath, cmpClient, env, tarExcludedGlobs)

@@ -631,14 +631,9 @@ func (m *nativeGitClient) lsRemote(revision string) (string, error) {
        revision = "HEAD"
    }

    // Check if the revision is a valid semver constraint before attempting to resolve it
    if constraint, err := semver.NewConstraint(revision); err == nil {
        semverSha := m.resolveSemverRevision(constraint, refs)
        if semverSha != "" {
            return semverSha, nil
        }
    } else {
        log.Debugf("Revision '%s' is not a valid semver constraint, skipping semver resolution.", revision)
    semverSha := m.resolveSemverRevision(revision, refs)
    if semverSha != "" {
        return semverSha, nil
    }

    // refToHash keeps a map of remote refs to their hash
@@ -684,18 +679,31 @@ func (m *nativeGitClient) lsRemote(revision string) (string, error) {

    // If we get here, the revision string had non-hexadecimal characters (indicating it's a branch, tag,
    // or symbolic ref) and we were unable to resolve it to a commit SHA.
    return "", fmt.Errorf("Unable to resolve '%s' to a commit SHA", revision)
    return "", fmt.Errorf("unable to resolve '%s' to a commit SHA", revision)
}

// resolveSemverRevision is a part of the lsRemote method workflow.
// When the user configure correctly the Git repository revision and the revision is a valid semver constraint
// only the for loop in this function will run, otherwise the lsRemote loop will try to resolve the revision.
// Some examples to illustrate the actual behavior, if:
// * The revision is "v0.1.*"/"0.1.*" or "v0.1.2"/"0.1.2" and there's a tag matching that constraint only this function loop will run;
// * The revision is "v0.1.*"/"0.1.*" or "0.1.2"/"0.1.2" and there is no tag matching that constraint this function loop and lsRemote loop will run for backward compatibility;
// * The revision is "custom-tag" only the lsRemote loop will run because that revision is an invalid semver;
// * The revision is "master-branch" only the lsRemote loop will run because that revision is an invalid semver;
func (m *nativeGitClient) resolveSemverRevision(constraint *semver.Constraints, refs []*plumbing.Reference) string {
// When the user correctly configures the Git repository revision, and that revision is a valid semver constraint, we
// use this logic path rather than the standard lsRemote revision resolution loop.
// Some examples to illustrate the actual behavior - if the revision is:
// * "v0.1.2"/"0.1.2" or "v0.1"/"0.1", then this is not a constraint, it's a pinned version - so we fall back to the standard tag matching in the lsRemote loop.
// * "v0.1.*"/"0.1.*", and there's a tag matching that constraint, then we find the latest matching version and return its commit hash.
// * "v0.1.*"/"0.1.*", and there is *no* tag matching that constraint, then we fall back to the standard tag matching in the lsRemote loop.
// * "custom-tag", only the lsRemote loop will run - because that revision is an invalid semver;
// * "master-branch", only the lsRemote loop will run because that revision is an invalid semver;
func (m *nativeGitClient) resolveSemverRevision(revision string, refs []*plumbing.Reference) string {
    if _, err := semver.NewVersion(revision); err == nil {
        // If the revision is a valid version, then we know it isn't a constraint; it's just a pin.
        // In which case, we should use standard tag resolution mechanisms.
        return ""
    }

    constraint, err := semver.NewConstraint(revision)
    if err != nil {
        log.Debugf("Revision '%s' is not a valid semver constraint, skipping semver resolution.", revision)
        return ""
    }

    maxVersion := semver.New(0, 0, 0, "", "")
    maxVersionHash := plumbing.ZeroHash
    for _, ref := range refs {
@@ -723,6 +731,7 @@ func (m *nativeGitClient) resolveSemverRevision(constraint *semver.Constraints,
        return ""
    }

    log.Debugf("Semver constraint '%s' resolved to tag '%s', at reference '%s'", revision, maxVersion.Original(), maxVersionHash.String())
    return maxVersionHash.String()
}

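The doc comment above boils down to one rule: a parseable version is a pin, a parseable constraint is a range, and anything else is an ordinary ref. A minimal sketch of that classification, assuming the Masterminds semver/v3 library (the `semver.New(0, 0, 0, "", "")` call in the diff matches its API, but the import path itself is not shown here):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

// classify mirrors the pin-vs-constraint decision sketched in the comments above.
func classify(revision string) string {
	if _, err := semver.NewVersion(revision); err == nil {
		// A parseable version like "v0.8.0" is a pin: resolve it as a plain tag.
		return "pinned version - use standard tag resolution"
	}
	if _, err := semver.NewConstraint(revision); err == nil {
		// Something like "v0.8.*" or ">= v1.0.0" is a range: pick the highest matching tag.
		return "semver constraint - resolve to the highest matching tag"
	}
	// Branch names and arbitrary tags fall through to normal ref resolution.
	return "not semver - use standard ref resolution"
}

func main() {
	for _, rev := range []string{"v0.8.0", "v0.8.*", "master-branch"} {
		fmt.Printf("%s: %s\n", rev, classify(rev))
	}
}
```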
@@ -173,6 +173,148 @@ func Test_ChangedFiles(t *testing.T) {
    assert.ElementsMatch(t, []string{"README"}, changedFiles)
}

func Test_SemverTags(t *testing.T) {
    tempDir := t.TempDir()

    client, err := NewClientExt(fmt.Sprintf("file://%s", tempDir), tempDir, NopCreds{}, true, false, "", "")
    require.NoError(t, err)

    err = client.Init()
    require.NoError(t, err)

    mapTagRefs := map[string]string{}
    for _, tag := range []string{
        "v1.0.0-rc1",
        "v1.0.0-rc2",
        "v1.0.0",
        "v1.0",
        "v1.0.1",
        "v1.1.0",
        "2024-apple",
        "2024-banana",
    } {
        err = runCmd(client.Root(), "git", "commit", "-m", tag+" commit", "--allow-empty")
        require.NoError(t, err)

        // Create an rc semver tag
        err = runCmd(client.Root(), "git", "tag", tag)
        require.NoError(t, err)

        sha, err := client.LsRemote("HEAD")
        require.NoError(t, err)

        mapTagRefs[tag] = sha
    }

    for _, tc := range []struct {
        name     string
        ref      string
        expected string
        error    bool
    }{{
        name:     "pinned rc version",
        ref:      "v1.0.0-rc1",
        expected: mapTagRefs["v1.0.0-rc1"],
    }, {
        name:     "lt rc constraint",
        ref:      "< v1.0.0-rc3",
        expected: mapTagRefs["v1.0.0-rc2"],
    }, {
        name:     "pinned major version",
        ref:      "v1.0.0",
        expected: mapTagRefs["v1.0.0"],
    }, {
        name:     "pinned patch version",
        ref:      "v1.0.1",
        expected: mapTagRefs["v1.0.1"],
    }, {
        name:     "pinned minor version",
        ref:      "v1.1.0",
        expected: mapTagRefs["v1.1.0"],
    }, {
        name:     "patch wildcard constraint",
        ref:      "v1.0.*",
        expected: mapTagRefs["v1.0.1"],
    }, {
        name:     "patch tilde constraint",
        ref:      "~v1.0.0",
        expected: mapTagRefs["v1.0.1"],
    }, {
        name:     "minor wildcard constraint",
        ref:      "v1.*",
        expected: mapTagRefs["v1.1.0"],
    }, {
        // The semver library allows for using both * and x as the wildcard modifier.
        name:     "alternative minor wildcard constraint",
        ref:      "v1.x",
        expected: mapTagRefs["v1.1.0"],
    }, {
        name:     "minor gte constraint",
        ref:      ">= v1.0.0",
        expected: mapTagRefs["v1.1.0"],
    }, {
        name:     "multiple constraints",
        ref:      "> v1.0.0 < v1.1.0",
        expected: mapTagRefs["v1.0.1"],
    }, {
        // We treat non-specific semver versions as regular tags, rather than constraints.
        name:     "non-specific version",
        ref:      "v1.0",
        expected: mapTagRefs["v1.0"],
    }, {
        // Which means a missing tag will raise an error.
        name:  "missing non-specific version",
        ref:   "v1.1",
        error: true,
    }, {
        // This is NOT a semver constraint, so it should always resolve to itself - because specifying a tag should
        // return the commit for that tag.
        // semver/v3 has the unfortunate semver-ish behaviour where any tag starting with a number is considered to be
        // "semver-ish", where that number is the semver major version, and the rest then gets coerced into a beta
        // version string. This can cause unexpected behaviour with constraints logic.
        // In this case, if the tag is being incorrectly coerced into semver (for being semver-ish), it will incorrectly
        // return the commit for the 2024-banana tag; which we want to avoid.
        name:     "apple non-semver tag",
        ref:      "2024-apple",
        expected: mapTagRefs["2024-apple"],
    }, {
        name:     "banana non-semver tag",
        ref:      "2024-banana",
        expected: mapTagRefs["2024-banana"],
    }, {
        // A semver version (without constraints) should ONLY match itself.
        // We do not want "2024-apple" to get "semver-ish'ed" into matching "2024.0.0-apple"; they're different tags.
        name:  "no semver tag coercion",
        ref:   "2024.0.0-apple",
        error: true,
    }, {
        // No minor versions are specified, so we would expect a major version of 2025 or more.
        // This is because if we specify > 11 in semver, we would not expect 11.1.0 to pass; it should be 12.0.0 or more.
        // Similarly, if we were to specify > 11.0, we would expect 11.1.0 or more.
        name:  "semver constraints on non-semver tags",
        ref:   "> 2024-apple",
        error: true,
    }, {
        // However, if one specifies the minor/patch versions, semver constraints can be used to match non-semver tags.
        // 2024-banana is considered as "2024.0.0-banana" in semver-ish, and banana > apple, so it's a match.
        // Note: this is more for documentation and future reference than real testing, as it seems like quite odd behaviour.
        name:     "semver constraints on non-semver tags",
        ref:      "> 2024.0.0-apple",
        expected: mapTagRefs["2024-banana"],
    }} {
        t.Run(tc.name, func(t *testing.T) {
            commitSHA, err := client.LsRemote(tc.ref)
            if tc.error {
                require.Error(t, err)
                return
            }
            require.NoError(t, err)
            assert.True(t, IsCommitSHA(commitSHA))
            assert.Equal(t, tc.expected, commitSHA)
        })
    }
}

func Test_nativeGitClient_Submodule(t *testing.T) {
    tempDir, err := os.MkdirTemp("", "")
    require.NoError(t, err)

@@ -233,15 +233,10 @@ func TestLsRemote(t *testing.T) {
            expectedCommit: "ff87d8cb9e669d3738434733ecba3c6dd2c64d70",
        },
        {
            name:           "should resolve a pined tag with semantic versioning",
            name:           "should resolve a pinned tag with semantic versioning",
            revision:       "v0.8.0",
            expectedCommit: "d7c04ae24c16f8ec611b0331596fbc595537abe9",
        },
        {
            name:           "should resolve a pined tag with semantic versioning without the 'v' prefix",
            revision:       "0.8.0",
            expectedCommit: "d7c04ae24c16f8ec611b0331596fbc595537abe9",
        },
        {
            name:           "should resolve a range tag with semantic versioning",
            revision:       "v0.8.*", // it should resolve to v0.8.2
@@ -299,7 +294,7 @@ func TestLsRemote(t *testing.T) {

        for _, revision := range xfail {
            _, err := clnt.LsRemote(revision)
            assert.ErrorContains(t, err, "Unable to resolve")
            assert.ErrorContains(t, err, "unable to resolve")
        }
    })
}

@@ -5,6 +5,7 @@ import (
    log "github.com/sirupsen/logrus"
)

// Match tries to match a text with a given glob pattern.
func Match(pattern, text string, separators ...rune) bool {
    compiledGlob, err := glob.Compile(pattern, separators...)
    if err != nil {
@@ -13,3 +14,13 @@ func Match(pattern, text string, separators ...rune) bool {
    }
    return compiledGlob.Match(text)
}

// MatchWithError tries to match a text with a given glob pattern.
// returns error if the glob pattern fails to compile.
func MatchWithError(pattern, text string, separators ...rune) (bool, error) {
    compiledGlob, err := glob.Compile(pattern, separators...)
    if err != nil {
        return false, err
    }
    return compiledGlob.Match(text), nil
}

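The point of `MatchWithError` is that a pattern which fails to compile (like the `e[[a*` case tested below) surfaces as an error instead of being silently reported as a non-match. A minimal sketch of the same behaviour, written directly against the gobwas/glob library the helper wraps (the import path is an assumption; the diff only shows the `glob.Compile` call):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

// matchWithError reports whether text matches pattern, and distinguishes a
// genuine non-match from a pattern that could not be compiled at all.
func matchWithError(pattern, text string, separators ...rune) (bool, error) {
	compiled, err := glob.Compile(pattern, separators...)
	if err != nil {
		// An invalid pattern such as "e[[a*" is an error, not just "false".
		return false, err
	}
	return compiled.Match(text), nil
}

func main() {
	ok, err := matchWithError("h*", "hello")
	fmt.Println(ok, err) // true <nil>

	ok, err = matchWithError("e[[a*", "hello")
	fmt.Println(ok, err) // false plus a compile error ("unexpected end of input" style)
}
```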
@@ -3,7 +3,7 @@ package glob
import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func Test_Match(t *testing.T) {
@@ -24,7 +24,7 @@ func Test_Match(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            res := Match(tt.pattern, tt.input)
            assert.Equal(t, tt.result, res)
            require.Equal(t, tt.result, res)
        })
    }
}
@@ -53,7 +53,36 @@ func Test_MatchList(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            res := MatchStringInList(tt.list, tt.input, tt.patternMatch)
            assert.Equal(t, tt.result, res)
            require.Equal(t, tt.result, res)
        })
    }
}

func Test_MatchWithError(t *testing.T) {
    tests := []struct {
        name        string
        input       string
        pattern     string
        result      bool
        expectedErr string
    }{
        {"Exact match", "hello", "hello", true, ""},
        {"Non-match exact", "hello", "hell", false, ""},
        {"Long glob match", "hello", "hell*", true, ""},
        {"Short glob match", "hello", "h*", true, ""},
        {"Glob non-match", "hello", "e*", false, ""},
        {"Invalid pattern", "e[[a*", "e[[a*", false, "unexpected end of input"},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            res, err := MatchWithError(tt.pattern, tt.input)
            require.Equal(t, tt.result, res)
            if tt.expectedErr == "" {
                require.NoError(t, err)
            } else {
                require.ErrorContains(t, err, tt.expectedErr)
            }
        })
    }
}

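The assert-to-require swaps above change failure behavior, not just spelling: testify's assert records a failure and lets the test keep running, while require stops the test immediately via t.FailNow. A minimal illustration (a standalone sketch, not part of the changed files):

package demo

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestFailureModes(t *testing.T) {
    assert.Equal(t, 1, 2)  // marks the test failed; execution continues
    require.Equal(t, 1, 2) // marks the test failed and returns via t.FailNow
    t.Log("never reached once require.Equal fails")
}
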
@@ -141,13 +141,11 @@ func (vm VM) GetHealthScript(obj *unstructured.Unstructured) (string, bool, erro
        return script.HealthLua, script.UseOpenLibs, nil
    }

    // if not found as is, perhaps it matches wildcard entries in the configmap
    wildcardKey := GetWildcardConfigMapKey(vm, obj.GroupVersionKind())
    // if not found as is, perhaps it matches a wildcard entry in the configmap
    getWildcardHealthOverride, useOpenLibs := getWildcardHealthOverrideLua(vm.ResourceOverrides, obj.GroupVersionKind())

    if wildcardKey != "" {
        if wildcardScript, ok := vm.ResourceOverrides[wildcardKey]; ok && wildcardScript.HealthLua != "" {
            return wildcardScript.HealthLua, wildcardScript.UseOpenLibs, nil
        }
    if getWildcardHealthOverride != "" {
        return getWildcardHealthOverride, useOpenLibs, nil
    }

    // if not found in the ResourceOverrides at all, search it as is in the built-in scripts
@@ -426,15 +424,18 @@ func GetConfigMapKey(gvk schema.GroupVersionKind) string {
    return fmt.Sprintf("%s/%s", gvk.Group, gvk.Kind)
}

func GetWildcardConfigMapKey(vm VM, gvk schema.GroupVersionKind) string {
// getWildcardHealthOverrideLua returns the first encountered resource override which matches the wildcard and has a
// non-empty health script. Having multiple wildcards with non-empty health checks that can match the GVK is
// non-deterministic.
func getWildcardHealthOverrideLua(overrides map[string]appv1.ResourceOverride, gvk schema.GroupVersionKind) (string, bool) {
    gvkKeyToMatch := GetConfigMapKey(gvk)

    for key := range vm.ResourceOverrides {
        if glob.Match(key, gvkKeyToMatch) {
            return key
    for key, override := range overrides {
        if glob.Match(key, gvkKeyToMatch) && override.HealthLua != "" {
            return override.HealthLua, override.UseOpenLibs
        }
    }
    return ""
    return "", false
}

func (vm VM) getPredefinedLuaScripts(objKey string, scriptFile string) (string, error) {

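The refactor above fixes a shadowing problem visible in the old code: GetWildcardConfigMapKey stopped at the first wildcard key that matched the GVK, even when that override carried an empty health script, so a later matching override with a real script was never consulted. A stripped-down sketch of the new first-match-with-script behavior, using the same gobwas-style glob compile the util/glob package wraps; the map shape and names here are ours, not the package's:

package main

import (
    "fmt"

    "github.com/gobwas/glob"
)

// firstHealthScript keeps scanning past wildcard keys whose script is empty,
// returning the first matching override that actually defines health.lua.
// Go map iteration order is random, hence the "non-deterministic" caveat
// above when several script-bearing wildcards match the same GVK key.
func firstHealthScript(overrides map[string]string, gvkKey string) (string, bool) {
    for key, healthLua := range overrides {
        if healthLua == "" {
            continue // a matching key with no script no longer shadows others
        }
        g, err := glob.Compile(key)
        if err != nil {
            continue
        }
        if g.Match(gvkKey) {
            return healthLua, true
        }
    }
    return "", false
}

func main() {
    overrides := map[string]string{
        "*.aws.crossplane.io/*": "",                   // matches but empty: skipped
        "*.aws*":                "hs = {}\nreturn hs", // matches with a script: wins
    }
    script, ok := firstHealthScript(overrides, "ec2.aws.crossplane.io/Instance")
    fmt.Println(ok, script != "") // true true
}
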
@@ -787,6 +787,11 @@ return hs`
const healthWildcardOverrideScript = `
hs = {}
hs.status = "Healthy"
return hs`

const healthWildcardOverrideScriptUnhealthy = `
hs = {}
hs.status = "UnHealthy"
return hs`

    getHealthOverride := func(openLibs bool) ResourceHealthOverrides {
@@ -804,6 +809,21 @@ return hs`
        },
    }

    getMultipleWildcardHealthOverrides := ResourceHealthOverrides{
        "*.aws.crossplane.io/*": appv1.ResourceOverride{
            HealthLua: "",
        },
        "*.aws*": appv1.ResourceOverride{
            HealthLua: healthWildcardOverrideScriptUnhealthy,
        },
    }

    getBaseWildcardHealthOverrides := ResourceHealthOverrides{
        "*/*": appv1.ResourceOverride{
            HealthLua: "",
        },
    }

    t.Run("Enable Lua standard lib", func(t *testing.T) {
        testObj := StrToUnstructured(testSA)
        overrides := getHealthOverride(true)
@@ -837,6 +857,23 @@ return hs`
        assert.Equal(t, expectedStatus, status)
    })

    t.Run("Get resource health for wildcard override with non-empty health.lua", func(t *testing.T) {
        testObj := StrToUnstructured(ec2AWSCrossplaneObjJson)
        overrides := getMultipleWildcardHealthOverrides
        status, err := overrides.GetResourceHealth(testObj)
        require.NoError(t, err)
        expectedStatus := &health.HealthStatus{Status: "Unknown", Message: "Lua returned an invalid health status"}
        assert.Equal(t, expectedStatus, status)
    })

    t.Run("Get resource health for */* override with empty health.lua", func(t *testing.T) {
        testObj := StrToUnstructured(ec2AWSCrossplaneObjJson)
        overrides := getBaseWildcardHealthOverrides
        status, err := overrides.GetResourceHealth(testObj)
        require.NoError(t, err)
        assert.Nil(t, status)
    })

    t.Run("Resource health for wildcard override not found", func(t *testing.T) {
        testObj := StrToUnstructured(testSA)
        overrides := getWildcardHealthOverride

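The first new test above relies on exactly the skip-empty behavior: "*.aws.crossplane.io/*" matches the EC2 object but has an empty script, so the "*.aws*" script runs, and its deliberately invalid "UnHealthy" status surfaces as the "Lua returned an invalid health status" result. When several matching wildcards do carry scripts, which one wins depends on Go's randomized map iteration, as a quick standalone sketch shows:

package main

import "fmt"

// Two runs of this loop may print the keys in different orders; Go randomizes
// map iteration on purpose. That is why overlapping wildcard overrides with
// non-empty health scripts are documented above as non-deterministic.
func main() {
    overrides := map[string]string{
        "*.aws*":                "script-a",
        "*.aws.crossplane.io/*": "script-b",
    }
    for key := range overrides {
        fmt.Println(key)
    }
}
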
@@ -535,6 +535,9 @@ const (
const (
    // default max webhook payload size is 1GB
    defaultMaxWebhookPayloadSize = int64(1) * 1024 * 1024 * 1024

    // application sync with impersonation feature is disabled by default.
    defaultImpersonationEnabledFlag = false
)

var sourceTypeToEnableGenerationKey = map[v1alpha1.ApplicationSourceType]string{
@@ -2336,11 +2339,11 @@ func (mgr *SettingsManager) GetMaxWebhookPayloadSize() int64 {
    return maxPayloadSizeMB * 1024 * 1024
}

// GetIsImpersonationEnabled returns true if application sync with impersonation feature is enabled in argocd-cm configmap
func (mgr *SettingsManager) IsImpersonationEnabled() bool {
// IsImpersonationEnabled returns true if the application sync with impersonation feature is enabled in the argocd-cm configmap
func (mgr *SettingsManager) IsImpersonationEnabled() (bool, error) {
    cm, err := mgr.getConfigMap()
    if err != nil {
        return false
        return defaultImpersonationEnabledFlag, fmt.Errorf("error checking %s property in configmap: %w", impersonationEnabledKey, err)
    }
    return cm.Data[impersonationEnabledKey] == "true"
    return cm.Data[impersonationEnabledKey] == "true", nil
}

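With the new (bool, error) signature, a configmap read failure is no longer folded into a bare false: callers can log the error and then fall back to the documented default (impersonation disabled). A hypothetical caller, where the interface and wiring are our sketch rather than Argo CD's actual controller code:

package main

import (
    "fmt"
    "log"
)

// settingsReader models only the method under discussion; in Argo CD the
// concrete type would be the settings manager shown above.
type settingsReader interface {
    IsImpersonationEnabled() (bool, error)
}

// chooseSyncIdentity logs configmap errors instead of losing them, then
// proceeds with the default (impersonation disabled) on failure.
func chooseSyncIdentity(s settingsReader) {
    enabled, err := s.IsImpersonationEnabled()
    if err != nil {
        log.Printf("could not read impersonation flag, assuming disabled: %v", err)
    }
    if enabled {
        fmt.Println("sync will impersonate the project's configured service account")
    } else {
        fmt.Println("sync will use the controller's own credentials")
    }
}
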
@@ -1725,3 +1725,46 @@ func TestRedirectAdditionalURLs(t *testing.T) {
        })
    }
}

func TestIsImpersonationEnabled(t *testing.T) {
    // When there is no argocd-cm itself,
    // Then IsImpersonationEnabled() must return false (default value) and an error with an appropriate message.
    kubeClient := fake.NewSimpleClientset()
    settingsManager := NewSettingsManager(context.Background(), kubeClient, "default")
    featureFlag, err := settingsManager.IsImpersonationEnabled()
    require.False(t, featureFlag,
        "with no argocd-cm config map, IsImpersonationEnabled() must return false (default value)")
    require.ErrorContains(t, err, "configmap \"argocd-cm\" not found",
        "with no argocd-cm config map, IsImpersonationEnabled() must return an error")

    // When there is no impersonation feature flag present in the argocd-cm,
    // Then IsImpersonationEnabled() must return false (default value) and a nil error.
    _, settingsManager = fixtures(map[string]string{})
    featureFlag, err = settingsManager.IsImpersonationEnabled()
    require.False(t, featureFlag,
        "with empty argocd-cm config map, IsImpersonationEnabled() must return false (default value)")
    require.NoError(t, err,
        "with empty argocd-cm config map, IsImpersonationEnabled() must not return any error")

    // When the user disables the feature explicitly,
    // Then IsImpersonationEnabled() must return false and a nil error.
    _, settingsManager = fixtures(map[string]string{
        "application.sync.impersonation.enabled": "false",
    })
    featureFlag, err = settingsManager.IsImpersonationEnabled()
    require.False(t, featureFlag,
        "when user disables the flag in argocd-cm config map, IsImpersonationEnabled() must return the user-set value")
    require.NoError(t, err,
        "when user disables the flag in argocd-cm config map, IsImpersonationEnabled() must not return any error")

    // When the user enables the feature explicitly,
    // Then IsImpersonationEnabled() must return true and a nil error.
    _, settingsManager = fixtures(map[string]string{
        "application.sync.impersonation.enabled": "true",
    })
    featureFlag, err = settingsManager.IsImpersonationEnabled()
    require.True(t, featureFlag,
        "when user enables the flag in argocd-cm config map, IsImpersonationEnabled() must return the user-set value")
    require.NoError(t, err,
        "when user enables the flag in argocd-cm config map, IsImpersonationEnabled() must not return any error")
}