diff --git a/controller/appcontroller.go b/controller/appcontroller.go
index ec75d4e342..e44166e6e8 100644
--- a/controller/appcontroller.go
+++ b/controller/appcontroller.go
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	stderrors "errors"
 	"fmt"
+	"maps"
 	"math"
 	"math/rand"
 	"net/http"
@@ -927,14 +928,14 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
 	go func() { errors.CheckError(ctrl.stateCache.Run(ctx)) }()
 	go func() { errors.CheckError(ctrl.metricsServer.ListenAndServe()) }()
 
-	for i := 0; i < statusProcessors; i++ {
+	for range statusProcessors {
 		go wait.Until(func() {
 			for ctrl.processAppRefreshQueueItem() {
 			}
 		}, time.Second, ctx.Done())
 	}
 
-	for i := 0; i < operationProcessors; i++ {
+	for range operationProcessors {
 		go wait.Until(func() {
 			for ctrl.processAppOperationQueueItem() {
 			}
@@ -2131,9 +2132,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
 	var newAnnotations map[string]string
 	if orig.GetAnnotations() != nil {
 		newAnnotations = make(map[string]string)
-		for k, v := range orig.GetAnnotations() {
-			newAnnotations[k] = v
-		}
+		maps.Copy(newAnnotations, orig.GetAnnotations())
 		delete(newAnnotations, appv1.AnnotationKeyRefresh)
 		delete(newAnnotations, appv1.AnnotationKeyHydrate)
 	}
@@ -2374,7 +2373,7 @@ func (ctrl *ApplicationController) selfHealRemainingBackoff(app *appv1.Applicati
 	backOff.Steps = selfHealAttemptsCount
 	var delay time.Duration
 	steps := backOff.Steps
-	for i := 0; i < steps; i++ {
+	for range steps {
 		delay = backOff.Step()
 	}
 	if timeSinceOperation == nil {
diff --git a/controller/cache/cache.go b/controller/cache/cache.go
index 8a7cff3438..fb85b53054 100644
--- a/controller/cache/cache.go
+++ b/controller/cache/cache.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"maps"
 	"math"
 	"net"
 	"net/url"
@@ -366,9 +367,7 @@ func getAppRecursive(r *clustercache.Resource, ns map[kube.ResourceKey]*clusterc
 		gv := ownerRefGV(ownerRef)
 		if parent, ok := ns[kube.NewResourceKey(gv.Group, ownerRef.Kind, r.Ref.Namespace, ownerRef.Name)]; ok {
 			visitedBranch := make(map[kube.ResourceKey]bool, len(visited))
-			for k, v := range visited {
-				visitedBranch[k] = v
-			}
+			maps.Copy(visitedBranch, visited)
 			app, ok := getAppRecursive(parent, ns, visitedBranch)
 			if app != "" || !ok {
 				return app, ok
diff --git a/controller/metrics/metrics_test.go b/controller/metrics/metrics_test.go
index 524ab98f79..2f90c5dc8f 100644
--- a/controller/metrics/metrics_test.go
+++ b/controller/metrics/metrics_test.go
@@ -410,7 +410,6 @@ argocd_app_labels{label_non_existing="",name="my-app-3",namespace="argocd",proje
 	}
 
 	for _, c := range cases {
-		c := c
 		t.Run(c.description, func(t *testing.T) {
 			testMetricServer(t, c.applications, c.responseContains, c.metricLabels, []string{})
 		})
@@ -464,7 +463,6 @@ argocd_app_condition{condition="ExcludedResourceWarning",name="my-app-4",namespa
 	}
 
 	for _, c := range cases {
-		c := c
 		t.Run(c.description, func(t *testing.T) {
 			testMetricServer(t, c.applications, c.responseContains, []string{}, c.metricConditions)
 		})
@@ -506,7 +504,7 @@ argocd_app_sync_total{dest_server="https://localhost:6443",dry_run="false",name=
 // assertMetricsPrinted asserts every line in the expected lines appears in the body
 func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
 	t.Helper()
-	for _, line := range strings.Split(expectedLines, "\n") {
+	for line := range strings.SplitSeq(expectedLines, "\n") {
 		if line == "" {
 			continue
 		}
@@ -517,7 +515,7 @@
 // assertMetricsNotPrinted
 func assertMetricsNotPrinted(t *testing.T, expectedLines, body string) {
 	t.Helper()
-	for _, line := range strings.Split(expectedLines, "\n") {
+	for line := range strings.SplitSeq(expectedLines, "\n") {
 		if line == "" {
 			continue
 		}
diff --git a/controller/sharding/cache.go b/controller/sharding/cache.go
index febcd73b5a..c82ca546b4 100644
--- a/controller/sharding/cache.go
+++ b/controller/sharding/cache.go
@@ -1,6 +1,7 @@
 package sharding
 
 import (
+	"maps"
 	"sync"
 
 	log "github.com/sirupsen/logrus"
@@ -134,9 +135,7 @@ func (sharding *ClusterSharding) GetDistribution() map[string]int {
 	shards := sharding.Shards
 
 	distribution := make(map[string]int, len(shards))
-	for k, v := range shards {
-		distribution[k] = v
-	}
+	maps.Copy(distribution, shards)
 	return distribution
 }
 
diff --git a/controller/sharding/sharding_test.go b/controller/sharding/sharding_test.go
index 0afd3c865c..072a5b525e 100644
--- a/controller/sharding/sharding_test.go
+++ b/controller/sharding/sharding_test.go
@@ -224,7 +224,7 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterNumber
 	// and for 4096 clusters, execution time was under 9s
 	// The other implementation was giving almost linear time of 400ms up to 10'000 clusters
 	clusterPointers := []*v1alpha1.Cluster{}
-	for i := 0; i < 2048; i++ {
+	for i := range 2048 {
 		cluster := createCluster(fmt.Sprintf("cluster-%d", i), strconv.Itoa(i))
 		clusterPointers = append(clusterPointers, &cluster)
 	}
@@ -282,7 +282,7 @@ func TestConsistentHashingWhenClusterIsAddedAndRemoved(t *testing.T) {
 	prefix := "cluster"
 
 	clusters := []v1alpha1.Cluster{}
-	for i := 0; i < clusterCount; i++ {
+	for i := range clusterCount {
 		id := fmt.Sprintf("%06d", i)
 		cluster := fmt.Sprintf("%s-%s", prefix, id)
 		clusters = append(clusters, createCluster(cluster, id))
@@ -298,7 +298,7 @@ func TestConsistentHashingWhenClusterIsAddedAndRemoved(t *testing.T) {
 	assert.Equal(t, 0, distributionFunction(nil))
 	distributionMap := map[int]int{}
 	assignementMap := map[string]int{}
-	for i := 0; i < clusterCount; i++ {
+	for i := range clusterCount {
 		assignedShard := distributionFunction(&clusters[i])
 		assignementMap[clusters[i].ID] = assignedShard
 		distributionMap[assignedShard]++
@@ -330,7 +330,7 @@ func TestConsistentHashingWhenClusterIsAddedAndRemoved(t *testing.T) {
 	replicasCount = 2
 	distributionFunction = ConsistentHashingWithBoundedLoadsDistributionFunction(getClusterAccessor(clusterList.Items), appAccessor, replicasCount)
 	removedCluster := clusterList.Items[len(clusterList.Items)-1]
-	for i := 0; i < clusterCount; i++ {
+	for i := range clusterCount {
 		c := &clusters[i]
 		assignedShard := distributionFunction(c)
 		prevıouslyAssignedShard := assignementMap[clusters[i].ID]
diff --git a/controller/sync_namespace.go b/controller/sync_namespace.go
index 2acf31654c..3af85e6957 100644
--- a/controller/sync_namespace.go
+++ b/controller/sync_namespace.go
@@ -1,6 +1,8 @@
 package controller
 
 import (
+	"maps"
+
 	gitopscommon "github.com/argoproj/gitops-engine/pkg/sync/common"
 
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -47,9 +49,7 @@ func syncNamespace(syncPolicy *v1alpha1.SyncPolicy) func(m *unstructured.Unstruc
 // with server-side apply
 func appendSSAAnnotation(in map[string]string) map[string]string {
 	r := map[string]string{}
-	for k, v := range in {
-		r[k] = v
-	}
+	maps.Copy(r, in)
 	r[gitopscommon.AnnotationSyncOptions] = gitopscommon.SyncOptionServerSideApply
 	return r
 }
diff --git a/controller/syncid/id_test.go b/controller/syncid/id_test.go
index 4842d47e89..1c89c1b948 100644
--- a/controller/syncid/id_test.go
+++ b/controller/syncid/id_test.go
@@ -18,9 +18,9 @@ func TestGenerate(t *testing.T) {
 	globalCount.Store(0)
 
 	// Run goroutines in parallel to test for race conditions
-	for g := 0; g < goroutines; g++ {
+	for range goroutines {
 		go func() {
-			for i := 0; i < idsPerGoroutine; i++ {
+			for range idsPerGoroutine {
 				id, err := Generate()
 				if err != nil {
 					errCh <- err
@@ -32,7 +32,7 @@ func TestGenerate(t *testing.T) {
 	}
 
 	ids := make(map[string]any)
-	for i := 0; i < goroutines*idsPerGoroutine; i++ {
+	for range goroutines * idsPerGoroutine {
 		select {
 		case err := <-errCh:
 			require.NoError(t, err)
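
Note on the maps.Copy hunks (appcontroller.go, cache/cache.go, sharding/cache.go, sync_namespace.go): maps.Copy entered the standard library in Go 1.21 and replaces the hand-written key/value loops one for one. Below is a minimal standalone sketch of the semantics those call sites rely on; the package and variable names are illustrative, not taken from the patch.

package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]string{"team": "argo", "env": "prod"}

	// maps.Copy(dst, src) inserts every key/value pair of src into dst,
	// overwriting keys that already exist. dst must be initialized:
	// copying into a nil map panics as soon as src is non-empty, which is
	// why persistAppStatus still calls make() before copying.
	dst := make(map[string]string, len(src))
	maps.Copy(dst, src)

	dst["env"] = "dev"                  // mutating the copy leaves src untouched
	fmt.Println(src["env"], dst["env"]) // prod dev
}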
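Note on the loop hunks: the `for range n` rewrites and the deleted `c := c` lines both rely on Go 1.22, which allows ranging over an integer (iterating 0 through n-1) and gives each loop iteration its own copy of the loop variable, making the shadowing idiom before the t.Run closures redundant. A standalone sketch under those assumptions:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// "for i := range n" when the index is needed (sharding_test.go);
	// plain "for range n" when it is not (appcontroller.go, id_test.go).
	for i := range 3 {
		fmt.Println("iteration", i) // 0, 1, 2
	}

	// Each iteration binds a fresh c, so the closures capture distinct
	// values; before Go 1.22 this printed the final element three times
	// unless the loop body re-declared c := c.
	var wg sync.WaitGroup
	for _, c := range []string{"a", "b", "c"} {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(c)
		}()
	}
	wg.Wait()
}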
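Note on the strings.SplitSeq hunks in metrics_test.go: SplitSeq was added in Go 1.24 and returns an iter.Seq[string] that yields the same substrings as strings.Split, but lazily, without allocating the intermediate slice. A rough standalone comparison; the input string here is made up:

package main

import (
	"fmt"
	"strings"
)

func main() {
	body := "metric_a 1\n\nmetric_b 2"

	// strings.Split materializes the whole []string up front.
	parts := strings.Split(body, "\n") // ["metric_a 1", "", "metric_b 2"]
	fmt.Println(len(parts))

	// strings.SplitSeq yields one substring at a time; empty entries still
	// appear, so the helpers keep their `if line == "" { continue }` guard.
	for line := range strings.SplitSeq(body, "\n") {
		if line == "" {
			continue
		}
		fmt.Println(line)
	}
}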