Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 10 additions & 5 deletions pkg/scheduler/core/spreadconstraint/group_clusters.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,9 @@ func GroupClustersWithScore(
calAvailableReplicasFunc func(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster,
) *GroupClustersInfo {
if isTopologyIgnored(placement) {
if shouldIgnoreCalculateAvailableResource(placement) {
return groupClustersIgnoringTopology(clustersScore, spec, nil)
}
return groupClustersIgnoringTopology(clustersScore, spec, calAvailableReplicasFunc)
}

Expand Down Expand Up @@ -343,11 +346,13 @@ func (info *GroupClustersInfo) generateClustersInfo(clustersScore framework.Clus
clusters = append(clusters, clusterScore.Cluster)
}

clustersReplicas := info.calAvailableReplicasFunc(clusters, rbSpec)
for i, clustersReplica := range clustersReplicas {
info.Clusters[i].AvailableReplicas = int64(clustersReplica.Replicas)
info.Clusters[i].AvailableReplicas += int64(rbSpec.AssignedReplicasForCluster(clustersReplica.Name))
info.Clusters[i].AllocatableReplicas = clustersReplica.Replicas
if info.calAvailableReplicasFunc != nil {
clustersReplicas := info.calAvailableReplicasFunc(clusters, rbSpec)
for i, clustersReplica := range clustersReplicas {
info.Clusters[i].AvailableReplicas = int64(clustersReplica.Replicas)
info.Clusters[i].AvailableReplicas += int64(rbSpec.AssignedReplicasForCluster(clustersReplica.Name))
info.Clusters[i].AllocatableReplicas = clustersReplica.Replicas
}
}

sortClusters(info.Clusters, func(i *ClusterDetailInfo, j *ClusterDetailInfo) *bool {
Expand Down
1 change: 1 addition & 0 deletions pkg/scheduler/core/spreadconstraint/group_clusters_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -201,6 +201,7 @@ func Test_GroupClustersWithScore(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.args.spec.Placement = tt.args.placement
groupInfo := GroupClustersWithScore(tt.args.clustersScore, tt.args.placement, tt.args.spec, calAvailableReplicasFunc)
for i, cluster := range groupInfo.Clusters {
if cluster.Name != tt.want.clusters[i] {
Expand Down
30 changes: 22 additions & 8 deletions pkg/scheduler/core/spreadconstraint/select_clusters.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,14 +58,7 @@ func shouldIgnoreSpreadConstraint(placement *policyv1alpha1.Placement) bool {
strategy := placement.ReplicaScheduling

// If the replica division preference is 'static weighted', ignore the declaration specified by spread constraints.
if strategy != nil && strategy.ReplicaSchedulingType == policyv1alpha1.ReplicaSchedulingTypeDivided &&
strategy.ReplicaDivisionPreference == policyv1alpha1.ReplicaDivisionPreferenceWeighted &&
(strategy.WeightPreference == nil ||
len(strategy.WeightPreference.StaticWeightList) != 0 && strategy.WeightPreference.DynamicWeight == "") {
return true
}

return false
return isStaticWeightedStrategy(strategy)
}

func shouldIgnoreAvailableResource(placement *policyv1alpha1.Placement) bool {
Expand All @@ -78,3 +71,24 @@ func shouldIgnoreAvailableResource(placement *policyv1alpha1.Placement) bool {

return false
}

// shouldIgnoreCalculateAvailableResource reports whether the available-resource
// calculation can be skipped for the given placement. Replicas assigned by the
// "Duplicated" strategy or by static weights do not depend on how many replicas
// each cluster can actually accommodate, so there is nothing to calculate.
func shouldIgnoreCalculateAvailableResource(placement *policyv1alpha1.Placement) bool {
	if placement.ReplicaSchedulingType() == policyv1alpha1.ReplicaSchedulingTypeDuplicated {
		return true
	}
	return isStaticWeightedStrategy(placement.ReplicaScheduling)
}

// isStaticWeightedStrategy reports whether the strategy is static weighted:
// the scheduling type is 'Divided', the division preference is 'Weighted',
// and either no weight preference is specified (implying equal weights) or
// static weights are defined without a dynamic weight factor.
func isStaticWeightedStrategy(strategy *policyv1alpha1.ReplicaSchedulingStrategy) bool {
	if strategy == nil ||
		strategy.ReplicaSchedulingType != policyv1alpha1.ReplicaSchedulingTypeDivided ||
		strategy.ReplicaDivisionPreference != policyv1alpha1.ReplicaDivisionPreferenceWeighted {
		return false
	}
	pref := strategy.WeightPreference
	if pref == nil {
		// No preference at all defaults to equally-weighted static assignment.
		return true
	}
	return len(pref.StaticWeightList) != 0 && pref.DynamicWeight == ""
}
81 changes: 81 additions & 0 deletions pkg/scheduler/core/spreadconstraint/select_clusters_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -249,3 +249,84 @@ func TestSelectBestClusters(t *testing.T) {
})
}
}

// TestIgnoreCalculateAvailableResource verifies shouldIgnoreCalculateAvailableResource
// across the replica-scheduling strategy variants: nil strategy, Duplicated,
// and the Divided sub-cases (Aggregated, Weighted with nil/dynamic/static preferences).
func TestIgnoreCalculateAvailableResource(t *testing.T) {
	tests := []struct {
		name      string
		placement *policyv1alpha1.Placement
		ignore    bool
	}{
		{
			name:      "when ReplicaScheduling is nil",
			placement: &policyv1alpha1.Placement{ReplicaScheduling: nil},
			ignore:    true,
		},
		{
			name: "when strategy is duplicated",
			placement: &policyv1alpha1.Placement{
				ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated},
			},
			ignore: true,
		},
		{
			name: "when strategy is divided, and ReplicaDivisionPreference is Aggregated",
			placement: &policyv1alpha1.Placement{
				ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
					ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
					ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
				},
			},
			ignore: false,
		},
		{
			name: "when strategy is divided, and ReplicaDivisionPreference is Weighted, and WeightPreference is nil",
			placement: &policyv1alpha1.Placement{
				ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
					ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
					ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
					WeightPreference:          nil,
				},
			},
			ignore: true,
		},
		{
			name: "when strategy is divided, and ReplicaDivisionPreference is Weighted, and WeightPreference is DynamicWeight",
			placement: &policyv1alpha1.Placement{
				ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
					ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
					ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
					WeightPreference:          &policyv1alpha1.ClusterPreferences{DynamicWeight: policyv1alpha1.DynamicWeightByAvailableReplicas},
				},
			},
			ignore: false,
		},
		{
			name: "when strategy is divided, and ReplicaDivisionPreference is Weighted, and WeightPreference is StaticWeightList",
			placement: &policyv1alpha1.Placement{
				ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
					ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
					ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
					WeightPreference: &policyv1alpha1.ClusterPreferences{
						StaticWeightList: []policyv1alpha1.StaticClusterWeight{
							{
								TargetCluster: policyv1alpha1.ClusterAffinity{
									ClusterNames: []string{"member1"},
								},
								Weight: 1,
							},
						},
					},
				},
			},
			ignore: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Plain bool comparison; reflect.DeepEqual is unnecessary for basic
			// comparable types and hides the simple equality check.
			if got := shouldIgnoreCalculateAvailableResource(tt.placement); got != tt.ignore {
				t.Errorf("shouldIgnoreCalculateAvailableResource() = %v, want %v", got, tt.ignore)
			}
		})
	}
}
26 changes: 25 additions & 1 deletion test/e2e/suites/base/metrics_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"

Expand Down Expand Up @@ -79,14 +80,37 @@ var _ = ginkgo.Describe("metrics testing", func() {
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: framework.ClusterNames(),
},
ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
WeightPreference: &policyv1alpha1.ClusterPreferences{
DynamicWeight: policyv1alpha1.DynamicWeightByAvailableReplicas,
},
},
})
framework.CreateDeployment(kubeClient, deployment)
framework.CreatePropagationPolicy(karmadaClient, policy)
ginkgo.DeferCleanup(func() {
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})
framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name, func(_ *appsv1.Deployment) bool { return true })

bindingName := names.GenerateBindingName(deployment.Kind, deployment.Name)
var targetClusters []string
gomega.Eventually(func() bool {
binding, err := karmadaClient.WorkV1alpha2().ResourceBindings(testNamespace).Get(context.TODO(), bindingName, metav1.GetOptions{})
if err != nil {
return false
}
if len(binding.Spec.Clusters) == 0 {
return false
}
for _, cluster := range binding.Spec.Clusters {
targetClusters = append(targetClusters, cluster.Name)
}
return true
}, framework.PollTimeout, framework.PollInterval).Should(gomega.Equal(true))
framework.WaitDeploymentPresentOnClustersFitWith(targetClusters, deployment.Namespace, deployment.Name, func(_ *appsv1.Deployment) bool { return true })
})

for component, metricNameList := range componentMetrics {
Expand Down