[YUNIKORN-1957] Fixing golint issue
rrajesh-cloudera committed Sep 6, 2024
1 parent c772729 commit 4c3249f
Showing 1 changed file with 20 additions and 9 deletions.
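The golint issue being fixed is an unchecked error return: common.AddQueue returns an error that the config-update callbacks previously discarded, so the commit captures it and propagates it out of each callback. A minimal, self-contained sketch of that pattern follows; addQueue and updateConfig are simplified stand-ins with hypothetical signatures, not the real YuniKorn e2e helpers.

```go
package main

import (
	"errors"
	"fmt"
)

// addQueue is a simplified stand-in for common.AddQueue in the YuniKorn e2e
// helpers: it returns an error that the linter expects callers to handle.
func addQueue(queues map[string]struct{}, name string) error {
	if _, exists := queues[name]; exists {
		return errors.New("queue already exists: " + name)
	}
	queues[name] = struct{}{}
	return nil
}

// updateConfig mirrors the shape of the callback passed to
// UpdateCustomConfigMapWrapperWithMap: the first AddQueue error is now
// propagated instead of being silently dropped, and the last call's error
// doubles as the callback's return value.
func updateConfig(queues map[string]struct{}) error {
	if err := addQueue(queues, "sandbox1"); err != nil {
		return err
	}
	return addQueue(queues, "sandbox2")
}

func main() {
	queues := map[string]struct{}{}
	if err := updateConfig(queues); err != nil {
		fmt.Println("config update failed:", err)
		return
	}
	fmt.Println("queues configured:", len(queues))
}
```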
29 changes: 20 additions & 9 deletions test/e2e/user_group_limit/user_group_limit_test.go
@@ -35,6 +35,7 @@ import (
amCommon "github.com/apache/yunikorn-k8shim/pkg/admission/common"
amconf "github.com/apache/yunikorn-k8shim/pkg/admission/conf"
"github.com/apache/yunikorn-k8shim/pkg/common/constants"
tests "github.com/apache/yunikorn-k8shim/test/e2e"
"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
@@ -647,8 +648,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

-common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
@@ -669,6 +669,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
},
},
})
+if err != nil {
+return err
+}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})
@@ -696,15 +699,15 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
usergroup2Sandbox1Pod3 := deploySleepPod(usergroup2, sandboxQueue1, false, "because final memory usage is more than wildcard maxapplications")
checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1, usergroup2Sandbox1Pod2})

-//Update Wildcard user entry limit to 3
+// Update Wildcard user entry limit to 3
ginkgo.By("Update config")
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

-common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
@@ -725,6 +728,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
},
},
})
+if err != nil {
+return err
+}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})
@@ -745,7 +751,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

-common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
Expand All @@ -765,6 +771,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
},
},
}})
+if err != nil {
+return err
+}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})
@@ -792,15 +801,15 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
usergroup2 = &si.UserGroupInformation{User: user2, Groups: []string{group2}}
group2Sandbox1Pod3 := deploySleepPod(usergroup2, sandboxQueue1, false, "because final memory usage is more than wildcard maxapplications")
checkUsageWildcardGroups(groupTestType, group2, sandboxQueue1, []*v1.Pod{group2Sandbox1Pod1, group2Sandbox1Pod2})
-//Update Wildcard group entry limit to 3
+// Update Wildcard group entry limit to 3
ginkgo.By("Update config")
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

-common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
@@ -820,6 +829,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
},
},
}})
+if err != nil {
+return err
+}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})
@@ -832,7 +844,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
})

ginkgo.AfterEach(func() {
-//tests.DumpClusterInfoIfSpecFailed(suiteName, []string{ns.Name})
+tests.DumpClusterInfoIfSpecFailed(suiteName, []string{ns.Name})

// Delete all sleep pods
ginkgo.By("Delete all sleep pods")
@@ -941,5 +953,4 @@ func checkUsageWildcardGroups(testType TestType, name string, queuePath string,
Ω(resourceUsageDAO.ResourceUsage).NotTo(gomega.BeNil())
Ω(resourceUsageDAO.ResourceUsage.Resources["pods"]).To(gomega.Equal(resources.Quantity(len(expectedRunningPods))))
Ω(resourceUsageDAO.RunningApplications).To(gomega.ConsistOf(appIDs...))

}
