diff --git a/apis/apps/v1alpha1/imagepulljob_types.go b/apis/apps/v1alpha1/imagepulljob_types.go index f5ed438e2b..01cc5d6f36 100644 --- a/apis/apps/v1alpha1/imagepulljob_types.go +++ b/apis/apps/v1alpha1/imagepulljob_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -63,6 +64,10 @@ type ImagePullJobTemplate struct { // +optional PodSelector *ImagePullJobPodSelector `json:"podSelector,omitempty"` + // Tolerations allow image pull to be scheduled onto nodes with specific taints + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // Parallelism is the requested parallelism, it can be set to any non-negative value. If it is unspecified, // it defaults to 1. If it is specified as 0, then the Job is effectively paused until it is increased. // +optional diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index 9440ee2715..d0485f17f0 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -1504,6 +1504,13 @@ func (in *ImagePullJobTemplate) DeepCopyInto(out *ImagePullJobTemplate) { *out = new(ImagePullJobPodSelector) (*in).DeepCopyInto(*out) } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Parallelism != nil { in, out := &in.Parallelism, &out.Parallelism *out = new(intstr.IntOrString) diff --git a/config/crd/bases/apps.kruise.io_imagelistpulljobs.yaml b/config/crd/bases/apps.kruise.io_imagelistpulljobs.yaml index 8081dcf0f1..9108b11993 100644 --- a/config/crd/bases/apps.kruise.io_imagelistpulljobs.yaml +++ b/config/crd/bases/apps.kruise.io_imagelistpulljobs.yaml @@ -245,6 +245,46 @@ spec: type: array type: object x-kubernetes-map-type: atomic + 
tolerations: + description: Tolerations allow image pull to be scheduled onto nodes + with specific taints + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array required: - completionPolicy - images diff --git a/config/crd/bases/apps.kruise.io_imagepulljobs.yaml b/config/crd/bases/apps.kruise.io_imagepulljobs.yaml index 206af585bf..7e22a80d5a 100644 --- a/config/crd/bases/apps.kruise.io_imagepulljobs.yaml +++ b/config/crd/bases/apps.kruise.io_imagepulljobs.yaml @@ -251,6 +251,46 @@ spec: type: array type: object x-kubernetes-map-type: atomic + tolerations: + description: Tolerations allow image pull to be scheduled onto nodes + with specific taints + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array required: - completionPolicy - image diff --git a/pkg/controller/imagepulljob/imagepulljob_controller.go b/pkg/controller/imagepulljob/imagepulljob_controller.go index 3a1138f651..40b774d721 100644 --- a/pkg/controller/imagepulljob/imagepulljob_controller.go +++ b/pkg/controller/imagepulljob/imagepulljob_controller.go @@ -232,6 +232,12 @@ func (r *ReconcileImagePullJob) Reconcile(_ context.Context, request reconcile.R if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get NodeImages: %v", err) } + //if utilfeature.DefaultFeatureGate.Enabled(features.ImagePullJobTolerationGate) { + nodeImages, err = utilimagejob.TolerationNodeImages(r.Client, nodeImages, job) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get NodeImages for Toleration: %v", err) + } + //} // If resourceVersion expectations have not satisfied yet, just skip this reconcile for _, nodeImage := range nodeImages { diff --git a/pkg/features/kruise_features.go b/pkg/features/kruise_features.go index 78eab56276..0253e63c8f 100644 --- a/pkg/features/kruise_features.go +++ b/pkg/features/kruise_features.go @@ -108,6 +108,9 @@ const ( // ImagePullJobGate enable imagepulljob-controller execute ImagePullJob. ImagePullJobGate featuregate.Feature = "ImagePullJobGate" + // ImagePullJobTolerationGate enable ImagePullJob support Tolerations. + ImagePullJobTolerationGate featuregate.Feature = "ImagePullJobTolerationGate" + // ResourceDistributionGate enable resourcedistribution-controller execute ResourceDistribution. 
ResourceDistributionGate featuregate.Feature = "ResourceDistributionGate" @@ -157,6 +160,7 @@ var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ CloneSetEventHandlerOptimization: {Default: false, PreRelease: featuregate.Alpha}, PreparingUpdateAsUpdate: {Default: false, PreRelease: featuregate.Alpha}, ImagePullJobGate: {Default: false, PreRelease: featuregate.Alpha}, + ImagePullJobTolerationGate: {Default: false, PreRelease: featuregate.Alpha}, ResourceDistributionGate: {Default: false, PreRelease: featuregate.Alpha}, DeletionProtectionForCRDCascadingGate: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/pkg/util/imagejob/imagejob_reader.go b/pkg/util/imagejob/imagejob_reader.go index 0bea0c538f..447db75ae4 100644 --- a/pkg/util/imagejob/imagejob_reader.go +++ b/pkg/util/imagejob/imagejob_reader.go @@ -153,6 +153,47 @@ func GetNodeImagesForJob(reader client.Reader, job *appsv1alpha1.ImagePullJob) ( return convertNodeImages(nodeImageList), err } +func TolerationNodeImages(reader client.Reader, nodeImages []*appsv1alpha1.NodeImage, job *appsv1alpha1.ImagePullJob) (tolerationNodeImage []*appsv1alpha1.NodeImage, err error) { + for _, ng := range nodeImages { + var node v1.Node + if err = reader.Get(context.TODO(), types.NamespacedName{Name: ng.Name}, &node); err != nil { + if errors.IsNotFound(err) { + tolerationNodeImage = append(tolerationNodeImage, ng) + continue + } + return nil, fmt.Errorf("get specific Node %s error: %v", ng.Name, err) + } + if nodeMatchesTolerations(node, job.Spec.Tolerations) { + tolerationNodeImage = append(tolerationNodeImage, ng) + } + } + return +} + +// nodeMatchesTolerations pod must have Toleration that matches all node Taint to return true +func nodeMatchesTolerations(node v1.Node, tolerations []v1.Toleration) bool { + for _, taint := range node.Spec.Taints { + if !tolerationToleratesTaint(tolerations, taint) { + return false + } + } + return true +} + +func tolerationToleratesTaint(tolerations 
[]v1.Toleration, taint v1.Taint) bool { + for _, toleration := range tolerations { + if toleration.Key == taint.Key && toleration.Effect == taint.Effect { + if toleration.Operator == v1.TolerationOpExists { + return true + } + if toleration.Operator == v1.TolerationOpEqual && toleration.Value == taint.Value { + return true + } + } + } + return false +} + func convertNodeImages(nodeImageList *appsv1alpha1.NodeImageList) []*appsv1alpha1.NodeImage { nodeImages := make([]*appsv1alpha1.NodeImage, 0, len(nodeImageList.Items)) for i := range nodeImageList.Items { diff --git a/pkg/util/imagejob/imagejob_reader_test.go b/pkg/util/imagejob/imagejob_reader_test.go index daafb322e3..e57de41fb6 100644 --- a/pkg/util/imagejob/imagejob_reader_test.go +++ b/pkg/util/imagejob/imagejob_reader_test.go @@ -92,6 +92,9 @@ var ( { ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: map[string]string{"arch": "arm64"}}, }, + { + ObjectMeta: metav1.ObjectMeta{Name: "node6"}, + }, } initialPods = []*v1.Pod{ @@ -166,6 +169,125 @@ var ( ObjectMeta: metav1.ObjectMeta{Name: "job6", Finalizers: []string{"apps.kruise.io/fake-block"}}, Spec: appsv1alpha1.ImagePullJobSpec{}, }, + { + ObjectMeta: metav1.ObjectMeta{Name: "job7"}, + Spec: appsv1alpha1.ImagePullJobSpec{ + ImagePullJobTemplate: appsv1alpha1.ImagePullJobTemplate{ + Tolerations: []v1.Toleration{ + { + Key: "key1", + Value: "val1", + Effect: v1.TaintEffectNoSchedule, + Operator: v1.TolerationOpEqual, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "job8"}, + Spec: appsv1alpha1.ImagePullJobSpec{ + ImagePullJobTemplate: appsv1alpha1.ImagePullJobTemplate{ + Tolerations: []v1.Toleration{ + { + Key: "key2", + Value: "val2", + Effect: v1.TaintEffectNoExecute, + Operator: v1.TolerationOpExists, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "job9"}, + Spec: appsv1alpha1.ImagePullJobSpec{ + ImagePullJobTemplate: appsv1alpha1.ImagePullJobTemplate{ + Tolerations: []v1.Toleration{ + { + Key: "key1", + Value: 
"val1", + Effect: v1.TaintEffectNoSchedule, + Operator: v1.TolerationOpExists, + }, + { + Key: "key2", + Value: "val2", + Effect: v1.TaintEffectNoExecute, + Operator: v1.TolerationOpExists, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "job10"}, + Spec: appsv1alpha1.ImagePullJobSpec{ + ImagePullJobTemplate: appsv1alpha1.ImagePullJobTemplate{ + Tolerations: []v1.Toleration{ + { + Key: "key2", + Value: "val2", + Effect: v1.TaintEffectNoSchedule, + Operator: v1.TolerationOpExists, + }, + }, + }, + }, + }, + } + + initialNode = []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"arch": "amd64"}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"arch": "arm64"}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "node4"}, + Spec: v1.NodeSpec{ + Taints: []v1.Taint{ + { + Key: "key1", + Value: "val1", + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: map[string]string{"arch": "arm64"}}, + Spec: v1.NodeSpec{ + Taints: []v1.Taint{ + { + Key: "key2", + Value: "val2", + Effect: v1.TaintEffectNoExecute, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "node6"}, + Spec: v1.NodeSpec{ + Taints: []v1.Taint{ + { + Key: "key1", + Value: "val1", + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "key2", + Value: "val2", + Effect: v1.TaintEffectNoExecute, + }, + }, + }, + }, } ) @@ -183,6 +305,11 @@ func TestAll(t *testing.T) { g.Expect(err).NotTo(gomega.HaveOccurred()) } } + for _, o := range initialNode { + if err = c.Create(context.TODO(), o); err != nil { + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + } for _, o := range initialPods { if o.Namespace == "" { o.Namespace = metav1.NamespaceDefault @@ -196,6 +323,15 @@ func TestAll(t *testing.T) { if o.Namespace == "" { o.Namespace = metav1.NamespaceDefault } + + // A mock node that is not-ready after 
// testTolerationNodeImages verifies that TolerationNodeImages keeps exactly
// the NodeImages whose Node taints are tolerated by each job's tolerations.
//
// NOTE(review): every job in initialJobs is mutated by TestAll before Create
// to carry an extra "node.kubernetes.io/not-ready" (Exists/NoSchedule)
// toleration, so the expectations below include that implicit toleration.
// Jobs are referenced by fixture index: initialJobs[6..9] are job7..job10.
func testTolerationNodeImages(g *gomega.GomegaWithT) {
	// Helper: run the filter and return the sorted node names that survived.
	getTolerationNodeImages := func(ng []*appsv1alpha1.NodeImage, job *appsv1alpha1.ImagePullJob) []string {
		nodeNames, err := TolerationNodeImages(c, ng, job)
		g.Expect(err).NotTo(gomega.HaveOccurred())
		names := sets.NewString()
		for _, n := range nodeNames {
			names.Insert(n.Name)
		}
		return names.List()
	}

	// job1: no explicit tolerations -> only untainted nodes (node1-3) survive.
	g.Expect(getTolerationNodeImages(initialNodeImages, initialJobs[0])).Should(gomega.Equal([]string{"node1", "node2", "node3"}))
	// job7: Equal key1=val1/NoSchedule -> also tolerates node4's taint.
	g.Expect(getTolerationNodeImages(initialNodeImages, initialJobs[6])).Should(gomega.Equal([]string{"node1", "node2", "node3", "node4"}))
	// job8: Exists key2/NoExecute -> also tolerates node5's taint.
	g.Expect(getTolerationNodeImages(initialNodeImages, initialJobs[7])).Should(gomega.Equal([]string{"node1", "node2", "node3", "node5"}))
	// job9: tolerates both key1/NoSchedule and key2/NoExecute -> all nodes,
	// including node6 which carries both taints.
	g.Expect(getTolerationNodeImages(initialNodeImages, initialJobs[8])).Should(gomega.Equal([]string{"node1", "node2", "node3", "node4", "node5", "node6"}))
	// job10: Exists key2 but effect NoSchedule, while node5/node6 taint key2
	// has effect NoExecute -> effect mismatch, so only untainted nodes match.
	g.Expect(getTolerationNodeImages(initialNodeImages, initialJobs[9])).Should(gomega.Equal([]string{"node1", "node2", "node3"}))
}
"job4", "job7", "job8", "job9"})) + g.Expect(getActiveJobsForNodeImage(initialNodeImages[4])).Should(gomega.Equal([]string{"job1", "job10", "job3", "job5", "job7", "job8", "job9"})) } diff --git a/test/e2e/apps/imagelistpulljobs.go b/test/e2e/apps/imagelistpulljobs.go index 885bf18d27..dbdf02073f 100644 --- a/test/e2e/apps/imagelistpulljobs.go +++ b/test/e2e/apps/imagelistpulljobs.go @@ -113,6 +113,15 @@ var _ = SIGDescribe("PullImages", func() { ActiveDeadlineSeconds: utilpointer.Int64Ptr(50), TTLSecondsAfterFinished: utilpointer.Int32Ptr(20), }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err := testerForImageListPullJob.CreateJob(job) @@ -169,6 +178,15 @@ var _ = SIGDescribe("PullImages", func() { CompletionPolicy: appsv1alpha1.CompletionPolicy{ Type: appsv1alpha1.Always, }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err := testerForImageListPullJob.CreateJob(job) @@ -224,6 +242,15 @@ var _ = SIGDescribe("PullImages", func() { CompletionPolicy: appsv1alpha1.CompletionPolicy{ Type: appsv1alpha1.Always, }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err := testerForImageListPullJob.CreateJob(job) @@ -272,6 +299,15 @@ var _ = SIGDescribe("PullImages", func() { CompletionPolicy: appsv1alpha1.CompletionPolicy{ Type: appsv1alpha1.Never, }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: 
v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err := testerForImageListPullJob.CreateJob(job) diff --git a/test/e2e/apps/pullimages.go b/test/e2e/apps/pullimages.go index 3596e27b72..76d76e4e7f 100644 --- a/test/e2e/apps/pullimages.go +++ b/test/e2e/apps/pullimages.go @@ -63,6 +63,12 @@ var _ = SIGDescribe("PullImage", func() { } else { framework.Logf("[FAILURE_DEBUG] List NodeImages: %v", util.DumpJSON(nodeImageList)) } + nodeList, err := testerForNodeImage.ListNode() + if err != nil { + framework.Logf("[FAILURE_DEBUG] List Node in %s error: %v", ns, err) + } else { + framework.Logf("[FAILURE_DEBUG] List Node in %s: %v", ns, util.DumpJSON(nodeList)) + } imagePullJobList, err := testerForImagePullJob.ListJobs(ns) if err != nil { framework.Logf("[FAILURE_DEBUG] List ImagePullJobs in %s error: %v", ns, err) @@ -148,6 +154,15 @@ var _ = SIGDescribe("PullImage", func() { ActiveDeadlineSeconds: utilpointer.Int64Ptr(50), TTLSecondsAfterFinished: utilpointer.Int32Ptr(20), }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err = testerForImagePullJob.CreateJob(job) @@ -235,6 +250,15 @@ var _ = SIGDescribe("PullImage", func() { CompletionPolicy: appsv1alpha1.CompletionPolicy{ Type: appsv1alpha1.Never, }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err = testerForImagePullJob.CreateJob(job) @@ -323,6 +347,15 @@ var _ = SIGDescribe("PullImage", func() { TTLSecondsAfterFinished: utilpointer.Int32Ptr(20), }, PullSecrets: 
[]string{"test-pull-secret"}, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } secret := &v1.Secret{ @@ -382,6 +415,15 @@ var _ = SIGDescribe("PullImage", func() { CompletionPolicy: appsv1alpha1.CompletionPolicy{ Type: appsv1alpha1.Always, }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err := testerForImagePullJob.CreateJob(job) @@ -427,6 +469,15 @@ var _ = SIGDescribe("PullImage", func() { CompletionPolicy: appsv1alpha1.CompletionPolicy{ Type: appsv1alpha1.Never, }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err := testerForImagePullJob.CreateJob(job) @@ -472,6 +523,15 @@ var _ = SIGDescribe("PullImage", func() { CompletionPolicy: appsv1alpha1.CompletionPolicy{ Type: appsv1alpha1.Never, }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }}, }, } err := testerForImagePullJob.CreateJob(job1) @@ -509,6 +569,15 @@ var _ = SIGDescribe("PullImage", func() { CompletionPolicy: appsv1alpha1.CompletionPolicy{ Type: appsv1alpha1.Never, }, + Tolerations: []v1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: v1.TolerationOpExists, + Effect: "NoSchedule", + }, { + Key: "node-role.kubernetes.io/control-plane", + Operator: 
// ListNode lists all v1 Nodes in the cluster through the core clientset.
// Used by the e2e suite's failure-debug path to dump node state (including
// taints) when a pull job does not complete as expected.
func (tester *NodeImageTester) ListNode() (*v1.NodeList, error) {
	return tester.c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
}