patches volume claim templates into pods before ValidatePodSpec in workloadspread patch validation (#1740)

Signed-off-by: AiRanthem <zhongtianyun.zty@alibaba-inc.com>
Co-authored-by: AiRanthem <zhongtianyun.zty@alibaba-inc.com>
AiRanthem and AiRanthem authored Sep 24, 2024
1 parent 198461e commit 4f04e93
Showing 3 changed files with 191 additions and 113 deletions.
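
Editorial note (not part of the commit): the webhook validates each subset's patch by strategic-merge-patching it onto the target workload's pod template and then running pod-spec validation on the result. CloneSet and StatefulSet declare storage in spec.volumeClaimTemplates rather than in the pod template, so a container volumeMount that refers to one of those claims has no matching volume during that check, and validation could reject a patch that is actually fine. The change below injects the claim templates as pod volumes before validating. A minimal sketch of the idea, using only apimachinery helpers; the helper name exposeClaims, the "data" claim, and the "zone-a" patch are illustrative, not taken from the repository:

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// exposeClaims mirrors the commit's idea: surface each volumeClaimTemplate as a
// pod volume so that volumeMounts referring to it resolve during validation.
// (Illustrative helper, not the repository's code.)
func exposeClaims(tmpl corev1.PodTemplateSpec, claims []corev1.PersistentVolumeClaim) corev1.PodTemplateSpec {
	for _, pvc := range claims {
		tmpl.Spec.Volumes = append(tmpl.Spec.Volumes, corev1.Volume{
			Name: pvc.Name,
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
			},
		})
	}
	return tmpl
}

func main() {
	// A pod template whose only volume comes from a volumeClaimTemplate named "data".
	tmpl := corev1.PodTemplateSpec{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:         "main",
				Image:        "img:latest",
				VolumeMounts: []corev1.VolumeMount{{Name: "data", MountPath: "/data"}},
			}},
		},
	}
	claims := []corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "data"}}}

	// Inject the claim as a volume, then apply a subset patch the way the webhook
	// does before handing the result to pod-spec validation.
	augmented := exposeClaims(tmpl, claims)
	original, _ := json.Marshal(augmented)
	patch := []byte(`{"metadata":{"annotations":{"subset":"zone-a"}}}`)
	modified, err := strategicpatch.StrategicMergePatch(original, patch, &corev1.Pod{})
	fmt.Println(err == nil, len(modified) > 0)
}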
21 changes: 18 additions & 3 deletions pkg/webhook/workloadspread/validating/workloadspread_validation.go
@@ -285,23 +285,24 @@ func validateWorkloadSpreadSubsets(ws *appsv1alpha1.WorkloadSpread, subsets []ap
allErrs = append(allErrs, corevalidation.ValidateTolerations(coreTolerations, fldPath.Index(i).Child("tolerations"))...)
}

//TODO validate patch
if subset.Patch.Raw != nil {
// In case the WorkloadSpread is created before the workload, no workloadTemplate is obtained; skip the remaining checks.
if workloadTemplate != nil {
// get the PodTemplateSpec from the workload
var podSpec v1.PodTemplateSpec
switch workloadTemplate.GetObjectKind().GroupVersionKind() {
case controllerKruiseKindCS:
podSpec = workloadTemplate.(*appsv1alpha1.CloneSet).Spec.Template
cs := workloadTemplate.(*appsv1alpha1.CloneSet)
podSpec = withVolumeClaimTemplates(cs.Spec.Template, cs.Spec.VolumeClaimTemplates)
case controllerKindDep:
podSpec = workloadTemplate.(*appsv1.Deployment).Spec.Template
case controllerKindRS:
podSpec = workloadTemplate.(*appsv1.ReplicaSet).Spec.Template
case controllerKindJob:
podSpec = workloadTemplate.(*batchv1.Job).Spec.Template
case controllerKindSts:
podSpec = workloadTemplate.(*appsv1.StatefulSet).Spec.Template
sts := workloadTemplate.(*appsv1.StatefulSet)
podSpec = withVolumeClaimTemplates(sts.Spec.Template, sts.Spec.VolumeClaimTemplates)
}
podBytes, _ := json.Marshal(podSpec)
modified, err := strategicpatch.StrategicMergePatch(podBytes, subset.Patch.Raw, &v1.Pod{})
@@ -358,6 +359,20 @@ func validateWorkloadSpreadSubsets(ws *appsv1alpha1.WorkloadSpread, subsets []ap
return allErrs
}

func withVolumeClaimTemplates(pod v1.PodTemplateSpec, claims []v1.PersistentVolumeClaim) v1.PodTemplateSpec {
for _, pvc := range claims {
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: pvc.Name,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
})
}
return pod
}

func validateWorkloadSpreadConflict(ws *appsv1alpha1.WorkloadSpread, others []appsv1alpha1.WorkloadSpread, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, other := range others {
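A note on the new helper (editorial, not part of the commit): core pod-spec validation requires every container volumeMount to name a volume declared in the pod spec, which is the check that previously failed when the volume came from a volumeClaimTemplate. For that check only the volume name matters, so withVolumeClaimTemplates reusing the claim-template name as both the volume name and the claimName is sufficient; the concrete PVC names that the CloneSet or StatefulSet controller generates at runtime are not needed during validation.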
175 changes: 173 additions & 2 deletions pkg/webhook/workloadspread/validating/workloadspread_validation_test.go
@@ -19,14 +19,17 @@ import (
"strconv"
"testing"

utilruntime "k8s.io/apimachinery/pkg/util/runtime"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/pointer"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
@@ -943,3 +946,171 @@ func TestValidateWorkloadSpreadConflict(t *testing.T) {
})
}
}

func Test_validateWorkloadSpreadSubsets(t *testing.T) {
cloneset := &appsv1alpha1.CloneSet{
TypeMeta: metav1.TypeMeta{
Kind: "CloneSet",
APIVersion: "apps.kruise.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cs",
},
Spec: appsv1alpha1.CloneSetSpec{
Replicas: ptr.To(int32(6)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "test",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "test",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "main",
Image: "img:latest",
VolumeMounts: []corev1.VolumeMount{
{
Name: "vol-1--0",
MountPath: "/logs",
SubPath: "logs",
},
},
},
},
},
},
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "vol-1--0",
},
},
},
},
}

sts := &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-sts",
},
Spec: appsv1.StatefulSetSpec{
Replicas: ptr.To(int32(6)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "nginx",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "nginx",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "main",
Image: "img:latest",
VolumeMounts: []corev1.VolumeMount{
{
Name: "vol-1--0",
MountPath: "/logs",
SubPath: "logs",
},
},
},
},
},
},
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "vol-1--0",
},
},
},
},
}
patchData := map[string]any{
"metadata": map[string]any{
"annotations": map[string]any{
"some-key": "some-value",
},
},
}
patch, _ := json.Marshal(patchData)
ws := &appsv1alpha1.WorkloadSpread{
Spec: appsv1alpha1.WorkloadSpreadSpec{
Subsets: []appsv1alpha1.WorkloadSpreadSubset{
{
Name: "test",
Patch: runtime.RawExtension{
Raw: patch,
},
},
},
},
}

badCloneSet := cloneset.DeepCopy()
badCloneSet.Spec.VolumeClaimTemplates[0].Name = "bad-boy"
badSts := sts.DeepCopy()
badSts.Spec.VolumeClaimTemplates[0].Name = "bad-boy"

testCases := []struct {
name string
workload client.Object
testFunc func(errList field.ErrorList)
}{
{
name: "good cloneset",
workload: cloneset,
testFunc: func(errList field.ErrorList) {
if len(errList) != 0 {
t.Fatalf("expected 0 error, got %d, errList = %+v", len(errList), errList)
}
},
}, {
name: "bad cloneset",
workload: badCloneSet,
testFunc: func(errList field.ErrorList) {
if len(errList) != 1 {
t.Fatalf("expected 1 error, got %d, errList = %+v", len(errList), errList)
}
},
}, {
name: "good sts",
workload: sts,
testFunc: func(errList field.ErrorList) {
if len(errList) != 0 {
t.Fatalf("expected 0 error, got %d, errList = %+v", len(errList), errList)
}
},
}, {
name: "bad sts",
workload: badSts,
testFunc: func(errList field.ErrorList) {
if len(errList) != 1 {
t.Fatalf("expected 1 error, got %d, errList = %+v", len(errList), errList)
}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tc.testFunc(
validateWorkloadSpreadSubsets(ws, ws.Spec.Subsets, tc.workload, field.NewPath("spec").Child("subsets")),
)
})
}
}
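
For reference (not part of the commit), the new table-driven test can be run on its own with standard Go tooling from the repository root:

    go test ./pkg/webhook/workloadspread/validating/... -run Test_validateWorkloadSpreadSubsets -v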
108 changes: 0 additions & 108 deletions test/e2e/apps/workloadspread.go
@@ -1929,113 +1929,5 @@ var _ = SIGDescribe("workloadspread", func() {

ginkgo.By("elastic deploy for deployment, zone-a=2, zone-b=nil, done")
})

//ginkgo.It("deploy for job, zone-a=1, zone-b=nil", func() {
// job := tester.NewBaseJob(ns)
// // create workloadSpread
// targetRef := appsv1alpha1.TargetReference{
// APIVersion: controllerKindJob.GroupVersion().String(),
// Kind: controllerKindJob.Kind,
// Name: job.Name,
// }
// subset1 := appsv1alpha1.WorkloadSpreadSubset{
// Name: "zone-a",
// RequiredNodeSelectorTerm: &corev1.NodeSelectorTerm{
// MatchExpressions: []corev1.NodeSelectorRequirement{
// {
// Key: WorkloadSpreadFakeZoneKey,
// Operator: corev1.NodeSelectorOpIn,
// Values: []string{"zone-a"},
// },
// },
// },
// MaxReplicas: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
// Patch: runtime.RawExtension{
// Raw: []byte(`{"metadata":{"annotations":{"subset":"zone-a"}}}`),
// },
// }
// subset2 := appsv1alpha1.WorkloadSpreadSubset{
// Name: "zone-b",
// RequiredNodeSelectorTerm: &corev1.NodeSelectorTerm{
// MatchExpressions: []corev1.NodeSelectorRequirement{
// {
// Key: WorkloadSpreadFakeZoneKey,
// Operator: corev1.NodeSelectorOpIn,
// Values: []string{"zone-b"},
// },
// },
// },
// Patch: runtime.RawExtension{
// Raw: []byte(`{"metadata":{"annotations":{"subset":"zone-b"}}}`),
// },
// }
// workloadSpread := tester.NewWorkloadSpread(ns, workloadSpreadName, &targetRef, []appsv1alpha1.WorkloadSpreadSubset{subset1, subset2})
// workloadSpread = tester.CreateWorkloadSpread(workloadSpread)
//
// job.Spec.Completions = pointer.Int32Ptr(10)
// job.Spec.Parallelism = pointer.Int32Ptr(2)
// job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever
// job = tester.CreateJob(job)
// tester.WaitJobCompleted(job)
//
// // get pods, and check workloadSpread
// ginkgo.By(fmt.Sprintf("get job(%s/%s) pods, and check workloadSpread(%s/%s) status", job.Namespace, job.Name, workloadSpread.Namespace, workloadSpread.Name))
// faster, err := util.GetFastLabelSelector(job.Spec.Selector)
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
// podList, err := tester.C.CoreV1().Pods(job.Namespace).List(metav1.ListOptions{LabelSelector: faster.String()})
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
//
// matchedPods := make([]corev1.Pod, 0, len(podList.Items))
// for i := range podList.Items {
// if podList.Items[i].Status.Phase == corev1.PodSucceeded {
// matchedPods = append(matchedPods, podList.Items[i])
// }
// }
//
// pods := matchedPods
// gomega.Expect(pods).To(gomega.HaveLen(10))
// subset1Pods := 0
// subset2Pods := 0
// for _, pod := range pods {
// if str, ok := pod.Annotations[workloadspread.MatchedWorkloadSpreadSubsetAnnotations]; ok {
// var injectWorkloadSpread *workloadspread.InjectWorkloadSpread
// err := json.Unmarshal([]byte(str), &injectWorkloadSpread)
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
// if injectWorkloadSpread.Subset == subset1.Name {
// subset1Pods++
// gomega.Expect(injectWorkloadSpread.Name).To(gomega.Equal(workloadSpread.Name))
// gomega.Expect(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions).To(gomega.Equal(subset1.RequiredNodeSelectorTerm.MatchExpressions))
// gomega.Expect(pod.Annotations["subset"]).To(gomega.Equal(subset1.Name))
// } else if injectWorkloadSpread.Subset == subset2.Name {
// subset2Pods++
// gomega.Expect(injectWorkloadSpread.Name).To(gomega.Equal(workloadSpread.Name))
// gomega.Expect(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions).To(gomega.Equal(subset2.RequiredNodeSelectorTerm.MatchExpressions))
// gomega.Expect(pod.Annotations["subset"]).To(gomega.Equal(subset2.Name))
// }
// } else {
// // others PodDeletionCostAnnotation not set
// gomega.Expect(pod.Annotations[workloadspread.PodDeletionCostAnnotation]).To(gomega.Equal(""))
// }
// }
// gomega.Expect(subset1Pods).To(gomega.Equal(5))
// gomega.Expect(subset2Pods).To(gomega.Equal(5))
//
// // check workloadSpread status
// ginkgo.By(fmt.Sprintf("check workloadSpread(%s/%s) status", workloadSpread.Namespace, workloadSpread.Name))
// workloadSpread, err = kc.AppsV1alpha1().WorkloadSpreads(workloadSpread.Namespace).Get(workloadSpread.Name, metav1.GetOptions{})
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
//
// gomega.Expect(workloadSpread.Status.SubsetStatuses[0].Name).To(gomega.Equal(workloadSpread.Spec.Subsets[0].Name))
// gomega.Expect(workloadSpread.Status.SubsetStatuses[0].MissingReplicas).To(gomega.Equal(int32(1)))
// gomega.Expect(len(workloadSpread.Status.SubsetStatuses[0].CreatingPods)).To(gomega.Equal(0))
// gomega.Expect(len(workloadSpread.Status.SubsetStatuses[0].DeletingPods)).To(gomega.Equal(0))
//
// gomega.Expect(workloadSpread.Status.SubsetStatuses[1].Name).To(gomega.Equal(workloadSpread.Spec.Subsets[1].Name))
// gomega.Expect(workloadSpread.Status.SubsetStatuses[1].MissingReplicas).To(gomega.Equal(int32(-1)))
// gomega.Expect(len(workloadSpread.Status.SubsetStatuses[1].CreatingPods)).To(gomega.Equal(0))
// gomega.Expect(len(workloadSpread.Status.SubsetStatuses[1].DeletingPods)).To(gomega.Equal(0))
//
// ginkgo.By("workloadSpread for job, done")
//})
})
})
