diff --git a/apis/apps/pub/inplace_update.go b/apis/apps/pub/inplace_update.go
index 8274473e21..01122b66fc 100644
--- a/apis/apps/pub/inplace_update.go
+++ b/apis/apps/pub/inplace_update.go
@@ -68,6 +68,9 @@ type InPlaceUpdateState struct {
 	// NextContainerRefMetadata is the containers with lower priority that waiting for in-place update labels/annotations in next batch.
 	NextContainerRefMetadata map[string]metav1.ObjectMeta `json:"nextContainerRefMetadata,omitempty"`
 
+	// NextContainerResources is the containers with lower priority that are waiting for in-place resource updates in the next batch.
+	NextContainerResources map[string]v1.ResourceRequirements `json:"nextContainerResources,omitempty"`
+
 	// PreCheckBeforeNext is the pre-check that must pass before the next containers can be in-place update.
 	PreCheckBeforeNext *InPlaceUpdatePreCheckBeforeNext `json:"preCheckBeforeNext,omitempty"`
 
diff --git a/apis/apps/pub/zz_generated.deepcopy.go b/apis/apps/pub/zz_generated.deepcopy.go
index cda5d19b79..f0a501645a 100644
--- a/apis/apps/pub/zz_generated.deepcopy.go
+++ b/apis/apps/pub/zz_generated.deepcopy.go
@@ -22,6 +22,7 @@ limitations under the License.
 package pub
 
 import (
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -106,6 +107,13 @@ func (in *InPlaceUpdateState) DeepCopyInto(out *InPlaceUpdateState) {
 			(*out)[key] = *val.DeepCopy()
 		}
 	}
+	if in.NextContainerResources != nil {
+		in, out := &in.NextContainerResources, &out.NextContainerResources
+		*out = make(map[string]corev1.ResourceRequirements, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
 	if in.PreCheckBeforeNext != nil {
 		in, out := &in.PreCheckBeforeNext, &out.PreCheckBeforeNext
 		*out = new(InPlaceUpdatePreCheckBeforeNext)
diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go
index c6c8eb7dcd..d085403bf6 100644
--- a/pkg/client/clientset/versioned/fake/register.go
+++ b/pkg/client/clientset/versioned/fake/register.go
@@ -40,14 +40,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
 // of clientsets, like in:
 //
-//   import (
-//     "k8s.io/client-go/kubernetes"
-//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-//   )
+//	import (
+//	  "k8s.io/client-go/kubernetes"
+//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+//	)
 //
-//   kclientset, _ := kubernetes.NewForConfig(c)
-//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//	kclientset, _ := kubernetes.NewForConfig(c)
+//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.
diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go
index c8ba11f600..fa73f87425 100644
--- a/pkg/client/clientset/versioned/scheme/register.go
+++ b/pkg/client/clientset/versioned/scheme/register.go
@@ -40,14 +40,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
 // of clientsets, like in:
 //
-//   import (
-//     "k8s.io/client-go/kubernetes"
-//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-//   )
+//	import (
+//	  "k8s.io/client-go/kubernetes"
+//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+//	)
 //
-//   kclientset, _ := kubernetes.NewForConfig(c)
-//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//	kclientset, _ := kubernetes.NewForConfig(c)
+//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.
diff --git a/pkg/features/kruise_features.go b/pkg/features/kruise_features.go
index 3edbd36358..642ce928f7 100644
--- a/pkg/features/kruise_features.go
+++ b/pkg/features/kruise_features.go
@@ -105,6 +105,10 @@ const (
 	// PreparingUpdateAsUpdate enable CloneSet/Advanced StatefulSet controller to regard preparing-update Pod
 	// as updated when calculating update/current revision during scaling.
 	PreparingUpdateAsUpdate featuregate.Feature = "PreparingUpdateAsUpdate"
+
+	// InPlaceWorkloadVerticalScaling enables CloneSet/Advanced StatefulSet controller to support vertical scaling
+	// of managed Pods.
+	InPlaceWorkloadVerticalScaling featuregate.Feature = "InPlaceWorkloadVerticalScaling"
 )
 
 var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
@@ -129,6 +133,7 @@ var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 	PreDownloadImageForDaemonSetUpdate: {Default: false, PreRelease: featuregate.Alpha},
 	CloneSetEventHandlerOptimization:   {Default: false, PreRelease: featuregate.Alpha},
 	PreparingUpdateAsUpdate:            {Default: false, PreRelease: featuregate.Alpha},
+	InPlaceWorkloadVerticalScaling:     {Default: false, PreRelease: featuregate.Alpha},
 }
 
 func init() {
diff --git a/pkg/util/inplaceupdate/inplace_update.go b/pkg/util/inplaceupdate/inplace_update.go
index 81b8966ef9..69e7033527 100644
--- a/pkg/util/inplaceupdate/inplace_update.go
+++ b/pkg/util/inplaceupdate/inplace_update.go
@@ -39,8 +39,9 @@ import (
 )
 
 var (
-	containerImagePatchRexp = regexp.MustCompile("^/spec/containers/([0-9]+)/image$")
-	rfc6901Decoder          = strings.NewReplacer("~1", "/", "~0", "~")
+	containerImagePatchRexp     = regexp.MustCompile("^/spec/containers/([0-9]+)/image$")
+	containerResourcesPatchRexp = regexp.MustCompile("^/spec/containers/([0-9]+)/resources/.*$")
+	rfc6901Decoder              = strings.NewReplacer("~1", "/", "~0", "~")
 
 	Clock clock.Clock = clock.RealClock{}
 )
@@ -79,11 +80,12 @@ type Interface interface {
 type UpdateSpec struct {
 	Revision string `json:"revision"`
 
-	ContainerImages       map[string]string            `json:"containerImages,omitempty"`
-	ContainerRefMetadata  map[string]metav1.ObjectMeta `json:"containerRefMetadata,omitempty"`
-	MetaDataPatch         []byte                       `json:"metaDataPatch,omitempty"`
-	UpdateEnvFromMetadata bool                         `json:"updateEnvFromMetadata,omitempty"`
-	GraceSeconds          int32                        `json:"graceSeconds,omitempty"`
+	ContainerImages       map[string]string                  `json:"containerImages,omitempty"`
+	ContainerRefMetadata  map[string]metav1.ObjectMeta       `json:"containerRefMetadata,omitempty"`
+	ContainerResources    map[string]v1.ResourceRequirements `json:"containerResources,omitempty"`
+	MetaDataPatch         []byte                             `json:"metaDataPatch,omitempty"`
+	UpdateEnvFromMetadata bool                               `json:"updateEnvFromMetadata,omitempty"`
+	GraceSeconds          int32                              `json:"graceSeconds,omitempty"`
 
 	OldTemplate *v1.PodTemplateSpec `json:"oldTemplate,omitempty"`
 	NewTemplate *v1.PodTemplateSpec `json:"newTemplate,omitempty"`
@@ -131,7 +133,7 @@ func (c *realControl) Refresh(pod *v1.Pod, opts *UpdateOptions) RefreshResult {
 	}
 
 	// check if there are containers with lower-priority that have to in-place update in next batch
-	if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 {
+	if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 || len(state.NextContainerResources) > 0 {
 
 		// pre-check the previous updated containers
 		if checkErr := doPreCheckBeforeNext(pod, state.PreCheckBeforeNext); checkErr != nil {
@@ -254,6 +256,7 @@ func (c *realControl) updateNextBatch(pod *v1.Pod, opts *UpdateOptions) (bool, e
 			ContainerImages:       state.NextContainerImages,
 			ContainerRefMetadata:  state.NextContainerRefMetadata,
 			UpdateEnvFromMetadata: state.UpdateEnvFromMetadata,
+			ContainerResources:    state.NextContainerResources,
 		}
 		if clone, err = opts.PatchSpecToPod(clone, &spec, &state); err != nil {
 			return err
diff --git a/pkg/util/inplaceupdate/inplace_update_defaults.go b/pkg/util/inplaceupdate/inplace_update_defaults.go
index 68364792d3..9cff5205fd 100644
--- a/pkg/util/inplaceupdate/inplace_update_defaults.go
+++ b/pkg/util/inplaceupdate/inplace_update_defaults.go
@@ -31,6 +31,7 @@ import (
 	utilfeature "github.com/openkruise/kruise/pkg/util/feature"
 	apps "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -43,20 +44,40 @@ func SetOptionsDefaults(opts *UpdateOptions) *UpdateOptions {
 		opts = &UpdateOptions{}
 	}
 
-	if opts.CalculateSpec == nil {
-		opts.CalculateSpec = defaultCalculateInPlaceUpdateSpec
-	}
+	if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) {
+		if opts.CalculateSpec == nil {
+			opts.CalculateSpec = defaultCalculateInPlaceUpdateSpecWithVerticalUpdate
+		}
 
-	if opts.PatchSpecToPod == nil {
-		opts.PatchSpecToPod = defaultPatchUpdateSpecToPod
-	}
+		if opts.PatchSpecToPod == nil {
+			opts.PatchSpecToPod = defaultPatchUpdateSpecToPodWithVerticalUpdate
+		}
 
-	if opts.CheckPodUpdateCompleted == nil {
-		opts.CheckPodUpdateCompleted = DefaultCheckInPlaceUpdateCompleted
-	}
+		if opts.CheckPodUpdateCompleted == nil {
+			opts.CheckPodUpdateCompleted = DefaultCheckInPlaceUpdateCompletedWithVerticalUpdate
+		}
+
+		if opts.CheckContainersUpdateCompleted == nil {
+			opts.CheckContainersUpdateCompleted = defaultCheckContainersInPlaceUpdateCompletedWithVerticalUpdate
+		}
+
+	} else {
+		if opts.CalculateSpec == nil {
+			opts.CalculateSpec = defaultCalculateInPlaceUpdateSpec
+		}
+
+		if opts.PatchSpecToPod == nil {
+			opts.PatchSpecToPod = defaultPatchUpdateSpecToPod
+		}
+
+		if opts.CheckPodUpdateCompleted == nil {
+			opts.CheckPodUpdateCompleted = DefaultCheckInPlaceUpdateCompleted
+		}
+
+		if opts.CheckContainersUpdateCompleted == nil {
+			opts.CheckContainersUpdateCompleted = defaultCheckContainersInPlaceUpdateCompleted
+		}
 
-	if opts.CheckContainersUpdateCompleted == nil {
-		opts.CheckContainersUpdateCompleted = defaultCheckContainersInPlaceUpdateCompleted
 	}
 
 	return opts
@@ -69,6 +90,7 @@ func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *appspub.I
 
 	state.NextContainerImages = make(map[string]string)
 	state.NextContainerRefMetadata = make(map[string]metav1.ObjectMeta)
+	state.NextContainerResources = make(map[string]v1.ResourceRequirements)
 
 	if spec.MetaDataPatch != nil {
 		cloneBytes, _ := json.Marshal(pod)
@@ -471,3 +493,361 @@ func checkAllContainersHashConsistent(pod *v1.Pod, runtimeContainerMetaSet *apps
 
 	return true
 }
+
+// defaultPatchUpdateSpecToPodWithVerticalUpdate returns a new pod that merges spec into the old pod
+func defaultPatchUpdateSpecToPodWithVerticalUpdate(pod *v1.Pod, spec *UpdateSpec, state *appspub.InPlaceUpdateState) (*v1.Pod, error) {
+	klog.V(5).Infof("Begin to in-place update pod %s/%s with update spec %v, state %v", pod.Namespace, pod.Name, util.DumpJSON(spec), util.DumpJSON(state))
+
+	state.NextContainerImages = make(map[string]string)
+	state.NextContainerRefMetadata = make(map[string]metav1.ObjectMeta)
+	state.NextContainerResources = make(map[string]v1.ResourceRequirements)
+
+	if spec.MetaDataPatch != nil {
+		cloneBytes, _ := json.Marshal(pod)
+		modified, err := strategicpatch.StrategicMergePatch(cloneBytes, spec.MetaDataPatch, &v1.Pod{})
+		if err != nil {
+			return nil, err
+		}
+		pod = &v1.Pod{}
+		if err = json.Unmarshal(modified, pod); err != nil {
+			return nil, err
+		}
+	}
+
+	if pod.Labels == nil {
+		pod.Labels = make(map[string]string)
+	}
+	if pod.Annotations == nil {
+		pod.Annotations = make(map[string]string)
+	}
+
+	// prepare containers that should update this time and next time, according to their priorities
+	containersToUpdate := sets.NewString()
+	var highestPriority *int
+	var containersWithHighestPriority []string
+	for i := range pod.Spec.Containers {
+		c := &pod.Spec.Containers[i]
+		_, existImage := spec.ContainerImages[c.Name]
+		_, existMetadata := spec.ContainerRefMetadata[c.Name]
+		_, existResource := spec.ContainerResources[c.Name]
+		if !existImage && !existMetadata && !existResource {
+			continue
+		}
+		priority := utilcontainerlaunchpriority.GetContainerPriority(c)
+		if priority == nil {
+			containersToUpdate.Insert(c.Name)
+		} else if highestPriority == nil || *highestPriority < *priority {
+			highestPriority = priority
+			containersWithHighestPriority = []string{c.Name}
+		} else if *highestPriority == *priority {
+			containersWithHighestPriority = append(containersWithHighestPriority, c.Name)
+		}
+	}
+	for _, cName := range containersWithHighestPriority {
+		containersToUpdate.Insert(cName)
+	}
+	addMetadataSharedContainersToUpdate(pod, containersToUpdate, spec.ContainerRefMetadata)
+
+	// DO NOT modify the fields in spec for it may have to retry on conflict in updatePodInPlace
+
+	// update images and resources, and record current imageIDs for the containers to update
+	containersImageChanged := sets.NewString()
+	containersResourceChanged := sets.NewString()
+	for i := range pod.Spec.Containers {
+		c := &pod.Spec.Containers[i]
+		newImage, imageExists := spec.ContainerImages[c.Name]
+		newResource, resourceExists := spec.ContainerResources[c.Name]
+		if !imageExists && !resourceExists {
+			continue
+		}
+		if containersToUpdate.Has(c.Name) {
+			if imageExists {
+				pod.Spec.Containers[i].Image = newImage
+				containersImageChanged.Insert(c.Name)
+			}
+			if resourceExists {
+				for key, quantity := range newResource.Limits {
+					c.Resources.Limits[key] = quantity
+				}
+				for key, quantity := range newResource.Requests {
+					c.Resources.Requests[key] = quantity
+				}
+				containersResourceChanged.Insert(c.Name)
+			}
+		} else {
+			state.NextContainerImages[c.Name] = newImage
+			state.NextContainerResources[c.Name] = newResource
+		}
+	}
+	for _, c := range pod.Status.ContainerStatuses {
+		if containersImageChanged.Has(c.Name) {
+			if state.LastContainerStatuses == nil {
+				state.LastContainerStatuses = map[string]appspub.InPlaceUpdateContainerStatus{}
+			}
+			state.LastContainerStatuses[c.Name] = appspub.InPlaceUpdateContainerStatus{ImageID: c.ImageID}
+		}
+		// TODO(LavenderQAQ): The status of resource needs to be printed
+	}
+
+	// update annotations and labels for the containers to update
+	for cName, objMeta := range spec.ContainerRefMetadata {
+		if containersToUpdate.Has(cName) {
+			for k, v := range objMeta.Labels {
+				pod.Labels[k] = v
+			}
+			for k, v := range objMeta.Annotations {
+				pod.Annotations[k] = v
+			}
+		} else {
+			state.NextContainerRefMetadata[cName] = objMeta
+		}
+	}
+
+	// add the containers that update this time into PreCheckBeforeNext, so that next containers can only
+	// start to update when these containers have updated ready
+	// TODO: currently we only support ContainersRequiredReady, not sure if we have to add ContainersPreferredReady in future
+	if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 || len(state.NextContainerResources) > 0 {
+		state.PreCheckBeforeNext = &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: containersToUpdate.List()}
+	} else {
+		state.PreCheckBeforeNext = nil
+	}
+
+	state.ContainerBatchesRecord = append(state.ContainerBatchesRecord, appspub.InPlaceUpdateContainerBatch{
+		Timestamp:  metav1.NewTime(Clock.Now()),
+		Containers: containersToUpdate.List(),
+	})
+
+	klog.V(5).Infof("Decide to in-place update pod %s/%s with state %v", pod.Namespace, pod.Name, util.DumpJSON(state))
+
+	inPlaceUpdateStateJSON, _ := json.Marshal(state)
+	pod.Annotations[appspub.InPlaceUpdateStateKey] = string(inPlaceUpdateStateJSON)
+	return pod, nil
+}
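The resource merge in the function above is key-wise: only the resource names carried in the computed UpdateSpec overwrite the container's current requests/limits, while untouched keys keep their existing quantities. A minimal sketch of that behavior (container name and quantities are hypothetical, not taken from the diff):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A container currently requesting 1 CPU and 1Gi of memory.
	c := v1.Container{
		Name: "app",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1"),
				v1.ResourceMemory: resource.MustParse("1Gi"),
			},
		},
	}
	// The UpdateSpec only carries a new CPU request for this container.
	newResource := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
	}
	// Same key-wise merge as defaultPatchUpdateSpecToPodWithVerticalUpdate performs.
	for key, quantity := range newResource.Requests {
		c.Resources.Requests[key] = quantity
	}
	cpu := c.Resources.Requests[v1.ResourceCPU]
	mem := c.Resources.Requests[v1.ResourceMemory]
	fmt.Println(cpu.String(), mem.String()) // 2 1Gi
}
```

As in the diff, the merge writes into the container's existing Requests/Limits maps, so containers being resized are assumed to already declare those maps.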
+
+// defaultCalculateInPlaceUpdateSpecWithVerticalUpdate calculates the diff between the old and new revisions.
+// If the diff only contains replace operations on spec.containers[x].image or spec.containers[x].resources, it returns an UpdateSpec.
+// Otherwise, it returns nil, which means in-place update can not be used.
+func defaultCalculateInPlaceUpdateSpecWithVerticalUpdate(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) *UpdateSpec {
+	if oldRevision == nil || newRevision == nil {
+		return nil
+	}
+	opts = SetOptionsDefaults(opts)
+
+	patches, err := jsonpatch.CreatePatch(oldRevision.Data.Raw, newRevision.Data.Raw)
+	if err != nil {
+		return nil
+	}
+
+	oldTemp, err := GetTemplateFromRevision(oldRevision)
+	if err != nil {
+		return nil
+	}
+	newTemp, err := GetTemplateFromRevision(newRevision)
+	if err != nil {
+		return nil
+	}
+
+	updateSpec := &UpdateSpec{
+		Revision:             newRevision.Name,
+		ContainerImages:      make(map[string]string),
+		ContainerRefMetadata: make(map[string]metav1.ObjectMeta),
+		ContainerResources:   make(map[string]v1.ResourceRequirements),
+		GraceSeconds:         opts.GracePeriodSeconds,
+	}
+	if opts.GetRevision != nil {
+		updateSpec.Revision = opts.GetRevision(newRevision)
+	}
+
+	// all patches for podSpec can just update images and resources in pod spec
+	var metadataPatches []jsonpatch.Operation
+	for _, op := range patches {
+		op.Path = strings.Replace(op.Path, "/spec/template", "", 1)
+
+		if !strings.HasPrefix(op.Path, "/spec/") {
+			if strings.HasPrefix(op.Path, "/metadata/") {
+				metadataPatches = append(metadataPatches, op)
+				continue
+			}
+			return nil
+		}
+
+		if op.Operation != "replace" {
+			return nil
+		}
+
+		isImageUpdate := containerImagePatchRexp.MatchString(op.Path)
+		isResourceUpdate := containerResourcesPatchRexp.MatchString(op.Path)
+
+		if isImageUpdate {
+			// for example: /spec/containers/0/image
+			words := strings.Split(op.Path, "/")
+			idx, _ := strconv.Atoi(words[3])
+			if len(oldTemp.Spec.Containers) <= idx {
+				return nil
+			}
+			updateSpec.ContainerImages[oldTemp.Spec.Containers[idx].Name] = op.Value.(string)
+		} else if isResourceUpdate {
+			// for example: /spec/containers/0/resources/limits/cpu
+			words := strings.Split(op.Path, "/")
+			if len(words) != 7 {
+				return nil
+			}
+			idx, err := strconv.Atoi(words[3])
+			if err != nil || len(oldTemp.Spec.Containers) <= idx {
+				return nil
+			}
+			quantity, err := resource.ParseQuantity(op.Value.(string))
+			if err != nil {
+				klog.Errorf("parse quantity error: %v", err)
+				return nil
+			}
+
+			if _, ok := updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name]; !ok {
+				updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name] = v1.ResourceRequirements{
+					Limits:   make(v1.ResourceList),
+					Requests: make(v1.ResourceList),
+				}
+			}
+			switch words[5] {
+			case "limits":
+				updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name].Limits[v1.ResourceName(words[6])] = quantity
+			case "requests":
+				updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name].Requests[v1.ResourceName(words[6])] = quantity
+			}
+		} else {
+			return nil
+		}
+	}
+
+	if len(metadataPatches) > 0 {
+		if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceUpdateEnvFromMetadata) {
+			// for example: /metadata/labels/my-label-key
+			for _, op := range metadataPatches {
+				if op.Operation != "replace" && op.Operation != "add" {
+					continue
+				}
+				words := strings.SplitN(op.Path, "/", 4)
+				if len(words) != 4 || (words[2] != "labels" && words[2] != "annotations") {
+					continue
+				}
+				key := rfc6901Decoder.Replace(words[3])
+
+				for i := range newTemp.Spec.Containers {
+					c := &newTemp.Spec.Containers[i]
+					objMeta := updateSpec.ContainerRefMetadata[c.Name]
+					switch words[2] {
+					case "labels":
+						if !utilcontainermeta.IsContainerReferenceToMeta(c, "metadata.labels", key) {
+							continue
+						}
+						if objMeta.Labels == nil {
+							objMeta.Labels = make(map[string]string)
+						}
+						objMeta.Labels[key] = op.Value.(string)
+						delete(oldTemp.ObjectMeta.Labels, key)
+						delete(newTemp.ObjectMeta.Labels, key)
+
+					case "annotations":
+						if !utilcontainermeta.IsContainerReferenceToMeta(c, "metadata.annotations", key) {
+							continue
+						}
+						if objMeta.Annotations == nil {
+							objMeta.Annotations = make(map[string]string)
+						}
+						objMeta.Annotations[key] = op.Value.(string)
+						delete(oldTemp.ObjectMeta.Annotations, key)
+						delete(newTemp.ObjectMeta.Annotations, key)
+					}
+
+					updateSpec.ContainerRefMetadata[c.Name] = objMeta
+					updateSpec.UpdateEnvFromMetadata = true
+				}
+			}
+		}
+
+		oldBytes, _ := json.Marshal(v1.Pod{ObjectMeta: oldTemp.ObjectMeta})
+		newBytes, _ := json.Marshal(v1.Pod{ObjectMeta: newTemp.ObjectMeta})
+		patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldBytes, newBytes, &v1.Pod{})
+		if err != nil {
+			return nil
+		}
+		updateSpec.MetaDataPatch = patchBytes
+	}
+	return updateSpec
+}
+
+// DefaultCheckInPlaceUpdateCompletedWithVerticalUpdate checks whether imageID in pod status has been changed since in-place update.
+// If the imageID in containerStatuses has not been changed, we assume that kubelet has not updated
+// containers in Pod.
+func DefaultCheckInPlaceUpdateCompletedWithVerticalUpdate(pod *v1.Pod) error {
+	if _, isInGraceState := appspub.GetInPlaceUpdateGrace(pod); isInGraceState {
+		return fmt.Errorf("still in grace period of in-place update")
+	}
+
+	inPlaceUpdateState := appspub.InPlaceUpdateState{}
+	if stateStr, ok := appspub.GetInPlaceUpdateState(pod); !ok {
+		return nil
+	} else if err := json.Unmarshal([]byte(stateStr), &inPlaceUpdateState); err != nil {
+		return err
+	}
+	if len(inPlaceUpdateState.NextContainerImages) > 0 || len(inPlaceUpdateState.NextContainerRefMetadata) > 0 || len(inPlaceUpdateState.NextContainerResources) > 0 {
+		return fmt.Errorf("existing containers to in-place update in next batches")
+	}
+
+	return defaultCheckContainersInPlaceUpdateCompletedWithVerticalUpdate(pod, &inPlaceUpdateState)
+}
+
+func defaultCheckContainersInPlaceUpdateCompletedWithVerticalUpdate(pod *v1.Pod, inPlaceUpdateState *appspub.InPlaceUpdateState) error {
+	runtimeContainerMetaSet, err := appspub.GetRuntimeContainerMetaSet(pod)
+	if err != nil {
+		return err
+	}
+
+	if inPlaceUpdateState.UpdateEnvFromMetadata {
+		if runtimeContainerMetaSet == nil {
+			return fmt.Errorf("waiting for all containers hash consistent, but runtime-container-meta not found")
+		}
+		if !checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, extractedEnvFromMetadataHash) {
+			return fmt.Errorf("waiting for all containers hash consistent")
+		}
+	}
+
+	if runtimeContainerMetaSet != nil {
+		if checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, plainHash) {
+			klog.V(5).Infof("Check Pod %s/%s in-place update completed for all container hash consistent", pod.Namespace, pod.Name)
+			return nil
+		}
+		// If it needs not to update envs from metadata, we don't have to return error here,
+		// in case kruise-daemon has broken for some reason and runtime-container-meta is still in an old version.
+	}
+
+	containerImages := make(map[string]string, len(pod.Spec.Containers))
+	containerResources := make(map[string]v1.ResourceRequirements, len(pod.Spec.Containers))
+	for i := range pod.Spec.Containers {
+		c := &pod.Spec.Containers[i]
+		containerImages[c.Name] = c.Image
+		containerResources[c.Name] = c.Resources
+		if len(strings.Split(c.Image, ":")) <= 1 {
+			containerImages[c.Name] = fmt.Sprintf("%s:latest", c.Image)
+		}
+	}
+
+	for _, cs := range pod.Status.ContainerStatuses {
+		if oldStatus, ok := inPlaceUpdateState.LastContainerStatuses[cs.Name]; ok {
+			// TODO: we assume that users should not update workload template with new image which actually has the same imageID as the old image
+			if oldStatus.ImageID == cs.ImageID {
+				if containerImages[cs.Name] != cs.Image {
+					return fmt.Errorf("container %s imageID not changed", cs.Name)
+				}
+			}
+			// TODO(LavenderQAQ): Check the vertical updating status of the container
+			delete(inPlaceUpdateState.LastContainerStatuses, cs.Name)
+		}
+	}
+
+	if len(inPlaceUpdateState.LastContainerStatuses) > 0 {
+		return fmt.Errorf("not found statuses of containers %v", inPlaceUpdateState.LastContainerStatuses)
+	}
+
+	return nil
+}
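For reference, the new containerResourcesPatchRexp only admits paths under spec.containers[i].resources, and defaultCalculateInPlaceUpdateSpecWithVerticalUpdate then requires the path to split into exactly seven segments so it can recover the container index, the limits/requests bucket, and the resource name. A minimal, self-contained sketch of that parsing (the patch path and value are hypothetical examples, not taken from the diff):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Mirrors containerResourcesPatchRexp from the diff above.
var containerResourcesPatchRexp = regexp.MustCompile("^/spec/containers/([0-9]+)/resources/.*$")

func main() {
	// A JSON patch "replace" op produced by comparing two revisions that only
	// change one container's CPU limit.
	path, value := "/spec/containers/0/resources/limits/cpu", "500m"

	if !containerResourcesPatchRexp.MatchString(path) {
		return
	}
	words := strings.Split(path, "/")
	// words = ["", "spec", "containers", "0", "resources", "limits", "cpu"], len == 7
	idx, _ := strconv.Atoi(words[3])
	quantity, _ := resource.ParseQuantity(value)

	// Same bucketing into ResourceRequirements as the calculate function.
	requirements := v1.ResourceRequirements{Limits: v1.ResourceList{}, Requests: v1.ResourceList{}}
	switch words[5] {
	case "limits":
		requirements.Limits[v1.ResourceName(words[6])] = quantity
	case "requests":
		requirements.Requests[v1.ResourceName(words[6])] = quantity
	}
	fmt.Printf("container #%d: %s/%s = %s\n", idx, words[5], words[6], quantity.String())
	// container #0: limits/cpu = 500m
}
```

Any operation that is not a plain replace, or whose path does not have this shape, makes the calculation return nil, meaning the change cannot be applied in place.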