diff --git a/hosted/aks/helper/helper_cluster.go b/hosted/aks/helper/helper_cluster.go
index cd1232d8..547a4238 100644
--- a/hosted/aks/helper/helper_cluster.go
+++ b/hosted/aks/helper/helper_cluster.go
@@ -33,20 +33,91 @@ var (
 	subscriptionID = os.Getenv("AKS_SUBSCRIPTION_ID")
 )
 
+func CreateAKSHostedCluster(client *rancher.Client, cloudCredentialID, clusterName, k8sVersion, location string, tags map[string]string) (*management.Cluster, error) {
+	var aksClusterConfig aks.ClusterConfig
+	config.LoadConfig(aks.AKSClusterConfigConfigurationFileKey, &aksClusterConfig)
+	var aksNodePools []management.AKSNodePool
+	for _, aksNodePoolConfig := range *aksClusterConfig.NodePools {
+		aksNodePool := management.AKSNodePool{
+			AvailabilityZones:   aksNodePoolConfig.AvailabilityZones,
+			Count:               aksNodePoolConfig.NodeCount,
+			EnableAutoScaling:   aksNodePoolConfig.EnableAutoScaling,
+			MaxPods:             aksNodePoolConfig.MaxPods,
+			MaxCount:            aksNodePoolConfig.MaxCount,
+			MinCount:            aksNodePoolConfig.MinCount,
+			Mode:                aksNodePoolConfig.Mode,
+			Name:                aksNodePoolConfig.Name,
+			OrchestratorVersion: &k8sVersion,
+			OsDiskSizeGB:        aksNodePoolConfig.OsDiskSizeGB,
+			OsDiskType:          aksNodePoolConfig.OsDiskType,
+			OsType:              aksNodePoolConfig.OsType,
+			VMSize:              aksNodePoolConfig.VMSize,
+		}
+		aksNodePools = append(aksNodePools, aksNodePool)
+	}
+
+	cluster := &management.Cluster{
+		AKSConfig: &management.AKSClusterConfigSpec{
+			AzureCredentialSecret: cloudCredentialID,
+			ClusterName:           clusterName,
+			DNSPrefix:             pointer.String(clusterName + "-dns"),
+			Imported:              false,
+			KubernetesVersion:     &k8sVersion,
+			LinuxAdminUsername:    aksClusterConfig.LinuxAdminUsername,
+			LoadBalancerSKU:       aksClusterConfig.LoadBalancerSKU,
+			NetworkPlugin:         aksClusterConfig.NetworkPlugin,
+			NodePools:             aksNodePools,
+			PrivateCluster:        aksClusterConfig.PrivateCluster,
+			ResourceGroup:         clusterName,
+			ResourceLocation:      location,
+			Tags:                  tags,
+		},
+		DockerRootDir: "/var/lib/docker",
+		Name:          clusterName,
+	}
+
+	clusterResp, err := client.Management.Cluster.Create(cluster)
+	Expect(err).To(BeNil())
+
+	return clusterResp, err
+}
+
+// ImportAKSHostedCluster imports an AKS cluster to Rancher
+func ImportAKSHostedCluster(client *rancher.Client, clusterName, cloudCredentialID, location string, tags map[string]string) (*management.Cluster, error) {
+	cluster := &management.Cluster{
+		DockerRootDir: "/var/lib/docker",
+		AKSConfig: &management.AKSClusterConfigSpec{
+			AzureCredentialSecret: cloudCredentialID,
+			ClusterName:           clusterName,
+			Imported:              true,
+			ResourceLocation:      location,
+			ResourceGroup:         clusterName,
+			Tags:                  tags,
+		},
+		Name: clusterName,
+	}
+
+	clusterResp, err := client.Management.Cluster.Create(cluster)
+	Expect(err).To(BeNil())
+
+	return clusterResp, err
+}
+
+// DeleteAKSHostCluster deletes the AKS cluster
+func DeleteAKSHostCluster(cluster *management.Cluster, client *rancher.Client) error {
+	return client.Management.Cluster.Delete(cluster)
+}
+
 // UpgradeClusterKubernetesVersion upgrades the k8s version to the value defined by upgradeToVersion;
 // if checkClusterConfig is set to true, it will validate that the cluster control plane has been upgrade successfully
 func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersion string, client *rancher.Client, checkClusterConfig bool) (*management.Cluster, error) {
+	upgradedCluster := cluster
 	currentVersion := *cluster.AKSConfig.KubernetesVersion
-	upgradedCluster := new(management.Cluster)
-	upgradedCluster.Name = cluster.Name
-	upgradedCluster.AKSConfig = cluster.AKSConfig
upgradedCluster.AKSConfig.KubernetesVersion = &upgradeToVersion var err error cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) - if err != nil { - return nil, err - } + Expect(err).To(BeNil()) if checkClusterConfig { // Check if the desired config is set correctly @@ -60,7 +131,7 @@ func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersi Eventually(func() string { ginkgo.GinkgoLogr.Info("Waiting for k8s upgrade to appear in AKSStatus.UpstreamSpec ...") cluster, err = client.Management.Cluster.ByID(cluster.ID) - Expect(err).NotTo(HaveOccurred()) + Expect(err).To(BeNil()) return *cluster.AKSStatus.UpstreamSpec.KubernetesVersion }, tools.SetTimeout(10*time.Minute), 5*time.Second).Should(Equal(upgradeToVersion)) // ensure nodepool version is same in Rancher @@ -76,17 +147,13 @@ func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersi // if wait is set to true, it will wait until the cluster finishes upgrading; // if checkClusterConfig is set to true, it will validate that nodepool has been upgraded successfully func UpgradeNodeKubernetesVersion(cluster *management.Cluster, upgradeToVersion string, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { - upgradedCluster := new(management.Cluster) - upgradedCluster.Name = cluster.Name - upgradedCluster.AKSConfig = cluster.AKSConfig + upgradedCluster := cluster for i := range upgradedCluster.AKSConfig.NodePools { upgradedCluster.AKSConfig.NodePools[i].OrchestratorVersion = &upgradeToVersion } var err error cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) - if err != nil { - return nil, err - } + Expect(err).To(BeNil()) if checkClusterConfig { // Check if the desired config is set correctly @@ -117,62 +184,6 @@ func UpgradeNodeKubernetesVersion(cluster *management.Cluster, upgradeToVersion return cluster, nil } -func CreateAKSHostedCluster(client *rancher.Client, cloudCredentialID, clusterName, k8sVersion, location string, tags map[string]string) (*management.Cluster, error) { - var aksClusterConfig aks.ClusterConfig - config.LoadConfig(aks.AKSClusterConfigConfigurationFileKey, &aksClusterConfig) - var aksNodePools []management.AKSNodePool - for _, aksNodePoolConfig := range *aksClusterConfig.NodePools { - aksNodePool := management.AKSNodePool{ - AvailabilityZones: aksNodePoolConfig.AvailabilityZones, - Count: aksNodePoolConfig.NodeCount, - EnableAutoScaling: aksNodePoolConfig.EnableAutoScaling, - MaxPods: aksNodePoolConfig.MaxPods, - MaxCount: aksNodePoolConfig.MaxCount, - MinCount: aksNodePoolConfig.MinCount, - Mode: aksNodePoolConfig.Mode, - Name: aksNodePoolConfig.Name, - OrchestratorVersion: &k8sVersion, - OsDiskSizeGB: aksNodePoolConfig.OsDiskSizeGB, - OsDiskType: aksNodePoolConfig.OsDiskType, - OsType: aksNodePoolConfig.OsType, - VMSize: aksNodePoolConfig.VMSize, - } - aksNodePools = append(aksNodePools, aksNodePool) - } - - cluster := &management.Cluster{ - AKSConfig: &management.AKSClusterConfigSpec{ - AzureCredentialSecret: cloudCredentialID, - ClusterName: clusterName, - DNSPrefix: pointer.String(clusterName + "-dns"), - Imported: false, - KubernetesVersion: &k8sVersion, - LinuxAdminUsername: aksClusterConfig.LinuxAdminUsername, - LoadBalancerSKU: aksClusterConfig.LoadBalancerSKU, - NetworkPlugin: aksClusterConfig.NetworkPlugin, - NodePools: aksNodePools, - PrivateCluster: aksClusterConfig.PrivateCluster, - ResourceGroup: clusterName, - ResourceLocation: location, - Tags: tags, - }, - DockerRootDir: 
"/var/lib/docker", - Name: clusterName, - } - - clusterResp, err := client.Management.Cluster.Create(cluster) - if err != nil { - return nil, err - } - - return clusterResp, err -} - -// DeleteAKSHostCluster deletes the AKS cluster -func DeleteAKSHostCluster(cluster *management.Cluster, client *rancher.Client) error { - return client.Management.Cluster.Delete(cluster) -} - // ListSingleVariantAKSAvailableVersions returns a list of single variants of minor versions // For e.g 1.27.5, 1.26.6, 1.25.8 func ListSingleVariantAKSAvailableVersions(client *rancher.Client, cloudCredentialID, region string) (availableVersions []string, err error) { @@ -210,12 +221,9 @@ func GetK8sVersionVariantAKS(minorVersion string, client *rancher.Client, cloudC // AddNodePool adds a nodepool to the list; if wait is set to true, it will wait until the cluster finishes upgrading; // if checkClusterConfig is set to true, it will validate that nodepool has been added successfully func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { + upgradedCluster := cluster currentNodePoolNumber := len(cluster.AKSConfig.NodePools) - upgradedCluster := new(management.Cluster) - upgradedCluster.Name = cluster.Name - upgradedCluster.AKSConfig = cluster.AKSConfig - updateNodePoolsList := cluster.AKSConfig.NodePools for i := 1; i <= increaseBy; i++ { for _, np := range cluster.AKSConfig.NodePools { @@ -233,9 +241,7 @@ func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Cl var err error cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) - if err != nil { - return nil, err - } + Expect(err).To(BeNil()) if checkClusterConfig { // Check if the desired config is set correctly @@ -271,17 +277,13 @@ func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Cl func DeleteNodePool(cluster *management.Cluster, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { currentNodePoolNumber := len(cluster.AKSConfig.NodePools) - upgradedCluster := new(management.Cluster) - upgradedCluster.Name = cluster.Name - upgradedCluster.AKSConfig = cluster.AKSConfig + upgradedCluster := cluster updatedNodePoolsList := cluster.AKSConfig.NodePools[:1] upgradedCluster.AKSConfig.NodePools = updatedNodePoolsList var err error cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) - if err != nil { - return nil, err - } + Expect(err).To(BeNil()) if checkClusterConfig { // Check if the desired config is set correctly @@ -314,18 +316,14 @@ func DeleteNodePool(cluster *management.Cluster, client *rancher.Client, wait, c // if wait is set to true, it will wait until the cluster finishes upgrading; // if checkClusterConfig is set to true, it will validate that nodepool has been scaled successfully func ScaleNodePool(cluster *management.Cluster, client *rancher.Client, nodeCount int64, wait, checkClusterConfig bool) (*management.Cluster, error) { - upgradedCluster := new(management.Cluster) - upgradedCluster.Name = cluster.Name - upgradedCluster.AKSConfig = cluster.AKSConfig + upgradedCluster := cluster for i := range upgradedCluster.AKSConfig.NodePools { upgradedCluster.AKSConfig.NodePools[i].Count = pointer.Int64(nodeCount) } var err error cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) - if err != nil { - return nil, err - } + Expect(err).To(BeNil()) if checkClusterConfig { // Check if the desired config is set correctly @@ -423,28 
+421,6 @@ func DeleteAKSClusteronAzure(clusterName string) error { return nil } -// ImportAKSHostedCluster imports an AKS cluster to Rancher -func ImportAKSHostedCluster(client *rancher.Client, clusterName, cloudCredentialID, location string, tags map[string]string) (*management.Cluster, error) { - cluster := &management.Cluster{ - DockerRootDir: "/var/lib/docker", - AKSConfig: &management.AKSClusterConfigSpec{ - AzureCredentialSecret: cloudCredentialID, - ClusterName: clusterName, - Imported: true, - ResourceLocation: location, - ResourceGroup: clusterName, - Tags: tags, - }, - Name: clusterName, - } - - clusterResp, err := client.Management.Cluster.Create(cluster) - if err != nil { - return nil, err - } - return clusterResp, err -} - // defaultAKS returns the default AKS version used by Rancher; if forUpgrade is true, it returns the second-highest minor k8s version func defaultAKS(client *rancher.Client, cloudCredentialID, region string, forUpgrade bool) (defaultAKS string, err error) { url := fmt.Sprintf("%s://%s/meta/aksVersions", "https", client.RancherConfig.Host) diff --git a/hosted/eks/helper/helper_cluster.go b/hosted/eks/helper/helper_cluster.go index efc5fdd9..a35e74d3 100644 --- a/hosted/eks/helper/helper_cluster.go +++ b/hosted/eks/helper/helper_cluster.go @@ -3,16 +3,20 @@ package helper import ( "fmt" "os" - "strconv" - - "github.com/rancher/shepherd/extensions/clusters/eks" + "strings" + "time" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/rancher-sandbox/ele-testhelpers/tools" "github.com/rancher/hosted-providers-e2e/hosted/helpers" "github.com/epinio/epinio/acceptance/helpers/proc" "github.com/pkg/errors" "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/clusters/eks" "github.com/rancher/shepherd/extensions/clusters/kubernetesversions" "github.com/rancher/shepherd/pkg/config" namegen "github.com/rancher/shepherd/pkg/namegenerator" @@ -20,50 +24,91 @@ import ( "k8s.io/utils/pointer" ) -func GetTags() map[string]string { - eksConfig := new(management.EKSClusterConfigSpec) - config.LoadConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig) - providerTags := helpers.GetCommonMetadataLabels() - if clusterCleanup, _ := strconv.ParseBool(os.Getenv("DOWNSTREAM_CLUSTER_CLEANUP")); clusterCleanup == false { - providerTags["janitor-ignore"] = "true" - } - - if eksConfig.Tags != nil { - for key, value := range *eksConfig.Tags { - providerTags[key] = value +// CreateEKSHostedCluster is a helper function that creates an EKS hosted cluster +func CreateEKSHostedCluster(client *rancher.Client, displayName, cloudCredentialID, kubernetesVersion, region string, tags map[string]string) (*management.Cluster, error) { + var eksClusterConfig eks.ClusterConfig + config.LoadConfig(eks.EKSClusterConfigConfigurationFileKey, &eksClusterConfig) + + var nodeGroups []management.NodeGroup + for _, nodeGroupConfig := range *eksClusterConfig.NodeGroupsConfig { + var launchTemplate *management.LaunchTemplate + if nodeGroupConfig.LaunchTemplateConfig != nil { + launchTemplate = &management.LaunchTemplate{ + Name: nodeGroupConfig.LaunchTemplateConfig.Name, + Version: nodeGroupConfig.LaunchTemplateConfig.Version, + } + } + nodeGroup := management.NodeGroup{ + DesiredSize: nodeGroupConfig.DesiredSize, + DiskSize: nodeGroupConfig.DiskSize, + Ec2SshKey: nodeGroupConfig.Ec2SshKey, + Gpu: nodeGroupConfig.Gpu, + 
ImageID: nodeGroupConfig.ImageID, + InstanceType: nodeGroupConfig.InstanceType, + Labels: &nodeGroupConfig.Labels, + LaunchTemplate: launchTemplate, + MaxSize: nodeGroupConfig.MaxSize, + MinSize: nodeGroupConfig.MinSize, + NodegroupName: nodeGroupConfig.NodegroupName, + NodeRole: nodeGroupConfig.NodeRole, + RequestSpotInstances: nodeGroupConfig.RequestSpotInstances, + ResourceTags: &nodeGroupConfig.ResourceTags, + SpotInstanceTypes: &nodeGroupConfig.SpotInstanceTypes, + Subnets: &nodeGroupConfig.Subnets, + Tags: &nodeGroupConfig.Tags, + UserData: nodeGroupConfig.UserData, + Version: &kubernetesVersion, } + nodeGroups = append(nodeGroups, nodeGroup) } - return providerTags -} -// UpgradeClusterKubernetesVersion upgrades the k8s version to the value defined by upgradeToVersion. -func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersion *string, client *rancher.Client) (*management.Cluster, error) { - upgradedCluster := new(management.Cluster) - upgradedCluster.Name = cluster.Name - upgradedCluster.EKSConfig = cluster.EKSConfig - upgradedCluster.EKSConfig.KubernetesVersion = upgradeToVersion + cluster := &management.Cluster{ + DockerRootDir: "/var/lib/docker", + EKSConfig: &management.EKSClusterConfigSpec{ + AmazonCredentialSecret: cloudCredentialID, + DisplayName: displayName, + Imported: false, + KmsKey: eksClusterConfig.KmsKey, + KubernetesVersion: &kubernetesVersion, + LoggingTypes: &eksClusterConfig.LoggingTypes, + NodeGroups: nodeGroups, + PrivateAccess: eksClusterConfig.PrivateAccess, + PublicAccess: eksClusterConfig.PublicAccess, + PublicAccessSources: &eksClusterConfig.PublicAccessSources, + Region: region, + SecretsEncryption: eksClusterConfig.SecretsEncryption, + SecurityGroups: &eksClusterConfig.SecurityGroups, + ServiceRole: eksClusterConfig.ServiceRole, + Subnets: &eksClusterConfig.Subnets, + Tags: &tags, + }, + Name: displayName, + } - cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) + clusterResp, err := client.Management.Cluster.Create(cluster) if err != nil { return nil, err } - return cluster, nil + return clusterResp, err } -// UpgradeNodeKubernetesVersion upgrades the k8s version of nodegroup to the value defined by upgradeToVersion. 
-func UpgradeNodeKubernetesVersion(cluster *management.Cluster, upgradeToVersion *string, client *rancher.Client) (*management.Cluster, error) {
-	upgradedCluster := new(management.Cluster)
-	upgradedCluster.Name = cluster.Name
-	upgradedCluster.EKSConfig = cluster.EKSConfig
-	for i := range upgradedCluster.EKSConfig.NodeGroups {
-		upgradedCluster.EKSConfig.NodeGroups[i].Version = upgradeToVersion
+func ImportEKSHostedCluster(client *rancher.Client, displayName, cloudCredentialID, region string) (*management.Cluster, error) {
+	cluster := &management.Cluster{
+		DockerRootDir: "/var/lib/docker",
+		EKSConfig: &management.EKSClusterConfigSpec{
+			AmazonCredentialSecret: cloudCredentialID,
+			DisplayName:            displayName,
+			Imported:               true,
+			Region:                 region,
+		},
+		Name: displayName,
 	}
-	cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster)
+	clusterResp, err := client.Management.Cluster.Create(cluster)
 	if err != nil {
 		return nil, err
 	}
-	return cluster, nil
+	return clusterResp, err
 }
 
 // DeleteEKSHostCluster deletes the EKS cluster
@@ -71,15 +116,91 @@ func DeleteEKSHostCluster(cluster *management.Cluster, client *rancher.Client) e
 	return client.Management.Cluster.Delete(cluster)
 }
 
+// UpgradeClusterKubernetesVersion upgrades the k8s version to the value defined by upgradeToVersion.
+// if checkClusterConfig is set to true, it will validate that the cluster control plane has been upgraded successfully
+func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersion string, client *rancher.Client, checkClusterConfig bool) (*management.Cluster, error) {
+	upgradedCluster := cluster
+	currentVersion := *cluster.EKSConfig.KubernetesVersion
+	upgradedCluster.EKSConfig.KubernetesVersion = &upgradeToVersion
+
+	cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster)
+	Expect(err).To(BeNil())
+
+	if checkClusterConfig {
+		// Check if the desired config is set correctly
+		Expect(*cluster.EKSConfig.KubernetesVersion).To(Equal(upgradeToVersion))
+		// ensure nodegroup version is still the same when config is applied
+		for _, ng := range cluster.EKSConfig.NodeGroups {
+			Expect(*ng.Version).To(Equal(currentVersion))
+		}
+
+		// Check if the desired config has been applied in Rancher
+		Eventually(func() string {
+			ginkgo.GinkgoLogr.Info("Waiting for k8s upgrade to appear in EKSStatus.UpstreamSpec ...")
+			cluster, err = client.Management.Cluster.ByID(cluster.ID)
+			Expect(err).To(BeNil())
+			return *cluster.EKSStatus.UpstreamSpec.KubernetesVersion
+		}, tools.SetTimeout(15*time.Minute), 10*time.Second).Should(Equal(upgradeToVersion))
+		// ensure nodegroup version is same in Rancher
+		for _, ng := range cluster.EKSStatus.UpstreamSpec.NodeGroups {
+			Expect(*ng.Version).To(Equal(currentVersion))
+		}
+	}
+	return cluster, nil
+}
+
+// UpgradeNodeKubernetesVersion upgrades the k8s version of nodegroup to the value defined by upgradeToVersion.
+// if wait is set to true, it will wait until the cluster finishes upgrading; +// if checkClusterConfig is set to true, it will validate that nodegroup has been upgraded successfully +func UpgradeNodeKubernetesVersion(cluster *management.Cluster, upgradeToVersion string, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { + upgradedCluster := cluster + for i := range upgradedCluster.EKSConfig.NodeGroups { + upgradedCluster.EKSConfig.NodeGroups[i].Version = &upgradeToVersion + } + + var err error + cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) + Expect(err).To(BeNil()) + + // Check if the desired config is set correctly + for _, ng := range cluster.EKSConfig.NodeGroups { + Expect(*ng.Version).To(Equal(upgradeToVersion)) + } + + if wait { + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) + } + + // TODO: Fix flaky check + if checkClusterConfig { + Eventually(func() bool { + ginkgo.GinkgoLogr.Info("waiting for the nodegroup upgrade to appear in EKSStatus.UpstreamSpec ...") + // Check if the desired config has been applied in Rancher + for _, ng := range cluster.EKSStatus.UpstreamSpec.NodeGroups { + if *ng.Version != upgradeToVersion { + return false + } + } + return true + }, tools.SetTimeout(15*time.Minute), 10*time.Second).Should(BeTrue()) + } + return cluster, nil +} + // AddNodeGroup adds a nodegroup to the list -func AddNodeGroup(cluster *management.Cluster, increaseBy int, client *rancher.Client) (*management.Cluster, error) { - upgradedCluster := new(management.Cluster) - upgradedCluster.Name = cluster.Name - upgradedCluster.EKSConfig = cluster.EKSConfig - nodeConfig := EksHostNodeConfig() +// if checkClusterConfig is set to true, it will validate that nodegroup has been added successfully +func AddNodeGroup(cluster *management.Cluster, increaseBy int, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { + upgradedCluster := cluster + currentNodeGroupNumber := len(cluster.EKSConfig.NodeGroups) + + // Workaround for eks-operator/issues/406 + var eksClusterConfig management.EKSClusterConfigSpec + config.LoadConfig(eks.EKSClusterConfigConfigurationFileKey, &eksClusterConfig) + updateNodeGroupsList := upgradedCluster.EKSConfig.NodeGroups for i := 1; i <= increaseBy; i++ { - for _, ng := range nodeConfig { + for _, ng := range eksClusterConfig.NodeGroups { newNodeGroup := management.NodeGroup{ NodegroupName: pointer.String(namegen.AppendRandomString("nodegroup")), DesiredSize: ng.DesiredSize, @@ -88,68 +209,144 @@ func AddNodeGroup(cluster *management.Cluster, increaseBy int, client *rancher.C MaxSize: ng.MaxSize, MinSize: ng.MinSize, } - upgradedCluster.EKSConfig.NodeGroups = append(upgradedCluster.EKSConfig.NodeGroups, newNodeGroup) + updateNodeGroupsList = append([]management.NodeGroup{newNodeGroup}, updateNodeGroupsList...) 
} } + upgradedCluster.EKSConfig.NodeGroups = updateNodeGroupsList + cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) - if err != nil { - return nil, err + Expect(err).To(BeNil()) + + if checkClusterConfig { + // Check if the desired config is set correctly + Expect(len(cluster.EKSConfig.NodeGroups)).Should(BeNumerically("==", currentNodeGroupNumber+increaseBy)) + for i, ng := range cluster.EKSConfig.NodeGroups { + Expect(ng.NodegroupName).To(Equal(updateNodeGroupsList[i].NodegroupName)) + } + } + + if wait { + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) } + + if checkClusterConfig { + // Check if the desired config has been applied in Rancher + Eventually(func() int { + ginkgo.GinkgoLogr.Info("Waiting for the total nodegroup count to increase in EKSStatus.UpstreamSpec ...") + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + return len(cluster.EKSStatus.UpstreamSpec.NodeGroups) + }, tools.SetTimeout(15*time.Minute), 10*time.Second).Should(BeNumerically("==", currentNodeGroupNumber+increaseBy)) + + for i, ng := range cluster.EKSStatus.UpstreamSpec.NodeGroups { + Expect(ng.NodegroupName).To(Equal(updateNodeGroupsList[i].NodegroupName)) + } + } + return cluster, nil } // DeleteNodeGroup deletes a nodegroup from the list +// if checkClusterConfig is set to true, it will validate that nodegroup has been deleted successfully // TODO: Modify this method to delete a custom qty of DeleteNodeGroup, perhaps by adding an `decreaseBy int` arg -func DeleteNodeGroup(cluster *management.Cluster, client *rancher.Client) (*management.Cluster, error) { - upgradedCluster := new(management.Cluster) - upgradedCluster.Name = cluster.Name - upgradedCluster.EKSConfig = cluster.EKSConfig - - upgradedCluster.EKSConfig.NodeGroups = cluster.EKSConfig.NodeGroups[:1] +func DeleteNodeGroup(cluster *management.Cluster, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { + upgradedCluster := cluster + currentNodeGroupNumber := len(cluster.EKSConfig.NodeGroups) + updateNodeGroupsList := cluster.EKSConfig.NodeGroups[:1] + upgradedCluster.EKSConfig.NodeGroups = updateNodeGroupsList cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) - if err != nil { - return nil, err + Expect(err).To(BeNil()) + + if checkClusterConfig { + // Check if the desired config is set correctly + Expect(len(cluster.EKSConfig.NodeGroups)).Should(BeNumerically("==", currentNodeGroupNumber-1)) + for i, ng := range cluster.EKSConfig.NodeGroups { + Expect(ng.NodegroupName).To(Equal(updateNodeGroupsList[i].NodegroupName)) + } + } + if wait { + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) + } + if checkClusterConfig { + + // Check if the desired config has been applied in Rancher + Eventually(func() int { + ginkgo.GinkgoLogr.Info("Waiting for the total nodegroup count to decrease in EKSStatus.UpstreamSpec ...") + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + return len(cluster.EKSStatus.UpstreamSpec.NodeGroups) + }, tools.SetTimeout(15*time.Minute), 10*time.Second).Should(BeNumerically("==", currentNodeGroupNumber-1)) + for i, ng := range cluster.EKSStatus.UpstreamSpec.NodeGroups { + Expect(ng.NodegroupName).To(Equal(updateNodeGroupsList[i].NodegroupName)) + } } return cluster, nil } // ScaleNodeGroup modifies the number of initialNodeCount of all the nodegroups as defined by nodeCount -func ScaleNodeGroup(cluster 
*management.Cluster, client *rancher.Client, nodeCount int64) (*management.Cluster, error) { - upgradedCluster := new(management.Cluster) - upgradedCluster.Name = cluster.Name - upgradedCluster.EKSConfig = cluster.EKSConfig +// if wait is set to true, it will wait until the cluster finishes updating; +// if checkClusterConfig is set to true, it will validate that nodegroup has been scaled successfully +func ScaleNodeGroup(cluster *management.Cluster, client *rancher.Client, nodeCount int64, wait, checkClusterConfig bool) (*management.Cluster, error) { + upgradedCluster := cluster for i := range upgradedCluster.EKSConfig.NodeGroups { upgradedCluster.EKSConfig.NodeGroups[i].DesiredSize = pointer.Int64(nodeCount) upgradedCluster.EKSConfig.NodeGroups[i].MaxSize = pointer.Int64(nodeCount) - upgradedCluster.EKSConfig.NodeGroups[i].MinSize = pointer.Int64(nodeCount) } cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) - if err != nil { - return nil, err + Expect(err).To(BeNil()) + + if checkClusterConfig { + // Check if the desired config is set correctly + for i := range cluster.EKSConfig.NodeGroups { + Expect(*cluster.EKSConfig.NodeGroups[i].DesiredSize).To(BeNumerically("==", nodeCount)) + } } + + if wait { + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) + } + + if checkClusterConfig { + // check that the desired config is applied on Rancher + Eventually(func() bool { + ginkgo.GinkgoLogr.Info("Waiting for the node count change to appear in EKSStatus.UpstreamSpec ...") + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + for i := range cluster.EKSStatus.UpstreamSpec.NodeGroups { + if ng := cluster.EKSStatus.UpstreamSpec.NodeGroups[i]; *ng.DesiredSize != nodeCount { + return false + } + } + return true + }, tools.SetTimeout(15*time.Minute), 10*time.Second).Should(BeTrue()) + } + return cluster, nil } // ListEKSAvailableVersions is a function to list and return only available EKS versions for a specific cluster. 
func ListEKSAvailableVersions(client *rancher.Client, clusterID string) (availableVersions []string, err error) { - // kubernetesversions.ListEKSAvailableVersions expects cluster.Version.GitVersion to be available, which it is not sometimes, so we fetch the cluster again to ensure it has all the available data - cluster, err := client.Management.Cluster.ByID(clusterID) + + allAvailableVersions, err := kubernetesversions.ListEKSAllVersions(client) if err != nil { return nil, err } - return kubernetesversions.ListEKSAvailableVersions(client, cluster) + + return helpers.FilterUIUnsupportedVersions(allAvailableVersions, client), nil } // Create AWS EKS cluster using EKS CLI -func CreateEKSClusterOnAWS(eks_region string, clusterName string, k8sVersion string, nodes string) error { +func CreateEKSClusterOnAWS(eks_region string, clusterName string, k8sVersion string, nodes string, tags map[string]string) error { currentKubeconfig := os.Getenv("KUBECONFIG") defer os.Setenv("KUBECONFIG", currentKubeconfig) helpers.SetTempKubeConfig(clusterName) - tags := GetTags() formattedTags := k8slabels.SelectorFromSet(tags).String() fmt.Println("Creating EKS cluster ...") args := []string{"create", "cluster", "--region=" + eks_region, "--name=" + clusterName, "--version=" + k8sVersion, "--nodegroup-name", "ranchernodes", "--nodes", nodes, "--managed", "--tags", formattedTags} @@ -186,77 +383,39 @@ func DeleteEKSClusterOnAWS(eks_region string, clusterName string) error { return nil } -func ImportEKSHostedCluster(client *rancher.Client, displayName, cloudCredentialID string, enableClusterAlerting, enableClusterMonitoring, enableNetworkPolicy, windowsPreferedCluster bool, labels map[string]string) (*management.Cluster, error) { - eksHostCluster := EksHostClusterConfig(displayName, cloudCredentialID) - cluster := &management.Cluster{ - DockerRootDir: "/var/lib/docker", - EKSConfig: eksHostCluster, - Name: displayName, - EnableClusterAlerting: enableClusterAlerting, - EnableClusterMonitoring: enableClusterMonitoring, - EnableNetworkPolicy: &enableNetworkPolicy, - Labels: labels, - WindowsPreferedCluster: windowsPreferedCluster, - } - - clusterResp, err := client.Management.Cluster.Create(cluster) - if err != nil { - return nil, err - } - return clusterResp, err -} - -func EksHostClusterConfig(displayName, cloudCredentialID string) *management.EKSClusterConfigSpec { - var eksClusterConfig ImportClusterConfig - config.LoadConfig("eksClusterConfig", &eksClusterConfig) - - return &management.EKSClusterConfigSpec{ - AmazonCredentialSecret: cloudCredentialID, - DisplayName: displayName, - Imported: eksClusterConfig.Imported, - Region: eksClusterConfig.Region, - } -} - -func EksHostNodeConfig() []management.NodeGroup { - var nodeConfig management.EKSClusterConfigSpec - config.LoadConfig("eksClusterConfig", &nodeConfig) - - return nodeConfig.NodeGroups -} - -type ImportClusterConfig struct { - Region string `json:"region" yaml:"region"` - Imported bool `json:"imported" yaml:"imported"` - NodeGroups []*management.NodeGroup `json:"nodeGroups" yaml:"nodeGroups"` - Tags *map[string]string `json:"tags,omitempty" yaml:"tags,omitempty"` -} - // defaultEKS returns a version less than the highest version or K8S_UPGRADE_MINOR_VERSION if it is set. // Note: It does not return the default version used by UI which is the highest supported version. 
-func defaultEKS(client *rancher.Client) (defaultEKS string, err error) { - var versions []string - versions, err = kubernetesversions.ListEKSAllVersions(client) +func defaultEKS(client *rancher.Client, forUpgrade bool) (defaultEKS string, err error) { + + var allVersions []string + allVersions, err = kubernetesversions.ListEKSAllVersions(client) if err != nil { return } - if upgradeVersion := helpers.K8sUpgradedMinorVersion; upgradeVersion != "" { - for _, version := range versions { - if helpers.VersionCompare(upgradeVersion, version) > 0 { + versions := helpers.FilterUIUnsupportedVersions(allVersions, client) + maxValue := helpers.HighestK8sMinorVersionSupportedByUI(client) + + for i := 0; i < len(versions); i++ { + version := versions[i] + if forUpgrade { + if result := helpers.VersionCompare(version, maxValue); result == -1 { + return version, nil + } + } else { + if strings.Contains(version, maxValue) { return version, nil } } - } - return versions[1], nil + return } // GetK8sVersion returns the k8s version to be used by the test; // this value can either be envvar DOWNSTREAM_K8S_MINOR_VERSION or the default UI value returned by DefaultEKS. -func GetK8sVersion(client *rancher.Client) (string, error) { +func GetK8sVersion(client *rancher.Client, forUpgrade bool) (string, error) { if k8sVersion := helpers.DownstreamK8sMinorVersion; k8sVersion != "" { return k8sVersion, nil } - return defaultEKS(client) + return defaultEKS(client, forUpgrade) } diff --git a/hosted/eks/k8s_chart_support/k8s_chart_support_import_test.go b/hosted/eks/k8s_chart_support/k8s_chart_support_import_test.go index abf62fbd..6474e4f6 100644 --- a/hosted/eks/k8s_chart_support/k8s_chart_support_import_test.go +++ b/hosted/eks/k8s_chart_support/k8s_chart_support_import_test.go @@ -6,8 +6,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/eks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/eks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -16,18 +14,10 @@ import ( var _ = Describe("K8sChartSupportImport", func() { var cluster *management.Cluster BeforeEach(func() { - var err error - err = helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1") + err := helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - eksConfig := new(helper.ImportClusterConfig) - config.LoadAndUpdateConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig, func() { - eksConfig.Region = region - tags := helper.GetTags() - eksConfig.Tags = &tags - }) - - cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, region) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) @@ -48,6 +38,6 @@ var _ = Describe("K8sChartSupportImport", func() { It("should successfully test k8s chart support import", func() { testCaseID = 65 // Report to Qase - commonchecks(&ctx, cluster) + commonchecks(ctx.RancherAdminClient, cluster) }) }) diff --git a/hosted/eks/k8s_chart_support/k8s_chart_support_provisioning_test.go b/hosted/eks/k8s_chart_support/k8s_chart_support_provisioning_test.go index 420c660e..26ce9555 100644 --- a/hosted/eks/k8s_chart_support/k8s_chart_support_provisioning_test.go +++ b/hosted/eks/k8s_chart_support/k8s_chart_support_provisioning_test.go @@ -6,8 +6,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/eks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/eks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -17,13 +15,7 @@ var _ = Describe("K8sChartSupportProvisioning", func() { var cluster *management.Cluster BeforeEach(func() { var err error - eksConfig := new(eks.ClusterConfig) - config.LoadAndUpdateConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig, func() { - eksConfig.Region = region - eksConfig.Tags = helper.GetTags() - eksConfig.KubernetesVersion = &k8sVersion - }) - cluster, err = eks.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, k8sVersion, region, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) @@ -39,7 +31,7 @@ var _ = Describe("K8sChartSupportProvisioning", func() { It("should successfully test k8s chart support provisioning", func() { testCaseID = 166 - commonchecks(&ctx, cluster) + commonchecks(ctx.RancherAdminClient, cluster) }) }) diff --git a/hosted/eks/k8s_chart_support/k8s_chart_support_suite_test.go b/hosted/eks/k8s_chart_support/k8s_chart_support_suite_test.go index 6699334a..92c56a50 100644 --- a/hosted/eks/k8s_chart_support/k8s_chart_support_suite_test.go +++ b/hosted/eks/k8s_chart_support/k8s_chart_support_suite_test.go @@ -9,8 +9,8 @@ import ( . "github.com/onsi/gomega" "github.com/rancher-sandbox/ele-testhelpers/tools" . 
"github.com/rancher-sandbox/qase-ginkgo" + "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters" namegen "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/hosted-providers-e2e/hosted/eks/helper" @@ -45,7 +45,7 @@ var _ = BeforeEach(func() { var err error clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) - k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient) + k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, false) Expect(err).To(BeNil()) Expect(k8sVersion).ToNot(BeEmpty()) @@ -70,7 +70,7 @@ var _ = ReportAfterEach(func(report SpecReport) { Qase(testCaseID, report) }) -func commonchecks(ctx *helpers.Context, cluster *management.Cluster) { +func commonchecks(client *rancher.Client, cluster *management.Cluster) { var originalChartVersion string By("checking the chart version", func() { @@ -94,13 +94,8 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster) { By("making a change(scaling nodegroup up) to the cluster to validate functionality after chart downgrade", func() { var err error - cluster, err = helper.ScaleNodeGroup(cluster, ctx.RancherAdminClient, initialNodeCount+1) + cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount+1, true, true) Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - for i := range cluster.EKSConfig.NodeGroups { - Expect(*cluster.EKSConfig.NodeGroups[i].DesiredSize).To(BeNumerically("==", initialNodeCount+1)) - } }) By("uninstalling the operator chart", func() { @@ -109,7 +104,7 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster) { By("making a change(scaling nodegroup down) to the cluster to re-install the operator and validating it is re-installed to the latest/original version", func() { var err error - cluster, err = helper.ScaleNodeGroup(cluster, ctx.RancherAdminClient, initialNodeCount) + cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount, false, true) Expect(err).To(BeNil()) By("ensuring that the chart is re-installed to the latest/original version", func() { diff --git a/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_import_upgrade_test.go b/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_import_upgrade_test.go index c343178b..905a7694 100644 --- a/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_import_upgrade_test.go +++ b/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_import_upgrade_test.go @@ -6,8 +6,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/eks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/eks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -16,18 +14,10 @@ import ( var _ = Describe("K8sChartSupportUpgradeImport", func() { var cluster *management.Cluster BeforeEach(func() { - var err error - err = helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1") + err := helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - eksConfig := new(helper.ImportClusterConfig) - config.LoadAndUpdateConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig, func() { - eksConfig.Region = region - tags := helper.GetTags() - eksConfig.Tags = &tags - }) - - cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, region) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) diff --git a/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_provisioning_upgrade_test.go b/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_provisioning_upgrade_test.go index 86c44561..6ca4f7e8 100644 --- a/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_provisioning_upgrade_test.go +++ b/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_provisioning_upgrade_test.go @@ -6,8 +6,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/eks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/eks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -17,13 +15,7 @@ var _ = Describe("K8sChartSupportUpgradeProvisioning", func() { var cluster *management.Cluster BeforeEach(func() { var err error - eksConfig := new(eks.ClusterConfig) - config.LoadAndUpdateConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig, func() { - eksConfig.Region = region - eksConfig.Tags = helper.GetTags() - eksConfig.KubernetesVersion = &k8sVersion - }) - cluster, err = eks.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, k8sVersion, region, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) diff --git a/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_upgrade_suite_test.go b/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_upgrade_suite_test.go index 51455bd2..92683973 100644 --- a/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_upgrade_suite_test.go +++ b/hosted/eks/k8s_chart_support/upgrade/k8s_chart_support_upgrade_suite_test.go @@ -61,7 +61,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { var _ = BeforeEach(func() { var err error clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) - k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient) + k8sVersion, err = 
helper.GetK8sVersion(ctx.RancherAdminClient, false) Expect(err).To(BeNil()) Expect(k8sVersion).ToNot(BeEmpty()) GinkgoLogr.Info(fmt.Sprintf("Using EKS version %s", k8sVersion)) @@ -93,26 +93,7 @@ var _ = ReportAfterEach(func(report SpecReport) { func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName, rancherUpgradedVersion, hostname, k8sUpgradedVersion string) { - By("checking cluster name is same", func() { - Expect(cluster.Name).To(BeEquivalentTo(clusterName)) - }) - - By("checking service account token secret", func() { - success, err := clusters.CheckServiceAccountTokenSecret(ctx.RancherAdminClient, clusterName) - Expect(err).To(BeNil()) - Expect(success).To(BeTrue()) - }) - - By("checking all management nodes are ready", func() { - err := nodestat.AllManagementNodeReady(ctx.RancherAdminClient, cluster.ID, helpers.Timeout) - Expect(err).To(BeNil()) - }) - - By("checking all pods are ready", func() { - podErrors := pods.StatusPods(ctx.RancherAdminClient, cluster.ID) - Expect(podErrors).To(BeEmpty()) - }) - + helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName) var originalChartVersion string By("checking the chart version", func() { originalChartVersion = helpers.GetCurrentOperatorChartVersion() @@ -189,7 +170,7 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName Expect(*latestVersion).To(ContainSubstring(k8sUpgradedVersion)) Expect(helpers.VersionCompare(*latestVersion, cluster.Version.GitVersion)).To(BeNumerically("==", 1)) - cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, latestVersion, ctx.RancherAdminClient) + cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, *latestVersion, ctx.RancherAdminClient, true) Expect(err).To(BeNil()) err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) Expect(err).To(BeNil()) @@ -207,7 +188,7 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName By("making a change to the cluster to validate functionality after chart downgrade", func() { var err error - cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, latestVersion, ctx.RancherAdminClient) + cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, *latestVersion, ctx.RancherAdminClient, true, false) Expect(err).To(BeNil()) if !cluster.EKSConfig.Imported { // TODO Does not upgrade version for imported cluster, since they use custom Launch Templates @@ -226,7 +207,7 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName By("making a change(adding a nodepool) to the cluster to re-install the operator and validating it is re-installed to the latest/upgraded version", func() { currentNodeGroupNumber := len(cluster.EKSConfig.NodeGroups) var err error - cluster, err = helper.AddNodeGroup(cluster, 1, ctx.RancherAdminClient) + cluster, err = helper.AddNodeGroup(cluster, 1, ctx.RancherAdminClient, false, false) Expect(err).To(BeNil()) By("ensuring that the chart is re-installed to the latest/upgraded version", func() { @@ -235,7 +216,12 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) Expect(err).To(BeNil()) - Expect(len(cluster.EKSConfig.NodeGroups)).To(BeNumerically("==", currentNodeGroupNumber+1)) + // Check if the desired config has been applied in Rancher + Eventually(func() int { + cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + return 
len(cluster.EKSStatus.UpstreamSpec.NodeGroups) + }, tools.SetTimeout(20*time.Minute), 10*time.Second).Should(BeNumerically("==", currentNodeGroupNumber+1)) }) } diff --git a/hosted/eks/p0/p0_importing_test.go b/hosted/eks/p0/p0_importing_test.go index cc23d2c7..be3103d3 100644 --- a/hosted/eks/p0/p0_importing_test.go +++ b/hosted/eks/p0/p0_importing_test.go @@ -19,8 +19,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/rancher/shepherd/extensions/clusters/eks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" @@ -32,16 +30,19 @@ import ( var _ = Describe("P0Importing", func() { for _, testData := range []struct { qaseID int64 + isUpgrade bool testBody func(cluster *management.Cluster, client *rancher.Client, clusterName string) testTitle string }{ { qaseID: 234, + isUpgrade: false, testBody: p0NodesChecks, testTitle: "should successfully provision the cluster & add, delete, scale nodepool", }, { qaseID: 73, + isUpgrade: true, testBody: p0upgradeK8sVersionChecks, testTitle: "should be able to upgrade k8s version of the imported cluster", }, @@ -51,19 +52,13 @@ var _ = Describe("P0Importing", func() { var cluster *management.Cluster BeforeEach(func() { - var err error + k8sVersion, err := helper.GetK8sVersion(ctx.RancherAdminClient, testData.isUpgrade) + Expect(err).To(BeNil()) GinkgoLogr.Info("Using K8s version: " + k8sVersion) - err = helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1") + err = helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - eksConfig := new(helper.ImportClusterConfig) - config.LoadAndUpdateConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig, func() { - eksConfig.Region = region - tags := helper.GetTags() - eksConfig.Tags = &tags - }) - - cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, region) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) diff --git a/hosted/eks/p0/p0_provisioning_test.go b/hosted/eks/p0/p0_provisioning_test.go index 8b3dd6bc..9220eecd 100644 --- a/hosted/eks/p0/p0_provisioning_test.go +++ b/hosted/eks/p0/p0_provisioning_test.go @@ -19,11 +19,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/eks" "github.com/rancher/hosted-providers-e2e/hosted/eks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -32,16 +30,19 @@ import ( var _ = Describe("P0Provisioning", func() { for _, testData := range []struct { qaseID int64 + isUpgrade bool testBody func(cluster *management.Cluster, client *rancher.Client, clusterName string) testTitle string }{ { qaseID: 71, + isUpgrade: false, testBody: p0NodesChecks, testTitle: "should successfully provision the cluster & add, delete, scale nodepool", }, { qaseID: 74, + isUpgrade: true, testBody: p0upgradeK8sVersionChecks, testTitle: "should be able to upgrade k8s version of the provisioned cluster", }, @@ -51,16 +52,10 @@ var _ = Describe("P0Provisioning", func() { var cluster *management.Cluster BeforeEach(func() { - var err error + k8sVersion, err := helper.GetK8sVersion(ctx.RancherAdminClient, testData.isUpgrade) + Expect(err).To(BeNil()) GinkgoLogr.Info("Using K8s version: " + k8sVersion) - eksConfig := new(eks.ClusterConfig) - config.LoadAndUpdateConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig, func() { - eksConfig.Region = region - eksConfig.Tags = helper.GetTags() - eksConfig.KubernetesVersion = &k8sVersion - }) - - cluster, err = eks.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, k8sVersion, region, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) diff --git a/hosted/eks/p0/p0_suite_test.go b/hosted/eks/p0/p0_suite_test.go index 6e2006a6..3328d6a5 100644 --- a/hosted/eks/p0/p0_suite_test.go +++ b/hosted/eks/p0/p0_suite_test.go @@ -25,7 +25,6 @@ import ( "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters" namegen "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/hosted-providers-e2e/hosted/eks/helper" @@ -37,10 +36,10 @@ const ( ) var ( - ctx helpers.Context - k8sVersion, clusterName string - testCaseID int64 - region = helpers.GetEKSRegion() + ctx helpers.Context + clusterName string + testCaseID int64 + region = helpers.GetEKSRegion() ) func TestP0(t *testing.T) { @@ -56,10 +55,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { }) var _ = BeforeEach(func() { - var err error clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) - k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient) - Expect(err).To(BeNil()) }) var _ = ReportBeforeEach(func(report SpecReport) { @@ -78,77 +74,48 @@ func p0upgradeK8sVersionChecks(cluster *management.Cluster, client *rancher.Clie versions, err := helper.ListEKSAvailableVersions(client, cluster.ID) Expect(err).To(BeNil()) Expect(versions).ToNot(BeEmpty()) - upgradeToVersion := &versions[0] - GinkgoLogr.Info(fmt.Sprintf("Upgrading cluster to EKS version %s", *upgradeToVersion)) + upgradeToVersion := versions[0] + GinkgoLogr.Info(fmt.Sprintf("Upgrading cluster to EKS version %s", upgradeToVersion)) By("upgrading the ControlPlane", func() { - var err error - cluster, err = 
helper.UpgradeClusterKubernetesVersion(cluster, upgradeToVersion, client) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, upgradeToVersion, client, true) Expect(err).To(BeNil()) - Expect(cluster.EKSConfig.KubernetesVersion).To(BeEquivalentTo(upgradeToVersion)) }) // Does not upgrades version since using custom LT, skip for imported cluster Expect(helpers.TestConfig).ToNot(BeEmpty()) if strings.Contains(helpers.TestConfig, "provisioning") { By("upgrading the NodeGroups", func() { - var err error - cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, upgradeToVersion, client) + cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, upgradeToVersion, client, true, false) Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) - Expect(err).To(BeNil()) - for _, ng := range cluster.EKSConfig.NodeGroups { - Expect(ng.Version).To(BeEquivalentTo(upgradeToVersion)) - } }) } } func p0NodesChecks(cluster *management.Cluster, client *rancher.Client, clusterName string) { helpers.ClusterIsReadyChecks(cluster, client, clusterName) - - currentNodeGroupNumber := len(cluster.EKSConfig.NodeGroups) initialNodeCount := *cluster.EKSConfig.NodeGroups[0].DesiredSize By("scaling up the NodeGroup", func() { var err error - cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount+1) + cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount+increaseBy, true, true) Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) - Expect(err).To(BeNil()) - for i := range cluster.EKSConfig.NodeGroups { - Expect(*cluster.EKSConfig.NodeGroups[i].DesiredSize).To(BeNumerically("==", initialNodeCount+1)) - } }) By("scaling down the NodeGroup", func() { var err error - cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount, true, true) Expect(err).To(BeNil()) - for i := range cluster.EKSConfig.NodeGroups { - Expect(*cluster.EKSConfig.NodeGroups[i].DesiredSize).To(BeNumerically("==", initialNodeCount)) - } }) By("adding a NodeGroup", func() { var err error - cluster, err = helper.AddNodeGroup(cluster, increaseBy, client) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + cluster, err = helper.AddNodeGroup(cluster, increaseBy, client, true, true) Expect(err).To(BeNil()) - Expect(len(cluster.EKSConfig.NodeGroups)).To(BeNumerically("==", currentNodeGroupNumber+1)) }) By("deleting the NodeGroup", func() { var err error - cluster, err = helper.DeleteNodeGroup(cluster, client) + cluster, err = helper.DeleteNodeGroup(cluster, client, true, true) Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) - Expect(err).To(BeNil()) - Expect(len(cluster.EKSConfig.NodeGroups)).To(BeNumerically("==", currentNodeGroupNumber)) - }) } diff --git a/hosted/eks/support_matrix/support_matrix_importing_test.go b/hosted/eks/support_matrix/support_matrix_importing_test.go index 845f3078..8828fd04 100644 --- a/hosted/eks/support_matrix/support_matrix_importing_test.go +++ b/hosted/eks/support_matrix/support_matrix_importing_test.go @@ -17,8 +17,6 @@ package support_matrix_test import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/rancher/shepherd/extensions/clusters/eks" - "github.com/rancher/shepherd/pkg/config" "fmt" @@ -42,15 +40,9 @@ var _ = Describe("SupportMatrixImporting", func() { BeforeEach(func() { clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) var err error - err = helper.CreateEKSClusterOnAWS(region, clusterName, version, "1") + err = helper.CreateEKSClusterOnAWS(region, clusterName, version, "1", helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - eksConfig := new(helper.ImportClusterConfig) - config.LoadAndUpdateConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig, func() { - eksConfig.Region = region - tags := helper.GetTags() - eksConfig.Tags = &tags - }) - cluster, err = helper.ImportEKSHostedCluster(ctx.StdUserClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.ImportEKSHostedCluster(ctx.StdUserClient, clusterName, ctx.CloudCred.ID, region) Expect(err).To(BeNil()) // Requires RancherAdminClient cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) diff --git a/hosted/eks/support_matrix/support_matrix_provisioning_test.go b/hosted/eks/support_matrix/support_matrix_provisioning_test.go index aad69990..28ee380d 100644 --- a/hosted/eks/support_matrix/support_matrix_provisioning_test.go +++ b/hosted/eks/support_matrix/support_matrix_provisioning_test.go @@ -17,12 +17,10 @@ package support_matrix_test import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/rancher/shepherd/pkg/config" "fmt" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/eks" namegen "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/hosted-providers-e2e/hosted/eks/helper" @@ -41,14 +39,8 @@ var _ = Describe("SupportMatrixProvisioning", func() { ) BeforeEach(func() { clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) - eksConfig := new(eks.ClusterConfig) - config.LoadAndUpdateConfig(eks.EKSClusterConfigConfigurationFileKey, eksConfig, func() { - eksConfig.Region = region - eksConfig.KubernetesVersion = &version - eksConfig.Tags = helper.GetTags() - }) var err error - cluster, err = eks.CreateEKSHostedCluster(ctx.StdUserClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.CreateEKSHostedCluster(ctx.StdUserClient, clusterName, ctx.CloudCred.ID, version, region, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) // Requires RancherAdminClient cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) diff --git a/hosted/eks/support_matrix/support_matrix_suite_test.go b/hosted/eks/support_matrix/support_matrix_suite_test.go index cf8b0ca2..e1ec12cb 100644 --- a/hosted/eks/support_matrix/support_matrix_suite_test.go +++ b/hosted/eks/support_matrix/support_matrix_suite_test.go @@ -27,10 +27,10 @@ import ( ) var ( - availableVersionList []string - testCaseID int64 - ctx helpers.Context - region = helpers.GetEKSRegion() + allAvailableVersionList, availableVersionList []string + testCaseID int64 + ctx helpers.Context + region = helpers.GetEKSRegion() ) func TestSupportMatrix(t *testing.T) { @@ -39,7 +39,8 @@ func TestSupportMatrix(t *testing.T) { ctx = helpers.CommonBeforeSuite() helpers.CreateStdUserClient(&ctx) var err error - availableVersionList, err = kubernetesversions.ListEKSAllVersions(ctx.StdUserClient) + allAvailableVersionList, err = 
kubernetesversions.ListEKSAllVersions(ctx.StdUserClient)
+	availableVersionList = helpers.FilterUIUnsupportedVersions(allAvailableVersionList, ctx.StdUserClient)
 	Expect(err).To(BeNil())
 	Expect(availableVersionList).ToNot(BeEmpty())
 	RunSpecs(t, "SupportMatrix Suite")
diff --git a/hosted/helpers/helper_common.go b/hosted/helpers/helper_common.go
index c7ad7718..78d82e70 100644
--- a/hosted/helpers/helper_common.go
+++ b/hosted/helpers/helper_common.go
@@ -264,10 +264,15 @@ func GetCommonMetadataLabels() map[string]string {
 		filename = fmt.Sprintf("line%d_%s", specReport.LineNumber(), filename)
 	}
 
-	return map[string]string{
+	metadataLabels := map[string]string{
 		"owner":          "hosted-providers-qa-ci-" + testuser.Username,
 		"testfilenumber": filename,
 	}
+
+	if !clusterCleanup {
+		metadataLabels["janitor-ignore"] = "true"
+	}
+	return metadataLabels
 }
 
 func SetTempKubeConfig(clusterName string) {
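
A minimal usage sketch of the refactored EKS helpers, assuming the suite-level ctx, clusterName, k8sVersion and region variables defined in the p0 suite and an illustrative p0_test package name; the helper calls themselves (CreateEKSHostedCluster, WaitUntilClusterIsReady, ScaleNodeGroup) mirror those used by the specs in this change:

	package p0_test

	import (
		. "github.com/onsi/ginkgo/v2"
		. "github.com/onsi/gomega"

		"github.com/rancher/hosted-providers-e2e/hosted/eks/helper"
		"github.com/rancher/hosted-providers-e2e/hosted/helpers"
	)

	var _ = It("provisions an EKS cluster and scales a nodegroup", func() {
		// Provision the cluster through Rancher; tags come from the shared metadata labels.
		cluster, err := helper.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, k8sVersion, region, helpers.GetCommonMetadataLabels())
		Expect(err).To(BeNil())

		// Block until Rancher reports the cluster as ready before mutating it.
		cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
		Expect(err).To(BeNil())

		// Scale every nodegroup up by one node; wait=true blocks on the update and
		// checkClusterConfig=true re-validates the change against EKSStatus.UpstreamSpec.
		initialNodeCount := *cluster.EKSConfig.NodeGroups[0].DesiredSize
		cluster, err = helper.ScaleNodeGroup(cluster, ctx.RancherAdminClient, initialNodeCount+1, true, true)
		Expect(err).To(BeNil())
	})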