From 142cbd33f45798b0ce3bbeb32bca794474ecc96f Mon Sep 17 00:00:00 2001 From: Parthvi Vala Date: Mon, 20 May 2024 20:11:22 +0530 Subject: [PATCH] Refactor AKS tests (#100) * Refactor the p0 and support_matrix tests Signed-off-by: Parthvi * Use second-highest k8s version as default and filter versions not supported by the UI Signed-off-by: Parthvi * Change k8s upgrade func name : Chandan Pinjani : Parthvi Vala * Refactor p0 tests and revert defaultAKS changes but modify to return a different value for upgrade cases Signed-off-by: Parthvi * Add ClusterIsReadyChecks common function and use it in AKS Signed-off-by: Parthvi * Review comment: Rename c -> testData Signed-off-by: Parthvi * Refactor the aks provisioning and import methods to not write to config file Signed-off-by: Parthvi * Attempt#2 at fixing AKS provisoning failures on nightly operator chart Signed-off-by: Parthvi * Refactor test checks; move checks to functions Signed-off-by: Parthvi * Attempt#1 at fixing flakes Signed-off-by: Parthvi * Remove debug env and fix panic error Signed-off-by: Parthvi * Remove AppliedSpec references Signed-off-by: Parthvi * Change the nodepool add/delete checks to check for name Signed-off-by: Parthvi * Remove AppliedSpec references (2) Signed-off-by: Parthvi * Review changes Signed-off-by: Parthvi * Use gomega for wait block Signed-off-by: Parthvi * Use client instead of ctx.RancherAdminClient in k8s chart support tests Signed-off-by: Parthvi --------- Signed-off-by: Parthvi --- README.md | 1 + hosted/aks/helper/helper_cluster.go | 345 +++++++++++++----- .../k8s_chart_support_import_test.go | 17 +- .../k8s_chart_support_provisioning_test.go | 16 +- .../k8s_chart_support_suite_test.go | 27 +- .../k8s_chart_support_import_upgrade_test.go | 15 +- ...chart_support_provisioning_upgrade_test.go | 13 +- .../k8s_chart_support_upgrade_suite_test.go | 64 +--- hosted/aks/p0/p0_importing_test.go | 178 +++------ hosted/aks/p0/p0_provisioning_test.go | 174 ++------- hosted/aks/p0/p0_suite_test.go | 62 +++- .../support_matrix_importing_test.go | 37 +- .../support_matrix_provisioning_test.go | 37 +- .../support_matrix_suite_test.go | 4 +- hosted/helpers/helper_common.go | 4 - 15 files changed, 448 insertions(+), 546 deletions(-) diff --git a/README.md b/README.md index b472a49f..19f70a98 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ Following are the common environment variables that need to be exported for runn 4. PROVIDER: Type of the hosted provider you want to test. Acceptable values - gke, eks, aks 5. DOWNSTREAM_K8S_MINOR_VERSION (optional): Downstream cluster Kubernetes version to test. If the env var is not provided, it uses a provider specific default value. 6. DOWNSTREAM_CLUSTER_CLEANUP (optional): If set to true, downstream cluster will be deleted. Default: false. +7. RANCHER_CLIENT_DEBUG (optional, debug): Set to true to watch API requests and responses being sent to rancher. #### To run K8s Chart support test cases: 1. KUBECONFIG: Upstream K8s' Kubeconfig file; usually it is k3s.yaml. diff --git a/hosted/aks/helper/helper_cluster.go b/hosted/aks/helper/helper_cluster.go index 41a5378d..b7dacbdd 100644 --- a/hosted/aks/helper/helper_cluster.go +++ b/hosted/aks/helper/helper_cluster.go @@ -7,7 +7,12 @@ import ( "net/http" "os" "strings" + "time" + "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/rancher-sandbox/ele-testhelpers/tools" + "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/clusters/aks" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -28,46 +33,141 @@ var ( subscriptionID = os.Getenv("AKS_SUBSCRIPTION_ID") ) -func GetTags() map[string]string { - aksConfig := new(management.AKSClusterConfigSpec) - config.LoadConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig) - tags := helpers.GetCommonMetadataLabels() - for key, value := range aksConfig.Tags { - tags[key] = value - } - return tags -} - -// UpgradeClusterKubernetesVersion upgrades the k8s version to the value defined by upgradeToVersion. -func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersion *string, client *rancher.Client) (*management.Cluster, error) { +// UpgradeClusterKubernetesVersion upgrades the k8s version to the value defined by upgradeToVersio; +// if checkClusterConfig is set to true, it will validate that the cluster control plane has been upgrade successfully +func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersion string, client *rancher.Client, checkClusterConfig bool) (*management.Cluster, error) { + currentVersion := *cluster.AKSConfig.KubernetesVersion upgradedCluster := new(management.Cluster) upgradedCluster.Name = cluster.Name upgradedCluster.AKSConfig = cluster.AKSConfig - upgradedCluster.AKSConfig.KubernetesVersion = upgradeToVersion + upgradedCluster.AKSConfig.KubernetesVersion = &upgradeToVersion - cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) + var err error + cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) if err != nil { return nil, err } + + if checkClusterConfig { + // Check if the desired config is set correctly + Expect(*cluster.AKSConfig.KubernetesVersion).To(Equal(upgradeToVersion)) + // ensure nodepool version is still the same when config is applied + for _, np := range cluster.AKSConfig.NodePools { + Expect(*np.OrchestratorVersion).To(Equal(currentVersion)) + } + + // Check if the desired config has been applied in Rancher + Eventually(func() string { + ginkgo.GinkgoLogr.Info("Waiting for k8s upgrade to appear in AKSStatus.UpstreamSpec ...") + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).NotTo(HaveOccurred()) + return *cluster.AKSStatus.UpstreamSpec.KubernetesVersion + }, tools.SetTimeout(10*time.Minute), 5*time.Second).Should(Equal(upgradeToVersion)) + // ensure nodepool version is same in Rancher + for _, np := range cluster.AKSStatus.UpstreamSpec.NodePools { + Expect(*np.OrchestratorVersion).To(Equal(currentVersion)) + } + + } return cluster, nil } -// UpgradeNodeKubernetesVersion upgrades the k8s version of nodepool to the value defined by upgradeToVersion. 
-func UpgradeNodeKubernetesVersion(cluster *management.Cluster, upgradeToVersion *string, client *rancher.Client) (*management.Cluster, error) { +// UpgradeNodeKubernetesVersion upgrades the k8s version of nodepool to the value defined by upgradeToVersion; +// if wait is set to true, it will wait until the cluster finishes upgrading; +// if checkClusterConfig is set to true, it will validate that nodepool has been upgraded successfully +func UpgradeNodeKubernetesVersion(cluster *management.Cluster, upgradeToVersion string, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { upgradedCluster := new(management.Cluster) upgradedCluster.Name = cluster.Name upgradedCluster.AKSConfig = cluster.AKSConfig for i := range upgradedCluster.AKSConfig.NodePools { - upgradedCluster.AKSConfig.NodePools[i].OrchestratorVersion = upgradeToVersion + upgradedCluster.AKSConfig.NodePools[i].OrchestratorVersion = &upgradeToVersion } - - cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) + var err error + cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) if err != nil { return nil, err } + + if checkClusterConfig { + // Check if the desired config is set correctly + for _, np := range cluster.AKSConfig.NodePools { + Expect(*np.OrchestratorVersion).To(Equal(upgradeToVersion)) + } + } + + if wait { + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) + } + + if checkClusterConfig { + // Check if the desired config has been applied in Rancher + Eventually(func() bool { + ginkgo.GinkgoLogr.Info("waiting for the nodepool upgrade to appear in AKSStatus.UpstreamSpec ...") + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + for _, np := range cluster.AKSStatus.UpstreamSpec.NodePools { + if *np.OrchestratorVersion != upgradeToVersion { + return false + } + } + return true + }, tools.SetTimeout(12*time.Minute), 10*time.Second).Should(BeTrue()) + } return cluster, nil } +func CreateAKSHostedCluster(client *rancher.Client, cloudCredentialID, clusterName, k8sVersion, location string, tags map[string]string) (*management.Cluster, error) { + var aksClusterConfig aks.ClusterConfig + config.LoadConfig(aks.AKSClusterConfigConfigurationFileKey, &aksClusterConfig) + var aksNodePools []management.AKSNodePool + for _, aksNodePoolConfig := range *aksClusterConfig.NodePools { + aksNodePool := management.AKSNodePool{ + AvailabilityZones: aksNodePoolConfig.AvailabilityZones, + Count: aksNodePoolConfig.NodeCount, + EnableAutoScaling: aksNodePoolConfig.EnableAutoScaling, + MaxPods: aksNodePoolConfig.MaxPods, + MaxCount: aksNodePoolConfig.MaxCount, + MinCount: aksNodePoolConfig.MinCount, + Mode: aksNodePoolConfig.Mode, + Name: aksNodePoolConfig.Name, + OrchestratorVersion: &k8sVersion, + OsDiskSizeGB: aksNodePoolConfig.OsDiskSizeGB, + OsDiskType: aksNodePoolConfig.OsDiskType, + OsType: aksNodePoolConfig.OsType, + VMSize: aksNodePoolConfig.VMSize, + } + aksNodePools = append(aksNodePools, aksNodePool) + } + + cluster := &management.Cluster{ + AKSConfig: &management.AKSClusterConfigSpec{ + AzureCredentialSecret: cloudCredentialID, + ClusterName: clusterName, + DNSPrefix: pointer.String(clusterName + "-dns"), + Imported: false, + KubernetesVersion: &k8sVersion, + LinuxAdminUsername: aksClusterConfig.LinuxAdminUsername, + LoadBalancerSKU: aksClusterConfig.LoadBalancerSKU, + NetworkPlugin: aksClusterConfig.NetworkPlugin, + NodePools: aksNodePools, + PrivateCluster: aksClusterConfig.PrivateCluster, 
+ ResourceGroup: clusterName, + ResourceLocation: location, + Tags: tags, + }, + DockerRootDir: "/var/lib/docker", + Name: clusterName, + } + + clusterResp, err := client.Management.Cluster.Create(cluster) + if err != nil { + return nil, err + } + + return clusterResp, err +} + // DeleteAKSHostCluster deletes the AKS cluster func DeleteAKSHostCluster(cluster *management.Cluster, client *rancher.Client) error { return client.Management.Cluster.Delete(cluster) @@ -89,11 +189,11 @@ func ListSingleVariantAKSAvailableVersions(client *rancher.Client, cloudCredenti oldMinor = currentMinor } } - return singleVersionList, nil + return helpers.FilterUIUnsupportedVersions(singleVersionList, client), nil } // GetK8sVersionVariantAKS returns a variant of a given minor K8s version -func GetK8sVersionVariantAKS(minorVersion string, client *rancher.Client, cloudCredentialID, region string) (version string, err error) { +func GetK8sVersionVariantAKS(minorVersion string, client *rancher.Client, cloudCredentialID, region string) (string, error) { versions, err := ListSingleVariantAKSAvailableVersions(client, cloudCredentialID, region) if err != nil { return "", err @@ -107,15 +207,18 @@ func GetK8sVersionVariantAKS(minorVersion string, client *rancher.Client, cloudC return "", fmt.Errorf("version %s not found", minorVersion) } -// AddNodePool adds a nodepool to the list -func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Client) (*management.Cluster, error) { +// AddNodePool adds a nodepool to the list; if wait is set to true, it will wait until the cluster finishes upgrading; +// if checkClusterConfig is set to true, it will validate that nodepool has been added successfully +func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { + currentNodePoolNumber := len(cluster.AKSConfig.NodePools) + upgradedCluster := new(management.Cluster) upgradedCluster.Name = cluster.Name upgradedCluster.AKSConfig = cluster.AKSConfig - nodeConfig := AksHostNodeConfig() + updateNodePoolsList := cluster.AKSConfig.NodePools for i := 1; i <= increaseBy; i++ { - for _, np := range nodeConfig { + for _, np := range cluster.AKSConfig.NodePools { newNodepool := management.AKSNodePool{ Count: pointer.Int64(1), VMSize: np.VMSize, @@ -123,33 +226,94 @@ func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Cl EnableAutoScaling: np.EnableAutoScaling, Name: pointer.String(namegen.RandStringLower(5)), } - upgradedCluster.AKSConfig.NodePools = append(upgradedCluster.AKSConfig.NodePools, newNodepool) + updateNodePoolsList = append(updateNodePoolsList, newNodepool) } } - cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) + upgradedCluster.AKSConfig.NodePools = updateNodePoolsList + + var err error + cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) if err != nil { return nil, err } + + if checkClusterConfig { + // Check if the desired config is set correctly + Expect(len(cluster.AKSConfig.NodePools)).Should(BeNumerically("==", currentNodePoolNumber+increaseBy)) + for i, np := range cluster.AKSConfig.NodePools { + Expect(np.Name).To(Equal(updateNodePoolsList[i].Name)) + } + } + + if wait { + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) + } + if checkClusterConfig { + // Check if the desired config has been applied in Rancher + Eventually(func() int { + ginkgo.GinkgoLogr.Info("Waiting for the total nodepool count to 
increase in AKSStatus.UpstreamSpec ...") + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + return len(cluster.AKSStatus.UpstreamSpec.NodePools) + }, tools.SetTimeout(12*time.Minute), 10*time.Second).Should(BeNumerically("==", currentNodePoolNumber+increaseBy)) + + for i, np := range cluster.AKSStatus.UpstreamSpec.NodePools { + Expect(np.Name).To(Equal(updateNodePoolsList[i].Name)) + } + } return cluster, nil } -// DeleteNodePool deletes a nodepool from the list +// DeleteNodePool deletes a nodepool from the list; if wait is set to true, it will wait until the cluster finishes upgrading; +// if checkClusterConfig is set to true, it will validate that nodepool has been deleted successfully // TODO: Modify this method to delete a custom qty of DeleteNodePool, perhaps by adding an `decreaseBy int` arg -func DeleteNodePool(cluster *management.Cluster, client *rancher.Client) (*management.Cluster, error) { +func DeleteNodePool(cluster *management.Cluster, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) { + currentNodePoolNumber := len(cluster.AKSConfig.NodePools) + upgradedCluster := new(management.Cluster) upgradedCluster.Name = cluster.Name upgradedCluster.AKSConfig = cluster.AKSConfig - upgradedCluster.AKSConfig.NodePools = cluster.AKSConfig.NodePools[:1] + updatedNodePoolsList := cluster.AKSConfig.NodePools[:1] + upgradedCluster.AKSConfig.NodePools = updatedNodePoolsList - cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) + var err error + cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) if err != nil { return nil, err } + + if checkClusterConfig { + // Check if the desired config is set correctly + Expect(len(cluster.AKSConfig.NodePools)).Should(BeNumerically("==", currentNodePoolNumber-1)) + for i, np := range cluster.AKSConfig.NodePools { + Expect(np.Name).To(Equal(updatedNodePoolsList[i].Name)) + } + } + if wait { + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) + } + if checkClusterConfig { + + // Check if the desired config has been applied in Rancher + Eventually(func() int { + ginkgo.GinkgoLogr.Info("Waiting for the total nodepool count to decrease in AKSStatus.UpstreamSpec ...") + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + return len(cluster.AKSStatus.UpstreamSpec.NodePools) + }, tools.SetTimeout(12*time.Minute), 10*time.Second).Should(BeNumerically("==", currentNodePoolNumber-1)) + for i, np := range cluster.AKSStatus.UpstreamSpec.NodePools { + Expect(np.Name).To(Equal(updatedNodePoolsList[i].Name)) + } + } return cluster, nil } -// ScaleNodePool modifies the number of initialNodeCount of all the nodepools as defined by nodeCount -func ScaleNodePool(cluster *management.Cluster, client *rancher.Client, nodeCount int64) (*management.Cluster, error) { +// ScaleNodePool modifies the number of initialNodeCount of all the nodepools as defined by nodeCount; +// if wait is set to true, it will wait until the cluster finishes upgrading; +// if checkClusterConfig is set to true, it will validate that nodepool has been scaled successfully +func ScaleNodePool(cluster *management.Cluster, client *rancher.Client, nodeCount int64, wait, checkClusterConfig bool) (*management.Cluster, error) { upgradedCluster := new(management.Cluster) upgradedCluster.Name = cluster.Name upgradedCluster.AKSConfig = cluster.AKSConfig @@ -157,26 +321,58 @@ func ScaleNodePool(cluster *management.Cluster, client 
*rancher.Client, nodeCoun upgradedCluster.AKSConfig.NodePools[i].Count = pointer.Int64(nodeCount) } - cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster) + var err error + cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster) if err != nil { return nil, err } + + if checkClusterConfig { + // Check if the desired config is set correctly + for i := range cluster.AKSConfig.NodePools { + Expect(*cluster.AKSConfig.NodePools[i].Count).To(BeNumerically("==", nodeCount)) + } + } + + if wait { + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) + } + + if checkClusterConfig { + // check that the desired config is applied on Rancher + Eventually(func() bool { + ginkgo.GinkgoLogr.Info("Waiting for the node count change to appear in AKSStatus.UpstreamSpec ...") + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + for i := range cluster.AKSStatus.UpstreamSpec.NodePools { + if *cluster.AKSStatus.UpstreamSpec.NodePools[i].Count != nodeCount { + return false + } + } + return true + }, tools.SetTimeout(12*time.Minute), 10*time.Second).Should(BeTrue()) + } + return cluster, nil } // ListAKSAvailableVersions is a function to list and return only available AKS versions for a specific cluster. -func ListAKSAvailableVersions(client *rancher.Client, clusterID string) (availableVersions []string, err error) { +func ListAKSAvailableVersions(client *rancher.Client, clusterID string) ([]string, error) { // kubernetesversions.ListAKSAvailableVersions expects cluster.Version.GitVersion to be available, which it is not sometimes, so we fetch the cluster again to ensure it has all the available data cluster, err := client.Management.Cluster.ByID(clusterID) if err != nil { return nil, err } - return kubernetesversions.ListAKSAvailableVersions(client, cluster) + allAvailableVersions, err := kubernetesversions.ListAKSAvailableVersions(client, cluster) + if err != nil { + return nil, err + } + return helpers.FilterUIUnsupportedVersions(allAvailableVersions, client), nil } // Create Azure AKS cluster using AZ CLI -func CreateAKSClusterOnAzure(location string, clusterName string, k8sVersion string, nodes string) error { - tags := GetTags() +func CreateAKSClusterOnAzure(location string, clusterName string, k8sVersion string, nodes string, tags map[string]string) error { formattedTags := convertMapToAKSString(tags) fmt.Println("Creating AKS resource group ...") rgargs := []string{"group", "create", "--location", location, "--resource-group", clusterName, "--subscription", subscriptionID} @@ -227,17 +423,19 @@ func DeleteAKSClusteronAzure(clusterName string) error { return nil } -func ImportAKSHostedCluster(client *rancher.Client, displayName, cloudCredentialID string, enableClusterAlerting, enableClusterMonitoring, enableNetworkPolicy, windowsPreferedCluster bool, labels map[string]string) (*management.Cluster, error) { - aksHostCluster := AksHostClusterConfig(displayName, cloudCredentialID) +// ImportAKSHostedCluster imports an AKS cluster to Rancher +func ImportAKSHostedCluster(client *rancher.Client, clusterName, cloudCredentialID, location string, tags map[string]string) (*management.Cluster, error) { cluster := &management.Cluster{ - DockerRootDir: "/var/lib/docker", - AKSConfig: aksHostCluster, - Name: displayName, - EnableClusterAlerting: enableClusterAlerting, - EnableClusterMonitoring: enableClusterMonitoring, - EnableNetworkPolicy: &enableNetworkPolicy, - Labels: labels, - WindowsPreferedCluster: 
windowsPreferedCluster, + DockerRootDir: "/var/lib/docker", + AKSConfig: &management.AKSClusterConfigSpec{ + AzureCredentialSecret: cloudCredentialID, + ClusterName: clusterName, + Imported: true, + ResourceLocation: location, + ResourceGroup: clusterName, + Tags: tags, + }, + Name: clusterName, } clusterResp, err := client.Management.Cluster.Create(cluster) @@ -247,36 +445,8 @@ func ImportAKSHostedCluster(client *rancher.Client, displayName, cloudCredential return clusterResp, err } -func AksHostClusterConfig(displayName, cloudCredentialID string) *management.AKSClusterConfigSpec { - var aksClusterConfig ImportClusterConfig - config.LoadConfig("aksClusterConfig", &aksClusterConfig) - - return &management.AKSClusterConfigSpec{ - AzureCredentialSecret: cloudCredentialID, - ClusterName: displayName, - Imported: aksClusterConfig.Imported, - ResourceLocation: aksClusterConfig.ResourceLocation, - ResourceGroup: aksClusterConfig.ResourceGroup, - } -} - -func AksHostNodeConfig() []management.AKSNodePool { - var nodeConfig management.AKSClusterConfigSpec - config.LoadConfig("aksClusterConfig", &nodeConfig) - - return nodeConfig.NodePools -} - -type ImportClusterConfig struct { - ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"` - ResourceLocation string `json:"resourceLocation" yaml:"resourceLocation"` - Tags map[string]string `json:"tags,omitempty" yaml:"tags,omitempty"` - Imported bool `json:"imported" yaml:"imported"` - NodePools []*management.AKSNodePool `json:"nodePools" yaml:"nodePools"` -} - -// defaultAKS returns the default AKS version used by Rancher -func defaultAKS(client *rancher.Client, cloudCredentialID, region string) (defaultAKS string, err error) { +// defaultAKS returns the default AKS version used by Rancher; if forUpgrade is true, it returns the second-highest minor k8s version +func defaultAKS(client *rancher.Client, cloudCredentialID, region string, forUpgrade bool) (defaultAKS string, err error) { url := fmt.Sprintf("%s://%s/meta/aksVersions", "https", client.RancherConfig.Host) req, err := http.NewRequest("GET", url, nil) if err != nil { @@ -307,10 +477,17 @@ func defaultAKS(client *rancher.Client, cloudCredentialID, region string) (defau // Iterate in the reverse order to get the highest version // We obtain the value similar to UI; ref: https://github.com/rancher/ui/blob/master/lib/shared/addon/components/cluster-driver/driver-azureaks/component.js#L140 + // For upgrade tests, it returns a variant of the second-highest minor version for i := len(versions) - 1; i >= 0; i-- { - if strings.Contains(versions[i], maxValue) { - defaultAKS = versions[i] - return + version := versions[i] + if forUpgrade { + if result := helpers.VersionCompare(version, maxValue); result == -1 { + return version, nil + } + } else { + if strings.Contains(version, maxValue) { + return version, nil + } } } @@ -318,10 +495,10 @@ func defaultAKS(client *rancher.Client, cloudCredentialID, region string) (defau } // GetK8sVersion returns the k8s version to be used by the test; -// this value can either be envvar DOWNSTREAM_K8S_MINOR_VERSION or the default UI value returned by DefaultAKS. -func GetK8sVersion(client *rancher.Client, cloudCredentialID, region string) (string, error) { +// this value can either be a variant of envvar DOWNSTREAM_K8S_MINOR_VERSION or the default UI value returned by defaultAKS. 
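For illustration, a minimal sketch of how the suites in this patch use the new forUpgrade flag (false keeps the UI-default version, true picks a version below the highest minor so an upgrade target remains available); ctx and location are assumed to come from the suite setup:

	k8sVersion, err := helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location, false)
	Expect(err).To(BeNil())
	Expect(k8sVersion).ToNot(BeEmpty())
	GinkgoLogr.Info(fmt.Sprintf("Using AKS version %s", k8sVersion))
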
+func GetK8sVersion(client *rancher.Client, cloudCredentialID, region string, forUpgrade bool) (string, error) { if k8sMinorVersion := helpers.DownstreamK8sMinorVersion; k8sMinorVersion != "" { return GetK8sVersionVariantAKS(k8sMinorVersion, client, cloudCredentialID, region) } - return defaultAKS(client, cloudCredentialID, region) + return defaultAKS(client, cloudCredentialID, region, forUpgrade) } diff --git a/hosted/aks/k8s_chart_support/k8s_chart_support_import_test.go b/hosted/aks/k8s_chart_support/k8s_chart_support_import_test.go index 20f3a094..c2d96617 100644 --- a/hosted/aks/k8s_chart_support/k8s_chart_support_import_test.go +++ b/hosted/aks/k8s_chart_support/k8s_chart_support_import_test.go @@ -6,8 +6,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/aks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -17,18 +15,9 @@ var _ = Describe("K8sChartSupportImport", func() { var cluster *management.Cluster BeforeEach(func() { - var err error - err = helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1") + err := helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - - aksConfig := new(helper.ImportClusterConfig) - config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() { - aksConfig.ResourceGroup = clusterName - aksConfig.ResourceLocation = location - aksConfig.Tags = helper.GetTags() - }) - - cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, location, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) @@ -48,7 +37,7 @@ var _ = Describe("K8sChartSupportImport", func() { It("should successfully test k8s chart support import", func() { testCaseID = 254 // Report to Qase - commonchecks(&ctx, cluster) + commonchecks(ctx.RancherAdminClient, cluster) }) diff --git a/hosted/aks/k8s_chart_support/k8s_chart_support_provisioning_test.go b/hosted/aks/k8s_chart_support/k8s_chart_support_provisioning_test.go index 82c2cb0e..59f4068a 100644 --- a/hosted/aks/k8s_chart_support/k8s_chart_support_provisioning_test.go +++ b/hosted/aks/k8s_chart_support/k8s_chart_support_provisioning_test.go @@ -6,8 +6,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/aks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -19,16 +17,7 @@ var _ = Describe("K8sChartSupportProvisioning", func() { ) BeforeEach(func() { var err error - aksConfig := new(aks.ClusterConfig) - config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() { - aksConfig.ResourceGroup = clusterName - dnsPrefix := clusterName + "-dns" - aksConfig.DNSPrefix = &dnsPrefix - aksConfig.ResourceLocation = location - aksConfig.Tags = helper.GetTags() - aksConfig.KubernetesVersion = &k8sVersion - }) - cluster, err = aks.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, ctx.CloudCred.ID, clusterName, k8sVersion, location, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) @@ -45,8 +34,7 @@ var _ = Describe("K8sChartSupportProvisioning", func() { It("should successfully test k8s chart support provisioning", func() { testCaseID = 252 // Report to Qase - commonchecks(&ctx, cluster) - + commonchecks(ctx.RancherAdminClient, cluster) }) }) diff --git a/hosted/aks/k8s_chart_support/k8s_chart_support_suite_test.go b/hosted/aks/k8s_chart_support/k8s_chart_support_suite_test.go index b48ec298..69d49a27 100644 --- a/hosted/aks/k8s_chart_support/k8s_chart_support_suite_test.go +++ b/hosted/aks/k8s_chart_support/k8s_chart_support_suite_test.go @@ -9,6 +9,7 @@ import ( . "github.com/onsi/gomega" "github.com/rancher-sandbox/ele-testhelpers/tools" . 
"github.com/rancher-sandbox/qase-ginkgo" + "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" "github.com/rancher/shepherd/extensions/clusters" namegen "github.com/rancher/shepherd/pkg/namegenerator" @@ -45,7 +46,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { var _ = BeforeEach(func() { var err error clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) - k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location) + k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location, false) Expect(err).To(BeNil()) Expect(k8sVersion).ToNot(BeEmpty()) GinkgoLogr.Info(fmt.Sprintf("Using AKS version %s", k8sVersion)) @@ -69,7 +70,7 @@ var _ = ReportAfterEach(func(report SpecReport) { Qase(testCaseID, report) }) -func commonchecks(ctx *helpers.Context, cluster *management.Cluster) { +func commonchecks(client *rancher.Client, cluster *management.Cluster) { var originalChartVersion string By("checking the chart version", func() { @@ -92,13 +93,8 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster) { By("making a change to the cluster to validate functionality after chart downgrade", func() { initialNodeCount := cluster.NodeCount var err error - cluster, err = helper.ScaleNodePool(cluster, ctx.RancherAdminClient, initialNodeCount+1) + cluster, err = helper.ScaleNodePool(cluster, client, initialNodeCount+1, true, true) Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - for i := range cluster.AKSConfig.NodePools { - Expect(*cluster.AKSConfig.NodePools[i].Count).To(BeNumerically("==", initialNodeCount+1)) - } }) By("uninstalling the operator chart", func() { @@ -108,16 +104,21 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster) { By("making a change(adding a nodepool) to the cluster to re-install the operator and validating it is re-installed to the latest/original version", func() { currentNodePoolNumber := len(cluster.AKSConfig.NodePools) var err error - cluster, err = helper.AddNodePool(cluster, 1, ctx.RancherAdminClient) + cluster, err = helper.AddNodePool(cluster, 1, client, false, false) Expect(err).To(BeNil()) + Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber+1)) By("ensuring that the chart is re-installed to the latest/original version", func() { helpers.WaitUntilOperatorChartInstallation(originalChartVersion, "", 0) }) - // We do not use WaitClusterToBeUpgraded because it has been flaky here and times out - Eventually(func() bool { - return len(cluster.AKSConfig.NodePools) == currentNodePoolNumber+1 - }, tools.SetTimeout(5*time.Minute), 2*time.Second).Should(BeTrue()) + err = clusters.WaitClusterToBeUpgraded(client, cluster.ID) + Expect(err).To(BeNil()) + // Check if the desired config has been applied in Rancher + Eventually(func() int { + cluster, err = client.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + return len(cluster.AKSStatus.UpstreamSpec.NodePools) + }, tools.SetTimeout(10*time.Minute), 3*time.Second).Should(BeNumerically("==", currentNodePoolNumber+1)) }) } diff --git a/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_import_upgrade_test.go b/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_import_upgrade_test.go index 8927f42f..8de62ac5 100644 --- a/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_import_upgrade_test.go +++ 
b/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_import_upgrade_test.go @@ -6,8 +6,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/aks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -17,18 +15,9 @@ var _ = Describe("K8sChartSupportUpgradeImport", func() { var cluster *management.Cluster BeforeEach(func() { - var err error - err = helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1") + err := helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - - aksConfig := new(helper.ImportClusterConfig) - config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() { - aksConfig.ResourceGroup = clusterName - aksConfig.ResourceLocation = location - aksConfig.Tags = helper.GetTags() - }) - - cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, location, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) diff --git a/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_provisioning_upgrade_test.go b/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_provisioning_upgrade_test.go index dfff652d..3935f1fb 100644 --- a/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_provisioning_upgrade_test.go +++ b/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_provisioning_upgrade_test.go @@ -6,8 +6,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters/aks" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" @@ -19,16 +17,7 @@ var _ = Describe("K8sChartSupportUpgradeProvisioning", func() { ) BeforeEach(func() { var err error - aksConfig := new(aks.ClusterConfig) - config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() { - aksConfig.ResourceGroup = clusterName - dnsPrefix := clusterName + "-dns" - aksConfig.DNSPrefix = &dnsPrefix - aksConfig.ResourceLocation = location - aksConfig.Tags = helper.GetTags() - aksConfig.KubernetesVersion = &k8sVersion - }) - cluster, err = aks.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, ctx.CloudCred.ID, clusterName, k8sVersion, location, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) diff --git a/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_upgrade_suite_test.go b/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_upgrade_suite_test.go index fc6b464e..c3e66877 100644 --- a/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_upgrade_suite_test.go +++ b/hosted/aks/k8s_chart_support/upgrade/k8s_chart_support_upgrade_suite_test.go @@ -61,7 +61,8 @@ var _ = SynchronizedBeforeSuite(func() []byte { var _ = BeforeEach(func() { var err error clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) - k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location) + // For k8s chart support upgrade we want to begin with the default k8s version; we will upgrade rancher and then upgrade k8s to the default available there. 
+ k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location, false) Expect(err).To(BeNil()) Expect(k8sVersion).ToNot(BeEmpty()) GinkgoLogr.Info(fmt.Sprintf("Using AKS version %s", k8sVersion)) @@ -90,26 +91,7 @@ var _ = ReportAfterEach(func(report SpecReport) { }) func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName, rancherUpgradedVersion, hostname, k8sUpgradedVersion string) { - - By("checking cluster name is same", func() { - Expect(cluster.Name).To(BeEquivalentTo(clusterName)) - }) - - By("checking service account token secret", func() { - success, err := clusters.CheckServiceAccountTokenSecret(ctx.RancherAdminClient, clusterName) - Expect(err).To(BeNil()) - Expect(success).To(BeTrue()) - }) - - By("checking all management nodes are ready", func() { - err := nodestat.AllManagementNodeReady(ctx.RancherAdminClient, cluster.ID, helpers.Timeout) - Expect(err).To(BeNil()) - }) - - By("checking all pods are ready", func() { - podErrors := pods.StatusPods(ctx.RancherAdminClient, cluster.ID) - Expect(podErrors).To(BeEmpty()) - }) + helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName) var originalChartVersion string @@ -158,7 +140,7 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName }) By("making sure the local cluster is ready", func() { - localClusterID := "local" + const localClusterID = "local" By("checking all management nodes are ready", func() { err := nodestat.AllManagementNodeReady(ctx.RancherAdminClient, localClusterID, helpers.Timeout) Expect(err).To(BeNil()) @@ -178,24 +160,15 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName GinkgoLogr.Info("Upgraded chart version: " + upgradedChartVersion) }) - var latestK8sVersion *string + var latestK8sVersion string By(fmt.Sprintf("fetching a list of available k8s versions and ensure the v%s is present in the list and upgrading the cluster to it", k8sUpgradedVersion), func() { versions, err := helper.ListAKSAvailableVersions(ctx.RancherAdminClient, cluster.ID) Expect(err).To(BeNil()) - latestK8sVersion = &versions[len(versions)-1] - Expect(*latestK8sVersion).To(ContainSubstring(k8sUpgradedVersion)) - Expect(helpers.VersionCompare(*latestK8sVersion, cluster.Version.GitVersion)).To(BeNumerically("==", 1)) - - currentVersion := cluster.AKSConfig.KubernetesVersion - - cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, latestK8sVersion, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) + latestK8sVersion = versions[len(versions)-1] + Expect(latestK8sVersion).To(ContainSubstring(k8sUpgradedVersion)) + Expect(helpers.VersionCompare(latestK8sVersion, cluster.Version.GitVersion)).To(BeNumerically("==", 1)) + cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, latestK8sVersion, ctx.RancherAdminClient, true) Expect(err).To(BeNil()) - Expect(cluster.AKSConfig.KubernetesVersion).To(BeEquivalentTo(latestK8sVersion)) - for _, np := range cluster.AKSConfig.NodePools { - Expect(np.OrchestratorVersion).To(BeEquivalentTo(currentVersion)) - } }) var downgradeVersion string @@ -209,14 +182,8 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName By("making a change to the cluster to validate functionality after chart downgrade", func() { var err error - cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, latestK8sVersion, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - err = 
clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) + cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, latestK8sVersion, ctx.RancherAdminClient, true, true) Expect(err).To(BeNil()) - Expect(cluster.AKSConfig.KubernetesVersion).To(BeEquivalentTo(latestK8sVersion)) - for _, np := range cluster.AKSConfig.NodePools { - Expect(np.OrchestratorVersion).To(BeEquivalentTo(latestK8sVersion)) - } }) By("uninstalling the operator chart", func() { @@ -226,8 +193,9 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName By("making a change(adding a nodepool) to the cluster to re-install the operator and validating it is re-installed to the latest/upgraded version", func() { currentNodePoolNumber := len(cluster.AKSConfig.NodePools) var err error - cluster, err = helper.AddNodePool(cluster, 1, ctx.RancherAdminClient) + cluster, err = helper.AddNodePool(cluster, 1, ctx.RancherAdminClient, false, false) Expect(err).To(BeNil()) + Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber+1)) By("ensuring that the chart is re-installed to the latest/upgraded version", func() { helpers.WaitUntilOperatorChartInstallation(upgradedChartVersion, "", 0) @@ -235,7 +203,13 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) Expect(err).To(BeNil()) - Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber+1)) + // Check if the desired config has been applied in Rancher + Eventually(func() int { + cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID) + Expect(err).To(BeNil()) + return len(cluster.AKSStatus.UpstreamSpec.NodePools) + }, tools.SetTimeout(10*time.Minute), 3*time.Second).Should(BeNumerically("==", currentNodePoolNumber+1)) + }) } diff --git a/hosted/aks/p0/p0_importing_test.go b/hosted/aks/p0/p0_importing_test.go index 6eba7649..b4ed6fe8 100644 --- a/hosted/aks/p0/p0_importing_test.go +++ b/hosted/aks/p0/p0_importing_test.go @@ -19,158 +19,68 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - + "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/clusters/aks" - nodestat "github.com/rancher/shepherd/extensions/nodes" - "github.com/rancher/shepherd/extensions/workloads/pods" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" ) var _ = Describe("P0Importing", func() { + for _, testData := range []struct { + qaseID int64 + isUpgrade bool + testBody func(cluster *management.Cluster, client *rancher.Client, clusterName string) + testTitle string + }{ + { + qaseID: 213, + isUpgrade: false, + testBody: p0NodesChecks, + testTitle: "should successfully import the cluster & add, delete, scale nodepool", + }, + { + qaseID: 232, + isUpgrade: true, + testBody: p0upgradeK8sVersionCheck, + testTitle: "should be able to upgrade k8s version of the cluster", + }, + } { + testData := testData + When("a cluster is imported", func() { + var cluster *management.Cluster - When("a cluster is imported", func() { - var cluster *management.Cluster - - BeforeEach(func() { - var err error - err = helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1") - Expect(err).To(BeNil()) - - aksConfig := new(helper.ImportClusterConfig) - config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() { - aksConfig.ResourceGroup = clusterName - aksConfig.ResourceLocation = location - aksConfig.Tags = helper.GetTags() - }) - - cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) - Expect(err).To(BeNil()) - cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - // Workaround to add new Nodegroup till https://github.com/rancher/aks-operator/issues/251 is fixed - cluster.AKSConfig = cluster.AKSStatus.UpstreamSpec - }) - AfterEach(func() { - if ctx.ClusterCleanup { - err := helper.DeleteAKSHostCluster(cluster, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - err = helper.DeleteAKSClusteronAzure(clusterName) - Expect(err).To(BeNil()) - } else { - fmt.Println("Skipping downstream cluster deletion: ", clusterName) - } - }) - It("should successfully import the cluster & add, delete, scale nodepool", func() { - // Report to Qase - testCaseID = 231 - - By("checking cluster name is same", func() { - Expect(cluster.Name).To(BeEquivalentTo(clusterName)) - }) - - By("checking service account token secret", func() { - success, err := clusters.CheckServiceAccountTokenSecret(ctx.RancherAdminClient, clusterName) - Expect(err).To(BeNil()) - Expect(success).To(BeTrue()) - }) - - By("checking all management nodes are ready", func() { - err := nodestat.AllManagementNodeReady(ctx.RancherAdminClient, cluster.ID, helpers.Timeout) - Expect(err).To(BeNil()) - }) - - By("checking all pods are ready", func() { - podErrors := pods.StatusPods(ctx.RancherAdminClient, cluster.ID) - Expect(podErrors).To(BeEmpty()) - }) - - currentNodePoolNumber := len(cluster.AKSConfig.NodePools) - initialNodeCount := *cluster.AKSConfig.NodePools[0].Count - - By("scaling up the nodepool", func() { - var err error - cluster, err = helper.ScaleNodePool(cluster, ctx.RancherAdminClient, initialNodeCount+1) - Expect(err).To(BeNil()) - err = 
clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - for i := range cluster.AKSConfig.NodePools { - Expect(*cluster.AKSConfig.NodePools[i].Count).To(BeNumerically("==", initialNodeCount+1)) - } - }) + BeforeEach(func() { + k8sVersion, err := helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location, testData.isUpgrade) + Expect(err).NotTo(HaveOccurred()) + GinkgoLogr.Info("Using K8s version: " + k8sVersion) - By("scaling down the nodepool", func() { - var err error - cluster, err = helper.ScaleNodePool(cluster, ctx.RancherAdminClient, initialNodeCount) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) + err = helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - for i := range cluster.AKSConfig.NodePools { - Expect(*cluster.AKSConfig.NodePools[i].Count).To(BeNumerically("==", initialNodeCount)) - } - }) - By("adding a nodepool/s", func() { - var err error - cluster, err = helper.AddNodePool(cluster, increaseBy, ctx.RancherAdminClient) + cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, location, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) + cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) - Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber+1)) + // Workaround to add new Nodegroup till https://github.com/rancher/aks-operator/issues/251 is fixed + cluster.AKSConfig = cluster.AKSStatus.UpstreamSpec }) - By("deleting the nodepool", func() { - var err error - cluster, err = helper.DeleteNodePool(cluster, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber)) - }) - }) - Context("Upgrading K8s version", func() { - var upgradeToVersion, currentVersion *string - BeforeEach(func() { - currentVersion = cluster.AKSConfig.KubernetesVersion - versions, err := helper.ListAKSAvailableVersions(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - Expect(versions).ToNot(BeEmpty()) - upgradeToVersion = &versions[0] - }) - - It("should be able to upgrade k8s version of the cluster", func() { - // Report to Qase - testCaseID = 232 - - By("upgrading the ControlPlane", func() { - var err error - cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, upgradeToVersion, ctx.RancherAdminClient) + AfterEach(func() { + if ctx.ClusterCleanup { + err := helper.DeleteAKSHostCluster(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) - cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) + err = helper.DeleteAKSClusteronAzure(clusterName) Expect(err).To(BeNil()) - Expect(cluster.AKSConfig.KubernetesVersion).To(BeEquivalentTo(upgradeToVersion)) - for _, np := range cluster.AKSConfig.NodePools { - Expect(np.OrchestratorVersion).To(BeEquivalentTo(currentVersion)) - } - }) + } else { + fmt.Println("Skipping downstream cluster deletion: ", clusterName) + } + }) - By("upgrading the NodePools", func() { - var err error - cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, upgradeToVersion, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - err = 
clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - Expect(cluster.AKSConfig.KubernetesVersion).To(BeEquivalentTo(upgradeToVersion)) - for _, np := range cluster.AKSConfig.NodePools { - Expect(np.OrchestratorVersion).To(BeEquivalentTo(upgradeToVersion)) - } - }) + It(testData.testTitle, func() { + testCaseID = testData.qaseID + testData.testBody(cluster, ctx.RancherAdminClient, clusterName) }) }) - }) - + } }) diff --git a/hosted/aks/p0/p0_provisioning_test.go b/hosted/aks/p0/p0_provisioning_test.go index b8c7d487..d426408a 100644 --- a/hosted/aks/p0/p0_provisioning_test.go +++ b/hosted/aks/p0/p0_provisioning_test.go @@ -19,156 +19,60 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - nodestat "github.com/rancher/shepherd/extensions/nodes" - "github.com/rancher/shepherd/extensions/workloads/pods" - "github.com/rancher/shepherd/pkg/config" - - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/clusters/aks" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" ) var _ = Describe("P0Provisioning", func() { + for _, testData := range []struct { + qaseID int64 + isUpgrade bool + testBody func(cluster *management.Cluster, client *rancher.Client, clusterName string) + testTitle string + }{ + { + qaseID: 172, + isUpgrade: false, + testBody: p0NodesChecks, + testTitle: "should successfully provision the cluster & add, delete, scale nodepool", + }, + { + qaseID: 175, + isUpgrade: true, + testBody: p0upgradeK8sVersionCheck, + testTitle: "should be able to upgrade k8s version of the cluster", + }, + } { + testData := testData + When("a cluster is created", func() { + var cluster *management.Cluster - When("a cluster is created", func() { - var cluster *management.Cluster - - BeforeEach(func() { - var err error - aksConfig := new(aks.ClusterConfig) - config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() { - aksConfig.ResourceGroup = clusterName - dnsPrefix := clusterName + "-dns" - aksConfig.DNSPrefix = &dnsPrefix - aksConfig.ResourceLocation = location - aksConfig.Tags = helper.GetTags() - aksConfig.KubernetesVersion = &k8sVersion - }) - cluster, err = aks.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) - Expect(err).To(BeNil()) - cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - }) - AfterEach(func() { - if ctx.ClusterCleanup { - err := helper.DeleteAKSHostCluster(cluster, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - err = helper.DeleteAKSClusteronAzure(clusterName) - Expect(err).To(BeNil()) - } else { - fmt.Println("Skipping downstream cluster deletion: ", clusterName) - } - }) - It("should successfully provision the cluster & add, delete, scale nodepool", func() { - // Report to Qase - testCaseID = 173 - - By("checking cluster name is same", func() { - Expect(cluster.Name).To(BeEquivalentTo(clusterName)) - }) - - By("checking service account token secret", func() { - success, err := clusters.CheckServiceAccountTokenSecret(ctx.RancherAdminClient, clusterName) - Expect(err).To(BeNil()) - Expect(success).To(BeTrue()) - }) - - By("checking all management nodes are ready", func() { - err := 
nodestat.AllManagementNodeReady(ctx.RancherAdminClient, cluster.ID, helpers.Timeout) - Expect(err).To(BeNil()) - }) - - By("checking all pods are ready", func() { - podErrors := pods.StatusPods(ctx.RancherAdminClient, cluster.ID) - Expect(podErrors).To(BeEmpty()) - }) - - currentNodePoolNumber := len(cluster.AKSConfig.NodePools) - initialNodeCount := *cluster.AKSConfig.NodePools[0].Count - - By("adding a nodepool", func() { - var err error - cluster, err = helper.AddNodePool(cluster, increaseBy, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber+1)) - }) - By("deleting the nodepool", func() { - var err error - cluster, err = helper.DeleteNodePool(cluster, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber)) - }) + BeforeEach(func() { + k8sVersion, err := helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location, testData.isUpgrade) + Expect(err).NotTo(HaveOccurred()) + GinkgoLogr.Info("Using K8s version: " + k8sVersion) - By("scaling up the nodepool", func() { - var err error - cluster, err = helper.ScaleNodePool(cluster, ctx.RancherAdminClient, initialNodeCount+1) + cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, ctx.CloudCred.ID, clusterName, k8sVersion, location, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) + cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) Expect(err).To(BeNil()) - for i := range cluster.AKSConfig.NodePools { - Expect(*cluster.AKSConfig.NodePools[i].Count).To(BeNumerically("==", initialNodeCount+1)) - } }) - - By("scaling down the nodepool", func() { - var err error - cluster, err = helper.ScaleNodePool(cluster, ctx.RancherAdminClient, initialNodeCount) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - for i := range cluster.AKSConfig.NodePools { - Expect(*cluster.AKSConfig.NodePools[i].Count).To(BeNumerically("==", initialNodeCount)) + AfterEach(func() { + if ctx.ClusterCleanup { + err := helper.DeleteAKSHostCluster(cluster, ctx.RancherAdminClient) + Expect(err).To(BeNil()) + } else { + fmt.Println("Skipping downstream cluster deletion: ", clusterName) } }) - }) - - Context("Upgrading K8s version", func() { - var upgradeToVersion, currentVersion *string - BeforeEach(func() { - currentVersion = cluster.AKSConfig.KubernetesVersion - versions, err := helper.ListAKSAvailableVersions(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - Expect(versions).ToNot(BeEmpty()) - upgradeToVersion = &versions[0] - }) - - It("should be able to upgrade k8s version of the cluster", func() { - // Report to Qase - testCaseID = 175 - - By("upgrading the ControlPlane", func() { - var err error - cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, upgradeToVersion, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - Expect(cluster.AKSConfig.KubernetesVersion).To(BeEquivalentTo(upgradeToVersion)) - for _, np := range cluster.AKSConfig.NodePools { - 
Expect(np.OrchestratorVersion).To(BeEquivalentTo(currentVersion)) - } - }) - - By("upgrading the NodePools", func() { - var err error - cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, upgradeToVersion, ctx.RancherAdminClient) - Expect(err).To(BeNil()) - err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID) - Expect(err).To(BeNil()) - Expect(cluster.AKSConfig.KubernetesVersion).To(BeEquivalentTo(upgradeToVersion)) - for _, np := range cluster.AKSConfig.NodePools { - Expect(np.OrchestratorVersion).To(BeEquivalentTo(upgradeToVersion)) - } - }) + It(testData.testTitle, func() { + testCaseID = testData.qaseID + testData.testBody(cluster, ctx.RancherAdminClient, clusterName) }) }) - }) - + } }) diff --git a/hosted/aks/p0/p0_suite_test.go b/hosted/aks/p0/p0_suite_test.go index bb035ebb..c2ec9f07 100644 --- a/hosted/aks/p0/p0_suite_test.go +++ b/hosted/aks/p0/p0_suite_test.go @@ -15,11 +15,14 @@ limitations under the License. package p0_test import ( + "fmt" "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/rancher-sandbox/qase-ginkgo" + "github.com/rancher/shepherd/clients/rancher" + management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" namegen "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" @@ -31,10 +34,10 @@ const ( ) var ( - ctx helpers.Context - clusterName, k8sVersion string - testCaseID int64 - location = helpers.GetAKSLocation() + ctx helpers.Context + clusterName string + testCaseID int64 + location = helpers.GetAKSLocation() ) func TestP0(t *testing.T) { @@ -50,10 +53,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { }) var _ = BeforeEach(func() { - var err error clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) - k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location) - Expect(err).To(BeNil()) }) var _ = ReportBeforeEach(func(report SpecReport) { @@ -65,3 +65,51 @@ var _ = ReportAfterEach(func(report SpecReport) { // Add result in Qase if asked Qase(testCaseID, report) }) + +func p0upgradeK8sVersionCheck(cluster *management.Cluster, client *rancher.Client, clusterName string) { + versions, err := helper.ListAKSAvailableVersions(client, cluster.ID) + Expect(err).To(BeNil()) + Expect(versions).ToNot(BeEmpty()) + upgradeToVersion := versions[0] + GinkgoLogr.Info(fmt.Sprintf("Upgrading cluster to AKS version %s", upgradeToVersion)) + + By("upgrading the ControlPlane", func() { + cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, upgradeToVersion, client, true) + Expect(err).To(BeNil()) + }) + + By("upgrading the NodePools", func() { + cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, upgradeToVersion, client, true, true) + Expect(err).To(BeNil()) + }) +} + +func p0NodesChecks(cluster *management.Cluster, client *rancher.Client, clusterName string) { + + helpers.ClusterIsReadyChecks(cluster, client, clusterName) + + initialNodeCount := *cluster.AKSConfig.NodePools[0].Count + + By("adding a nodepool", func() { + var err error + cluster, err = helper.AddNodePool(cluster, increaseBy, client, true, true) + Expect(err).To(BeNil()) + }) + By("deleting the nodepool", func() { + var err error + cluster, err = helper.DeleteNodePool(cluster, client, true, true) + Expect(err).To(BeNil()) + }) + + By("scaling up the nodepool", func() { + var err error + cluster, err = helper.ScaleNodePool(cluster, client, initialNodeCount+1, true, true) + Expect(err).To(BeNil()) + }) + + 
By("scaling down the nodepool", func() { + var err error + cluster, err = helper.ScaleNodePool(cluster, client, initialNodeCount, true, true) + Expect(err).To(BeNil()) + }) +} diff --git a/hosted/aks/support_matrix/support_matrix_importing_test.go b/hosted/aks/support_matrix/support_matrix_importing_test.go index c6f5b4c9..57ba8d30 100644 --- a/hosted/aks/support_matrix/support_matrix_importing_test.go +++ b/hosted/aks/support_matrix/support_matrix_importing_test.go @@ -21,11 +21,6 @@ import ( "fmt" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/clusters/aks" - nodestat "github.com/rancher/shepherd/extensions/nodes" - "github.com/rancher/shepherd/extensions/workloads/pods" - "github.com/rancher/shepherd/pkg/config" namegen "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" @@ -44,16 +39,9 @@ var _ = Describe("SupportMatrixImporting", func() { ) BeforeEach(func() { clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) - var err error - err = helper.CreateAKSClusterOnAzure(location, clusterName, version, "1") + err := helper.CreateAKSClusterOnAzure(location, clusterName, version, "1", helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) - aksConfig := new(helper.ImportClusterConfig) - config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() { - aksConfig.ResourceGroup = clusterName - aksConfig.ResourceLocation = location - aksConfig.Tags = helper.GetTags() - }) - cluster, err = helper.ImportAKSHostedCluster(ctx.StdUserClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.ImportAKSHostedCluster(ctx.StdUserClient, clusterName, ctx.CloudCred.ID, location, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) // Requires RancherAdminClient cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) @@ -73,26 +61,7 @@ var _ = Describe("SupportMatrixImporting", func() { It("should successfully import the cluster", func() { // Report to Qase testCaseID = 250 - - By("checking cluster name is same", func() { - Expect(cluster.Name).To(BeEquivalentTo(clusterName)) - }) - - By("checking service account token secret", func() { - success, err := clusters.CheckServiceAccountTokenSecret(ctx.StdUserClient, clusterName) - Expect(err).To(BeNil()) - Expect(success).To(BeTrue()) - }) - - By("checking all management nodes are ready", func() { - err := nodestat.AllManagementNodeReady(ctx.StdUserClient, cluster.ID, helpers.Timeout) - Expect(err).To(BeNil()) - }) - - By("checking all pods are ready", func() { - podErrors := pods.StatusPods(ctx.StdUserClient, cluster.ID) - Expect(podErrors).To(BeEmpty()) - }) + helpers.ClusterIsReadyChecks(cluster, ctx.StdUserClient, clusterName) }) }) } diff --git a/hosted/aks/support_matrix/support_matrix_provisioning_test.go b/hosted/aks/support_matrix/support_matrix_provisioning_test.go index 925bc92a..43676270 100644 --- a/hosted/aks/support_matrix/support_matrix_provisioning_test.go +++ b/hosted/aks/support_matrix/support_matrix_provisioning_test.go @@ -21,11 +21,6 @@ import ( "fmt" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/clusters/aks" - nodestat "github.com/rancher/shepherd/extensions/nodes" - 
"github.com/rancher/shepherd/extensions/workloads/pods" - "github.com/rancher/shepherd/pkg/config" namegen "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" @@ -45,16 +40,7 @@ var _ = Describe("SupportMatrixProvisioning", func() { BeforeEach(func() { clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix) var err error - aksConfig := new(aks.ClusterConfig) - config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() { - aksConfig.ResourceGroup = clusterName - dnsPrefix := clusterName + "-dns" - aksConfig.DNSPrefix = &dnsPrefix - aksConfig.ResourceLocation = location - aksConfig.KubernetesVersion = &version - aksConfig.Tags = helper.GetTags() - }) - cluster, err = aks.CreateAKSHostedCluster(ctx.StdUserClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{}) + cluster, err = helper.CreateAKSHostedCluster(ctx.StdUserClient, ctx.CloudCred.ID, clusterName, version, location, helpers.GetCommonMetadataLabels()) Expect(err).To(BeNil()) // Requires RancherAdminClient cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient) @@ -74,26 +60,7 @@ var _ = Describe("SupportMatrixProvisioning", func() { It("should successfully provision the cluster", func() { // Report to Qase testCaseID = 249 - - By("checking cluster name is same", func() { - Expect(cluster.Name).To(BeEquivalentTo(clusterName)) - }) - - By("checking service account token secret", func() { - success, err := clusters.CheckServiceAccountTokenSecret(ctx.StdUserClient, clusterName) - Expect(err).To(BeNil()) - Expect(success).To(BeTrue()) - }) - - By("checking all management nodes are ready", func() { - err := nodestat.AllManagementNodeReady(ctx.StdUserClient, cluster.ID, helpers.Timeout) - Expect(err).To(BeNil()) - }) - - By("checking all pods are ready", func() { - podErrors := pods.StatusPods(ctx.StdUserClient, cluster.ID) - Expect(podErrors).To(BeEmpty()) - }) + helpers.ClusterIsReadyChecks(cluster, ctx.StdUserClient, clusterName) }) }) } diff --git a/hosted/aks/support_matrix/support_matrix_suite_test.go b/hosted/aks/support_matrix/support_matrix_suite_test.go index e3d43f0e..e7dbb3c1 100644 --- a/hosted/aks/support_matrix/support_matrix_suite_test.go +++ b/hosted/aks/support_matrix/support_matrix_suite_test.go @@ -15,12 +15,12 @@ limitations under the License. package support_matrix_test import ( + "testing" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/rancher-sandbox/qase-ginkgo" - "testing" - "github.com/rancher/hosted-providers-e2e/hosted/aks/helper" "github.com/rancher/hosted-providers-e2e/hosted/helpers" ) diff --git a/hosted/helpers/helper_common.go b/hosted/helpers/helper_common.go index 9cc8a143..c7ad7718 100644 --- a/hosted/helpers/helper_common.go +++ b/hosted/helpers/helper_common.go @@ -3,7 +3,6 @@ package helpers import ( "fmt" "os" - "os/user" "strings" "time" @@ -250,9 +249,6 @@ func GetGKEProjectID() string { // GetCommonMetadataLabels returns a list of common metadata labels/tabs func GetCommonMetadataLabels() map[string]string { - testuser, err := user.Current() - Expect(err).To(BeNil()) - specReport := ginkgo.CurrentSpecReport() // filename indicates the filename and line number of the test // we only use this information instead of the ginkgo.CurrentSpecReport().FullText() because of the 63 character limit