Refactor AKS tests (#100)
* Refactor the p0 and support_matrix tests

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Use second-highest k8s version as default and filter versions not supported by the UI

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Change k8s upgrade func name

Co-authored-by: Chandan Pinjani <chandan.pinjani@suse.com>
Co-authored-by: Parthvi Vala <parthvi.vala@suse.com>

* Refactor p0 tests and revert defaultAKS changes but modify to return a different value for upgrade cases

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Add ClusterIsReadyChecks common function and use it in AKS

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Review comment: Rename c -> testData

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Refactor the aks provisioning and import methods to not write to config file

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Attempt #2 at fixing AKS provisioning failures on the nightly operator chart

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Refactor test checks; move checks to functions

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Attempt #1 at fixing flakes

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Remove debug env and fix panic error

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Remove AppliedSpec references

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Change the nodepool add/delete checks to check for name

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Remove AppliedSpec references (2)

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Review changes

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Use gomega for wait block

Signed-off-by: Parthvi <parthvi.vala@suse.com>

* Use client instead of ctx.RancherAdminClient in k8s chart support tests

Signed-off-by: Parthvi <parthvi.vala@suse.com>

---------

Signed-off-by: Parthvi <parthvi.vala@suse.com>
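
For context on the "Use second-highest k8s version as default" change above, the selection presumably resembles the sketch below. The function name, parameters, and the assumption that the version list arrives sorted ascending are illustrative only; the actual implementation lives in hosted/aks/helper/helper_cluster.go, whose diff is not rendered on this page.

package helper

// getDefaultK8sVersion is a hypothetical sketch of choosing a default AKS
// version. When forUpgrade is true it returns the second-highest entry so a
// newer version remains available as an upgrade target; otherwise it returns
// the highest. Assumes versions is already sorted in ascending semver order.
func getDefaultK8sVersion(versions []string, forUpgrade bool) string {
	if len(versions) == 0 {
		return ""
	}
	if forUpgrade && len(versions) > 1 {
		return versions[len(versions)-2]
	}
	return versions[len(versions)-1]
}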
valaparthvi authored May 20, 2024
1 parent aa88977 commit 142cbd3
Showing 15 changed files with 448 additions and 546 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -16,6 +16,7 @@ Following are the common environment variables that need to be exported for runn
4. PROVIDER: Type of the hosted provider you want to test. Acceptable values - gke, eks, aks
5. DOWNSTREAM_K8S_MINOR_VERSION (optional): Downstream cluster Kubernetes version to test. If the env var is not provided, it uses a provider specific default value.
6. DOWNSTREAM_CLUSTER_CLEANUP (optional): If set to true, downstream cluster will be deleted. Default: false.
7. RANCHER_CLIENT_DEBUG (optional, debug): Set to true to watch API requests and responses being sent to rancher.

#### To run K8s Chart support test cases:
1. KUBECONFIG: Upstream K8s' Kubeconfig file; usually it is k3s.yaml.
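
The environment variables documented in this README hunk are read by the Go test setup. A minimal sketch of how the optional variables might be consumed follows; the helper names below are illustrative and not part of this change.

package helpers

import (
	"os"
	"strconv"
)

// downstreamClusterCleanup reports whether the downstream cluster should be
// deleted after the run; per the README the default is false.
func downstreamClusterCleanup() bool {
	v, err := strconv.ParseBool(os.Getenv("DOWNSTREAM_CLUSTER_CLEANUP"))
	if err != nil {
		return false
	}
	return v
}

// downstreamK8sMinorVersion returns the requested downstream Kubernetes minor
// version, or an empty string when a provider-specific default should be used.
func downstreamK8sMinorVersion() string {
	return os.Getenv("DOWNSTREAM_K8S_MINOR_VERSION")
}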
345 changes: 261 additions & 84 deletions hosted/aks/helper/helper_cluster.go

Large diffs are not rendered by default.
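
Since the 345-line helper diff is not rendered, here is a rough reconstruction of the refactored helper signatures, inferred purely from the call sites visible in the test files below. Parameter names, exact types (e.g. map[string]string for the metadata labels), and the meaning of the trailing booleans (assumed to be wait / check-cluster-config flags) are guesses rather than source.

package helper

import (
	"github.com/rancher/shepherd/clients/rancher"
	management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
)

// aksHelper exists only to list the inferred signatures in one compilable
// block; the real package exposes these as top-level functions.
type aksHelper interface {
	GetK8sVersion(client *rancher.Client, cloudCredentialID, location string, forUpgrade bool) (string, error)
	CreateAKSHostedCluster(client *rancher.Client, cloudCredentialID, clusterName, k8sVersion, location string, labels map[string]string) (*management.Cluster, error)
	ImportAKSHostedCluster(client *rancher.Client, clusterName, cloudCredentialID, location string, labels map[string]string) (*management.Cluster, error)
	CreateAKSClusterOnAzure(location, clusterName, k8sVersion, nodeCount string, labels map[string]string) error
	ListAKSAvailableVersions(client *rancher.Client, clusterID string) ([]string, error)
	UpgradeClusterKubernetesVersion(cluster *management.Cluster, version string, client *rancher.Client, wait bool) (*management.Cluster, error)
	UpgradeNodeKubernetesVersion(cluster *management.Cluster, version string, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error)
	ScaleNodePool(cluster *management.Cluster, client *rancher.Client, nodeCount int64, wait, checkClusterConfig bool) (*management.Cluster, error)
	AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error)
}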

17 changes: 3 additions & 14 deletions hosted/aks/k8s_chart_support/k8s_chart_support_import_test.go
@@ -6,8 +6,6 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
"github.com/rancher/shepherd/extensions/clusters/aks"
"github.com/rancher/shepherd/pkg/config"

"github.com/rancher/hosted-providers-e2e/hosted/aks/helper"
"github.com/rancher/hosted-providers-e2e/hosted/helpers"
@@ -17,18 +15,9 @@ var _ = Describe("K8sChartSupportImport", func() {
var cluster *management.Cluster

BeforeEach(func() {
var err error
err = helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1")
err := helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels())
Expect(err).To(BeNil())

aksConfig := new(helper.ImportClusterConfig)
config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() {
aksConfig.ResourceGroup = clusterName
aksConfig.ResourceLocation = location
aksConfig.Tags = helper.GetTags()
})

cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{})
cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, location, helpers.GetCommonMetadataLabels())
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
@@ -48,7 +37,7 @@ var _ = Describe("K8sChartSupportImport", func() {

It("should successfully test k8s chart support import", func() {
testCaseID = 254 // Report to Qase
commonchecks(&ctx, cluster)
commonchecks(ctx.RancherAdminClient, cluster)

})

@@ -6,8 +6,6 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
"github.com/rancher/shepherd/extensions/clusters/aks"
"github.com/rancher/shepherd/pkg/config"

"github.com/rancher/hosted-providers-e2e/hosted/aks/helper"
"github.com/rancher/hosted-providers-e2e/hosted/helpers"
@@ -19,16 +17,7 @@ var _ = Describe("K8sChartSupportProvisioning", func() {
)
BeforeEach(func() {
var err error
aksConfig := new(aks.ClusterConfig)
config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() {
aksConfig.ResourceGroup = clusterName
dnsPrefix := clusterName + "-dns"
aksConfig.DNSPrefix = &dnsPrefix
aksConfig.ResourceLocation = location
aksConfig.Tags = helper.GetTags()
aksConfig.KubernetesVersion = &k8sVersion
})
cluster, err = aks.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{})
cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, ctx.CloudCred.ID, clusterName, k8sVersion, location, helpers.GetCommonMetadataLabels())
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
@@ -45,8 +34,7 @@ var _ = Describe("K8sChartSupportProvisioning", func() {

It("should successfully test k8s chart support provisioning", func() {
testCaseID = 252 // Report to Qase
commonchecks(&ctx, cluster)

commonchecks(ctx.RancherAdminClient, cluster)
})

})
27 changes: 14 additions & 13 deletions hosted/aks/k8s_chart_support/k8s_chart_support_suite_test.go
@@ -9,6 +9,7 @@ import (
. "github.com/onsi/gomega"
"github.com/rancher-sandbox/ele-testhelpers/tools"
. "github.com/rancher-sandbox/qase-ginkgo"
"github.com/rancher/shepherd/clients/rancher"
management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
"github.com/rancher/shepherd/extensions/clusters"
namegen "github.com/rancher/shepherd/pkg/namegenerator"
@@ -45,7 +46,7 @@
var _ = BeforeEach(func() {
var err error
clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix)
k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location)
k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location, false)
Expect(err).To(BeNil())
Expect(k8sVersion).ToNot(BeEmpty())
GinkgoLogr.Info(fmt.Sprintf("Using AKS version %s", k8sVersion))
@@ -69,7 +70,7 @@ var _ = ReportAfterEach(func(report SpecReport) {
Qase(testCaseID, report)
})

func commonchecks(ctx *helpers.Context, cluster *management.Cluster) {
func commonchecks(client *rancher.Client, cluster *management.Cluster) {
var originalChartVersion string

By("checking the chart version", func() {
@@ -92,13 +93,8 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster) {
By("making a change to the cluster to validate functionality after chart downgrade", func() {
initialNodeCount := cluster.NodeCount
var err error
cluster, err = helper.ScaleNodePool(cluster, ctx.RancherAdminClient, initialNodeCount+1)
cluster, err = helper.ScaleNodePool(cluster, client, initialNodeCount+1, true, true)
Expect(err).To(BeNil())
err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID)
Expect(err).To(BeNil())
for i := range cluster.AKSConfig.NodePools {
Expect(*cluster.AKSConfig.NodePools[i].Count).To(BeNumerically("==", initialNodeCount+1))
}
})

By("uninstalling the operator chart", func() {
@@ -108,16 +104,21 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster) {
By("making a change(adding a nodepool) to the cluster to re-install the operator and validating it is re-installed to the latest/original version", func() {
currentNodePoolNumber := len(cluster.AKSConfig.NodePools)
var err error
cluster, err = helper.AddNodePool(cluster, 1, ctx.RancherAdminClient)
cluster, err = helper.AddNodePool(cluster, 1, client, false, false)
Expect(err).To(BeNil())
Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber+1))

By("ensuring that the chart is re-installed to the latest/original version", func() {
helpers.WaitUntilOperatorChartInstallation(originalChartVersion, "", 0)
})

// We do not use WaitClusterToBeUpgraded because it has been flaky here and times out
Eventually(func() bool {
return len(cluster.AKSConfig.NodePools) == currentNodePoolNumber+1
}, tools.SetTimeout(5*time.Minute), 2*time.Second).Should(BeTrue())
err = clusters.WaitClusterToBeUpgraded(client, cluster.ID)
Expect(err).To(BeNil())
// Check if the desired config has been applied in Rancher
Eventually(func() int {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).To(BeNil())
return len(cluster.AKSStatus.UpstreamSpec.NodePools)
}, tools.SetTimeout(10*time.Minute), 3*time.Second).Should(BeNumerically("==", currentNodePoolNumber+1))
})
}
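
The Eventually block above is the pattern introduced by the "Use gomega for wait block" commit: re-fetch the cluster from Rancher on every poll and assert on AKSStatus.UpstreamSpec instead of relying solely on clusters.WaitClusterToBeUpgraded. A condensed, self-contained sketch of that idiom follows; the package and helper name are illustrative, while the timeout and interval mirror the values used above.

package k8s_chart_support_test

import (
	"time"

	. "github.com/onsi/gomega"
	"github.com/rancher-sandbox/ele-testhelpers/tools"
	"github.com/rancher/shepherd/clients/rancher"
	management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
)

// waitForUpstreamNodePoolCount polls Rancher until the upstream spec reports
// the expected number of node pools and returns the last fetched cluster
// object. Intended to be called from within a Ginkgo spec, as commonchecks
// does above.
func waitForUpstreamNodePoolCount(client *rancher.Client, cluster *management.Cluster, want int) *management.Cluster {
	Eventually(func() int {
		var err error
		cluster, err = client.Management.Cluster.ByID(cluster.ID)
		Expect(err).To(BeNil())
		return len(cluster.AKSStatus.UpstreamSpec.NodePools)
	}, tools.SetTimeout(10*time.Minute), 3*time.Second).Should(BeNumerically("==", want))
	return cluster
}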
@@ -6,8 +6,6 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
"github.com/rancher/shepherd/extensions/clusters/aks"
"github.com/rancher/shepherd/pkg/config"

"github.com/rancher/hosted-providers-e2e/hosted/aks/helper"
"github.com/rancher/hosted-providers-e2e/hosted/helpers"
@@ -17,18 +15,9 @@ var _ = Describe("K8sChartSupportUpgradeImport", func() {
var cluster *management.Cluster

BeforeEach(func() {
var err error
err = helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1")
err := helper.CreateAKSClusterOnAzure(location, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels())
Expect(err).To(BeNil())

aksConfig := new(helper.ImportClusterConfig)
config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() {
aksConfig.ResourceGroup = clusterName
aksConfig.ResourceLocation = location
aksConfig.Tags = helper.GetTags()
})

cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{})
cluster, err = helper.ImportAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, location, helpers.GetCommonMetadataLabels())
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
@@ -6,8 +6,6 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
"github.com/rancher/shepherd/extensions/clusters/aks"
"github.com/rancher/shepherd/pkg/config"

"github.com/rancher/hosted-providers-e2e/hosted/aks/helper"
"github.com/rancher/hosted-providers-e2e/hosted/helpers"
@@ -19,16 +17,7 @@ var _ = Describe("K8sChartSupportUpgradeProvisioning", func() {
)
BeforeEach(func() {
var err error
aksConfig := new(aks.ClusterConfig)
config.LoadAndUpdateConfig(aks.AKSClusterConfigConfigurationFileKey, aksConfig, func() {
aksConfig.ResourceGroup = clusterName
dnsPrefix := clusterName + "-dns"
aksConfig.DNSPrefix = &dnsPrefix
aksConfig.ResourceLocation = location
aksConfig.Tags = helper.GetTags()
aksConfig.KubernetesVersion = &k8sVersion
})
cluster, err = aks.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{})
cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, ctx.CloudCred.ID, clusterName, k8sVersion, location, helpers.GetCommonMetadataLabels())
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
@@ -61,7 +61,8 @@ var _ = SynchronizedBeforeSuite(func() []byte {
var _ = BeforeEach(func() {
var err error
clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix)
k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location)
// For k8s chart support upgrade we want to begin with the default k8s version; we will upgrade rancher and then upgrade k8s to the default available there.
k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCred.ID, location, false)
Expect(err).To(BeNil())
Expect(k8sVersion).ToNot(BeEmpty())
GinkgoLogr.Info(fmt.Sprintf("Using AKS version %s", k8sVersion))
@@ -90,26 +91,7 @@ var _ = ReportAfterEach(func(report SpecReport) {
})

func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName, rancherUpgradedVersion, hostname, k8sUpgradedVersion string) {

By("checking cluster name is same", func() {
Expect(cluster.Name).To(BeEquivalentTo(clusterName))
})

By("checking service account token secret", func() {
success, err := clusters.CheckServiceAccountTokenSecret(ctx.RancherAdminClient, clusterName)
Expect(err).To(BeNil())
Expect(success).To(BeTrue())
})

By("checking all management nodes are ready", func() {
err := nodestat.AllManagementNodeReady(ctx.RancherAdminClient, cluster.ID, helpers.Timeout)
Expect(err).To(BeNil())
})

By("checking all pods are ready", func() {
podErrors := pods.StatusPods(ctx.RancherAdminClient, cluster.ID)
Expect(podErrors).To(BeEmpty())
})
helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)

var originalChartVersion string

@@ -158,7 +140,7 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName
})

By("making sure the local cluster is ready", func() {
localClusterID := "local"
const localClusterID = "local"
By("checking all management nodes are ready", func() {
err := nodestat.AllManagementNodeReady(ctx.RancherAdminClient, localClusterID, helpers.Timeout)
Expect(err).To(BeNil())
@@ -178,24 +160,15 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName
GinkgoLogr.Info("Upgraded chart version: " + upgradedChartVersion)
})

var latestK8sVersion *string
var latestK8sVersion string
By(fmt.Sprintf("fetching a list of available k8s versions and ensure the v%s is present in the list and upgrading the cluster to it", k8sUpgradedVersion), func() {
versions, err := helper.ListAKSAvailableVersions(ctx.RancherAdminClient, cluster.ID)
Expect(err).To(BeNil())
latestK8sVersion = &versions[len(versions)-1]
Expect(*latestK8sVersion).To(ContainSubstring(k8sUpgradedVersion))
Expect(helpers.VersionCompare(*latestK8sVersion, cluster.Version.GitVersion)).To(BeNumerically("==", 1))

currentVersion := cluster.AKSConfig.KubernetesVersion

cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, latestK8sVersion, ctx.RancherAdminClient)
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
latestK8sVersion = versions[len(versions)-1]
Expect(latestK8sVersion).To(ContainSubstring(k8sUpgradedVersion))
Expect(helpers.VersionCompare(latestK8sVersion, cluster.Version.GitVersion)).To(BeNumerically("==", 1))
cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, latestK8sVersion, ctx.RancherAdminClient, true)
Expect(err).To(BeNil())
Expect(cluster.AKSConfig.KubernetesVersion).To(BeEquivalentTo(latestK8sVersion))
for _, np := range cluster.AKSConfig.NodePools {
Expect(np.OrchestratorVersion).To(BeEquivalentTo(currentVersion))
}
})

var downgradeVersion string
Expand All @@ -209,14 +182,8 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName

By("making a change to the cluster to validate functionality after chart downgrade", func() {
var err error
cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, latestK8sVersion, ctx.RancherAdminClient)
Expect(err).To(BeNil())
err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID)
cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, latestK8sVersion, ctx.RancherAdminClient, true, true)
Expect(err).To(BeNil())
Expect(cluster.AKSConfig.KubernetesVersion).To(BeEquivalentTo(latestK8sVersion))
for _, np := range cluster.AKSConfig.NodePools {
Expect(np.OrchestratorVersion).To(BeEquivalentTo(latestK8sVersion))
}
})

By("uninstalling the operator chart", func() {
Expand All @@ -226,16 +193,23 @@ func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName
By("making a change(adding a nodepool) to the cluster to re-install the operator and validating it is re-installed to the latest/upgraded version", func() {
currentNodePoolNumber := len(cluster.AKSConfig.NodePools)
var err error
cluster, err = helper.AddNodePool(cluster, 1, ctx.RancherAdminClient)
cluster, err = helper.AddNodePool(cluster, 1, ctx.RancherAdminClient, false, false)
Expect(err).To(BeNil())
Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber+1))

By("ensuring that the chart is re-installed to the latest/upgraded version", func() {
helpers.WaitUntilOperatorChartInstallation(upgradedChartVersion, "", 0)
})

err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID)
Expect(err).To(BeNil())
Expect(len(cluster.AKSConfig.NodePools)).To(BeNumerically("==", currentNodePoolNumber+1))
// Check if the desired config has been applied in Rancher
Eventually(func() int {
cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID)
Expect(err).To(BeNil())
return len(cluster.AKSStatus.UpstreamSpec.NodePools)
}, tools.SetTimeout(10*time.Minute), 3*time.Second).Should(BeNumerically("==", currentNodePoolNumber+1))

})

}