EKS Refactor (#110)
Signed-off-by: Chandan Pinjani <chandan.pinjani@suse.com>
cpinjani authored May 28, 2024
1 parent 1595e65 commit def47f2
Showing 15 changed files with 427 additions and 400 deletions.
196 changes: 86 additions & 110 deletions hosted/aks/helper/helper_cluster.go
@@ -33,20 +33,91 @@ var (
subscriptionID = os.Getenv("AKS_SUBSCRIPTION_ID")
)

func CreateAKSHostedCluster(client *rancher.Client, cloudCredentialID, clusterName, k8sVersion, location string, tags map[string]string) (*management.Cluster, error) {
var aksClusterConfig aks.ClusterConfig
config.LoadConfig(aks.AKSClusterConfigConfigurationFileKey, &aksClusterConfig)
var aksNodePools []management.AKSNodePool
for _, aksNodePoolConfig := range *aksClusterConfig.NodePools {
aksNodePool := management.AKSNodePool{
AvailabilityZones: aksNodePoolConfig.AvailabilityZones,
Count: aksNodePoolConfig.NodeCount,
EnableAutoScaling: aksNodePoolConfig.EnableAutoScaling,
MaxPods: aksNodePoolConfig.MaxPods,
MaxCount: aksNodePoolConfig.MaxCount,
MinCount: aksNodePoolConfig.MinCount,
Mode: aksNodePoolConfig.Mode,
Name: aksNodePoolConfig.Name,
OrchestratorVersion: &k8sVersion,
OsDiskSizeGB: aksNodePoolConfig.OsDiskSizeGB,
OsDiskType: aksNodePoolConfig.OsDiskType,
OsType: aksNodePoolConfig.OsType,
VMSize: aksNodePoolConfig.VMSize,
}
aksNodePools = append(aksNodePools, aksNodePool)
}

cluster := &management.Cluster{
AKSConfig: &management.AKSClusterConfigSpec{
AzureCredentialSecret: cloudCredentialID,
ClusterName: clusterName,
DNSPrefix: pointer.String(clusterName + "-dns"),
Imported: false,
KubernetesVersion: &k8sVersion,
LinuxAdminUsername: aksClusterConfig.LinuxAdminUsername,
LoadBalancerSKU: aksClusterConfig.LoadBalancerSKU,
NetworkPlugin: aksClusterConfig.NetworkPlugin,
NodePools: aksNodePools,
PrivateCluster: aksClusterConfig.PrivateCluster,
ResourceGroup: clusterName,
ResourceLocation: location,
Tags: tags,
},
DockerRootDir: "/var/lib/docker",
Name: clusterName,
}

clusterResp, err := client.Management.Cluster.Create(cluster)
Expect(err).To(BeNil())

return clusterResp, err
}

// ImportAKSHostedCluster imports an AKS cluster to Rancher
func ImportAKSHostedCluster(client *rancher.Client, clusterName, cloudCredentialID, location string, tags map[string]string) (*management.Cluster, error) {
cluster := &management.Cluster{
DockerRootDir: "/var/lib/docker",
AKSConfig: &management.AKSClusterConfigSpec{
AzureCredentialSecret: cloudCredentialID,
ClusterName: clusterName,
Imported: true,
ResourceLocation: location,
ResourceGroup: clusterName,
Tags: tags,
},
Name: clusterName,
}

clusterResp, err := client.Management.Cluster.Create(cluster)
Expect(err).To(BeNil())

return clusterResp, err
}

// DeleteAKSHostCluster deletes the AKS cluster
func DeleteAKSHostCluster(cluster *management.Cluster, client *rancher.Client) error {
return client.Management.Cluster.Delete(cluster)
}

// UpgradeClusterKubernetesVersion upgrades the k8s version to the value defined by upgradeToVersion;
// if checkClusterConfig is set to true, it will validate that the cluster control plane has been upgraded successfully
func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersion string, client *rancher.Client, checkClusterConfig bool) (*management.Cluster, error) {
upgradedCluster := cluster
currentVersion := *cluster.AKSConfig.KubernetesVersion
upgradedCluster := new(management.Cluster)
upgradedCluster.Name = cluster.Name
upgradedCluster.AKSConfig = cluster.AKSConfig
upgradedCluster.AKSConfig.KubernetesVersion = &upgradeToVersion

var err error
cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster)
if err != nil {
return nil, err
}
Expect(err).To(BeNil())

if checkClusterConfig {
// Check if the desired config is set correctly
@@ -60,7 +131,7 @@ func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersi
Eventually(func() string {
ginkgo.GinkgoLogr.Info("Waiting for k8s upgrade to appear in AKSStatus.UpstreamSpec ...")
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
Expect(err).To(BeNil())
return *cluster.AKSStatus.UpstreamSpec.KubernetesVersion
}, tools.SetTimeout(10*time.Minute), 5*time.Second).Should(Equal(upgradeToVersion))
// ensure nodepool version is same in Rancher
@@ -76,17 +147,13 @@ func UpgradeClusterKubernetesVersion(cluster *management.Cluster, upgradeToVersi
// if wait is set to true, it will wait until the cluster finishes upgrading;
// if checkClusterConfig is set to true, it will validate that nodepool has been upgraded successfully
func UpgradeNodeKubernetesVersion(cluster *management.Cluster, upgradeToVersion string, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) {
upgradedCluster := new(management.Cluster)
upgradedCluster.Name = cluster.Name
upgradedCluster.AKSConfig = cluster.AKSConfig
upgradedCluster := cluster
for i := range upgradedCluster.AKSConfig.NodePools {
upgradedCluster.AKSConfig.NodePools[i].OrchestratorVersion = &upgradeToVersion
}
var err error
cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster)
if err != nil {
return nil, err
}
Expect(err).To(BeNil())

if checkClusterConfig {
// Check if the desired config is set correctly
@@ -117,62 +184,6 @@ func UpgradeNodeKubernetesVersion(cluster *management.Cluster, upgradeToVersion
return cluster, nil
}

func CreateAKSHostedCluster(client *rancher.Client, cloudCredentialID, clusterName, k8sVersion, location string, tags map[string]string) (*management.Cluster, error) {
var aksClusterConfig aks.ClusterConfig
config.LoadConfig(aks.AKSClusterConfigConfigurationFileKey, &aksClusterConfig)
var aksNodePools []management.AKSNodePool
for _, aksNodePoolConfig := range *aksClusterConfig.NodePools {
aksNodePool := management.AKSNodePool{
AvailabilityZones: aksNodePoolConfig.AvailabilityZones,
Count: aksNodePoolConfig.NodeCount,
EnableAutoScaling: aksNodePoolConfig.EnableAutoScaling,
MaxPods: aksNodePoolConfig.MaxPods,
MaxCount: aksNodePoolConfig.MaxCount,
MinCount: aksNodePoolConfig.MinCount,
Mode: aksNodePoolConfig.Mode,
Name: aksNodePoolConfig.Name,
OrchestratorVersion: &k8sVersion,
OsDiskSizeGB: aksNodePoolConfig.OsDiskSizeGB,
OsDiskType: aksNodePoolConfig.OsDiskType,
OsType: aksNodePoolConfig.OsType,
VMSize: aksNodePoolConfig.VMSize,
}
aksNodePools = append(aksNodePools, aksNodePool)
}

cluster := &management.Cluster{
AKSConfig: &management.AKSClusterConfigSpec{
AzureCredentialSecret: cloudCredentialID,
ClusterName: clusterName,
DNSPrefix: pointer.String(clusterName + "-dns"),
Imported: false,
KubernetesVersion: &k8sVersion,
LinuxAdminUsername: aksClusterConfig.LinuxAdminUsername,
LoadBalancerSKU: aksClusterConfig.LoadBalancerSKU,
NetworkPlugin: aksClusterConfig.NetworkPlugin,
NodePools: aksNodePools,
PrivateCluster: aksClusterConfig.PrivateCluster,
ResourceGroup: clusterName,
ResourceLocation: location,
Tags: tags,
},
DockerRootDir: "/var/lib/docker",
Name: clusterName,
}

clusterResp, err := client.Management.Cluster.Create(cluster)
if err != nil {
return nil, err
}

return clusterResp, err
}

// DeleteAKSHostCluster deletes the AKS cluster
func DeleteAKSHostCluster(cluster *management.Cluster, client *rancher.Client) error {
return client.Management.Cluster.Delete(cluster)
}

// ListSingleVariantAKSAvailableVersions returns a list of single variants of minor versions
// e.g. 1.27.5, 1.26.6, 1.25.8
func ListSingleVariantAKSAvailableVersions(client *rancher.Client, cloudCredentialID, region string) (availableVersions []string, err error) {
@@ -210,12 +221,9 @@ func GetK8sVersionVariantAKS(minorVersion string, client *rancher.Client, cloudC
// AddNodePool adds a nodepool to the list; if wait is set to true, it will wait until the cluster finishes upgrading;
// if checkClusterConfig is set to true, it will validate that nodepool has been added successfully
func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) {
upgradedCluster := cluster
currentNodePoolNumber := len(cluster.AKSConfig.NodePools)

upgradedCluster := new(management.Cluster)
upgradedCluster.Name = cluster.Name
upgradedCluster.AKSConfig = cluster.AKSConfig

updateNodePoolsList := cluster.AKSConfig.NodePools
for i := 1; i <= increaseBy; i++ {
for _, np := range cluster.AKSConfig.NodePools {
@@ -233,9 +241,7 @@ func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Cl

var err error
cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster)
if err != nil {
return nil, err
}
Expect(err).To(BeNil())

if checkClusterConfig {
// Check if the desired config is set correctly
@@ -271,17 +277,13 @@ func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Cl
func DeleteNodePool(cluster *management.Cluster, client *rancher.Client, wait, checkClusterConfig bool) (*management.Cluster, error) {
currentNodePoolNumber := len(cluster.AKSConfig.NodePools)

upgradedCluster := new(management.Cluster)
upgradedCluster.Name = cluster.Name
upgradedCluster.AKSConfig = cluster.AKSConfig
upgradedCluster := cluster
updatedNodePoolsList := cluster.AKSConfig.NodePools[:1]
upgradedCluster.AKSConfig.NodePools = updatedNodePoolsList

var err error
cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster)
if err != nil {
return nil, err
}
Expect(err).To(BeNil())

if checkClusterConfig {
// Check if the desired config is set correctly
@@ -314,18 +316,14 @@ func DeleteNodePool(cluster *management.Cluster, client *rancher.Client, wait, c
// if wait is set to true, it will wait until the cluster finishes upgrading;
// if checkClusterConfig is set to true, it will validate that nodepool has been scaled successfully
func ScaleNodePool(cluster *management.Cluster, client *rancher.Client, nodeCount int64, wait, checkClusterConfig bool) (*management.Cluster, error) {
upgradedCluster := new(management.Cluster)
upgradedCluster.Name = cluster.Name
upgradedCluster.AKSConfig = cluster.AKSConfig
upgradedCluster := cluster
for i := range upgradedCluster.AKSConfig.NodePools {
upgradedCluster.AKSConfig.NodePools[i].Count = pointer.Int64(nodeCount)
}

var err error
cluster, err = client.Management.Cluster.Update(cluster, &upgradedCluster)
if err != nil {
return nil, err
}
Expect(err).To(BeNil())

if checkClusterConfig {
// Check if the desired config is set correctly
@@ -423,28 +421,6 @@ func DeleteAKSClusteronAzure(clusterName string) error {
return nil
}

// ImportAKSHostedCluster imports an AKS cluster to Rancher
func ImportAKSHostedCluster(client *rancher.Client, clusterName, cloudCredentialID, location string, tags map[string]string) (*management.Cluster, error) {
cluster := &management.Cluster{
DockerRootDir: "/var/lib/docker",
AKSConfig: &management.AKSClusterConfigSpec{
AzureCredentialSecret: cloudCredentialID,
ClusterName: clusterName,
Imported: true,
ResourceLocation: location,
ResourceGroup: clusterName,
Tags: tags,
},
Name: clusterName,
}

clusterResp, err := client.Management.Cluster.Create(cluster)
if err != nil {
return nil, err
}
return clusterResp, err
}

// defaultAKS returns the default AKS version used by Rancher; if forUpgrade is true, it returns the second-highest minor k8s version
func defaultAKS(client *rancher.Client, cloudCredentialID, region string, forUpgrade bool) (defaultAKS string, err error) {
url := fmt.Sprintf("%s://%s/meta/aksVersions", "https", client.RancherConfig.Host)
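For context, here is a minimal usage sketch (not part of the commit) showing how a Ginkgo spec might drive the refactored helpers end to end. The import paths, cloud credential ID, Kubernetes versions, location, and tags below are illustrative assumptions, not values taken from this repository.

package aks_test

import (
	. "github.com/onsi/ginkgo/v2"

	"github.com/rancher/hosted-providers-e2e/hosted/aks/helper" // assumed import path
	"github.com/rancher/shepherd/clients/rancher"               // assumed client package
)

var _ = It("provisions and upgrades a hosted AKS cluster", func() {
	var client *rancher.Client                         // obtained from the suite setup in practice
	cloudCredentialID := "cattle-global-data:cc-xxxxx" // assumed placeholder
	tags := map[string]string{"owner": "hosted-providers-qa"}

	// The helpers assert on API errors internally via Gomega, so the returned
	// error is typically ignored inside a spec.
	cluster, _ := helper.CreateAKSHostedCluster(client, cloudCredentialID, "aks-demo", "1.27.9", "eastus", tags)

	// Upgrade the control plane first, then the node pools, re-validating the
	// cluster config after each step (checkClusterConfig = true).
	cluster, _ = helper.UpgradeClusterKubernetesVersion(cluster, "1.28.5", client, true)
	cluster, _ = helper.UpgradeNodeKubernetesVersion(cluster, "1.28.5", client, true, true)
})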

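A second sketch, continuing with the same assumed imports and placeholder values, covers the import and node pool paths touched by this change.

// Continuing the sketch above (same assumed imports): import an existing AKS
// cluster into Rancher, rework its node pools, then clean everything up.
var _ = It("imports a cluster and reworks its node pools", func() {
	var client *rancher.Client                         // from suite setup in practice
	cloudCredentialID := "cattle-global-data:cc-xxxxx" // assumed placeholder

	cluster, _ := helper.ImportAKSHostedCluster(client, "aks-imported", cloudCredentialID, "eastus", nil)

	// Add a node pool, scale every pool to 3 nodes, then delete pools back down
	// to a single one; each call waits and re-validates the cluster config.
	cluster, _ = helper.AddNodePool(cluster, 1, client, true, true)
	cluster, _ = helper.ScaleNodePool(cluster, client, 3, true, true)
	cluster, _ = helper.DeleteNodePool(cluster, client, true, true)

	// Remove the cluster from Rancher, then delete the underlying Azure resources.
	_ = helper.DeleteAKSHostCluster(cluster, client)
	_ = helper.DeleteAKSClusteronAzure("aks-imported")
})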