fix evaluator weight load
IHIaadj committed Jun 20, 2023
1 parent 4018d05 commit cf2c70e
Showing 7 changed files with 74 additions and 32 deletions.
5 changes: 3 additions & 2 deletions analogainas/evaluators/mlp.py
@@ -4,7 +4,7 @@
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

from sklearn.preprocessing import StandardScaler
from analogainas.evaluators import Evaluator
from analogainas.utils import accuracy_mse

@@ -94,7 +94,8 @@ def fit(self, xtrain, ytrain,
        self.mean = np.mean(ytrain)
        self.std = np.std(ytrain)

        # Standardize the inputs; keep the fitted scaler's statistics.
        scaler = StandardScaler()
        _xtrain = scaler.fit_transform(xtrain)
        _ytrain = np.array(ytrain)
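Note: the fix replaces the encoding TODO with feature standardization. A minimal sketch of the convention this relies on, with illustrative arrays standing in for xtrain and xtest (the shapes here are assumptions):

import numpy as np
from sklearn.preprocessing import StandardScaler

xtrain = np.random.rand(100, 24)  # stand-ins for the evaluator's inputs
xtest = np.random.rand(20, 24)

scaler = StandardScaler()
_xtrain = scaler.fit_transform(xtrain)  # learn mean/std on training data only
_xtest = scaler.transform(xtest)        # reuse those statistics at eval time

# Each training feature now has zero mean and unit variance.
assert np.allclose(_xtrain.mean(axis=0), 0.0)
assert np.allclose(_xtrain.std(axis=0), 1.0)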
22 changes: 15 additions & 7 deletions analogainas/evaluators/prepare_data.py
@@ -1,20 +1,28 @@
import numpy as np
from numpy import genfromtxt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

class AccuracyDataLoader:
    def __init__(self, dataset_file="dataset_cifar10.csv", transforms=None):
        self.dataset_file = dataset_file
        self.data = genfromtxt(self.dataset_file, delimiter=',')

        # Applies encoding
        if transforms is not None:
            self.data = transforms(self.data)

    def get_train(self):
        X = self.data[1:24]
        y = self.data[27]
        slope = self.data[26] - self.data[-1]

        # Split the data into train and test sets
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

        # Scale the data using StandardScaler
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)

        return (X_train, y_train), (X_test, y_test), slope
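Note: the previous return unpacked train_test_split in the wrong order; it returns the inputs first, then the targets. A small self-contained check with toy data (shapes are illustrative):

import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(40).reshape(10, 4)  # 10 samples, 4 features
y = np.arange(10)

# Correct order: X_train, X_test, y_train, y_test.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

print(X_train.shape, X_test.shape)  # (8, 4) (2, 4)
print(y_train.shape, y_test.shape)  # (8,) (2,)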
14 changes: 10 additions & 4 deletions analogainas/evaluators/xgboost.py
@@ -12,7 +12,10 @@ def __init__(
        model_type="XGBRanker",
        load_weight=True,
        hpo_wrapper=False,
        hparams_from_file=False,
        avm_predictor_path="weights/surrogate_xgboost_avm.json",
        std_predictor_path="weights/surrogate_xgboost_std.json",
        ranker_path="weights/surrogate_xgboost_ranker.json"
    ):
        self.model_type = model_type
        self.hpo_wrapper = hpo_wrapper
@@ -34,23 +37,26 @@ def __init__(
        # Set the weight paths before building the models; the getters
        # below read them when load_weight is enabled.
        self.ranker_path = ranker_path
        self.avm_predictor_path = avm_predictor_path
        self.std_predictor_path = std_predictor_path
        self.ranker = self.get_ranker()
        self.avm_predictor = self.get_avm_predictor()
        self.std_predictor = self.get_std_predictor()

    def get_ranker(self):
        ranker = xgb.XGBRanker(**self.default_hyperparams)
        if self.load_weight:
            ranker.load_model(self.ranker_path)
        return ranker

    def get_avm_predictor(self):
        avm_predictor = xgb.XGBRegressor()
        if self.load_weight:
            avm_predictor.load_model(self.avm_predictor_path)
        return avm_predictor

    def get_std_predictor(self):
        std_predictor = xgb.XGBRegressor()
        if self.load_weight:
            std_predictor.load_model(self.std_predictor_path)
        return std_predictor

    def fit(self, x_train, y_train, train_info_file="xgboost.txt", hyperparameters=None, epochs=500, verbose=True):
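Note: with the weight locations now constructor arguments (defaulting to a relative weights/ directory) instead of a hardcoded absolute path, the evaluator can load its surrogates from any checkout. A hedged usage sketch; the class name XGBoostEvaluator is an assumption, since it is not visible in this hunk:

import os
from analogainas.evaluators.xgboost import XGBoostEvaluator  # assumed class name

weights_dir = "analogainas/evaluators/weights"  # wherever the checkout keeps them

evaluator = XGBoostEvaluator(
    load_weight=True,
    ranker_path=os.path.join(weights_dir, "surrogate_xgboost_ranker.json"),
    avm_predictor_path=os.path.join(weights_dir, "surrogate_xgboost_avm.json"),
    std_predictor_path=os.path.join(weights_dir, "surrogate_xgboost_std.json"),
)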
28 changes: 13 additions & 15 deletions analogainas/search_algorithms/ea.py
@@ -2,12 +2,8 @@
import random
from analogainas.search_spaces.sample import random_sample


class EAOptimizer:
    def __init__(self, max_iter, population_size, mutation_prob):
        self.max_iter = max_iter
        self.population_size = population_size
        self.mutation_prob = mutation_prob
@@ -18,17 +14,19 @@ def mutate(self, architecture):
        return architecture

    def run(self):
        # Initial population sampled independently from the config space.
        D = [self.cs.sample() for _ in range(self.population_size)]
        best_f = 0.0
        best_x = [None] * self.population_size

        for _ in range(self.max_iter):
            # Mutate every member and evaluate the resulting candidates.
            new_x = [self.mutate(x) for x in D]
            new_f = [self.evaluation(x) for x in new_x]

            # Track the best candidate seen so far.
            for j in range(self.population_size):
                if new_f[j] > best_f:
                    best_f = new_f[j]
                    best_x = new_x[j]

            D = new_x

        return {'best_x': best_x, 'best_f': best_f}
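Note: run() depends on a cs config space (for sample()) and an evaluation callable that the shown __init__ does not set, so a caller has to attach them. A toy sketch under that assumption; the stub and the fitness function are illustrative:

import random
from analogainas.search_algorithms.ea import EAOptimizer

class ToyConfigSpace:
    # Stub standing in for the real ConfigSpace.
    def sample(self):
        return {"width": random.randint(8, 64)}

opt = EAOptimizer(max_iter=10, population_size=5, mutation_prob=0.3)
opt.cs = ToyConfigSpace()                                    # used by run()
opt.evaluation = lambda c: 1.0 / (1 + abs(c["width"] - 32))  # toy fitness

result = opt.run()
print(result["best_x"], result["best_f"])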
35 changes: 33 additions & 2 deletions analogainas/search_spaces/sample.py
@@ -1,4 +1,5 @@
"""High level Sampling strategies."""
import random
import numpy as np
from analogainas.search_spaces.resnet_macro_architecture import Network
from analogainas.search_spaces.config_space import ConfigSpace
from analogainas.search_algorithms.worker import Worker
@@ -9,6 +10,36 @@
EPOCHS = 40
LEARNING_RATE = 0.05

def latin_hypercube_sample(dataset, n):
    """Latin Hypercube Sampling of n architectures from ConfigSpace."""
    cs = ConfigSpace(dataset)
    ranges = np.arange(0, 1, 1/n)  # left edge of each 1/n stratum

    sampled_architectures = []
    for k in range(n):
        config = {}
        for hyperparameter in cs.get_hyperparameters():
            min_val, max_val = hyperparameter.lower, hyperparameter.upper
            val_range = max_val - min_val
            # Uniform offset within the k-th stratum of this parameter.
            offset = random.uniform(0, val_range/n)
            config[hyperparameter.name] = min_val + ranges[k] * val_range + offset
        sampled_architectures.append(config)

    keys = sampled_architectures[0].keys()

    for config in sampled_architectures:
        model = Network(config)
        model_name = "resnet_{}_{}".format(config["M"], get_nb_convs(config))

        with open("./configs/" + model_name + ".config",
                  'w', newline='') as output_file:
            dict_writer = csv.DictWriter(output_file, keys)
            dict_writer.writeheader()
            dict_writer.writerow(config)

        train(model, model_name, LEARNING_RATE, EPOCHS)


def random_sample(dataset, n):
"""Randomly samples n architectures from ConfigSpace."""
@@ -35,4 +66,4 @@ def ea_sample(dataset, n, n_iter):
    using an evolutionary algorithm."""
    cs = ConfigSpace(dataset)
    worker = Worker(dataset, cs, 3, n_iter)
    worker.search(population_size=n)
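Note: the stratification that latin_hypercube_sample implements (one draw per 1/n slice of each parameter range, offset uniformly within the slice) can be checked in isolation on a single toy hyperparameter:

import random
import numpy as np

def lhs_1d(min_val, max_val, n):
    # One draw per stratum of width (max_val - min_val) / n,
    # mirroring the offset formula in latin_hypercube_sample.
    val_range = max_val - min_val
    return [min_val + start * val_range + random.uniform(0, val_range / n)
            for start in np.arange(0, 1, 1 / n)]

samples = lhs_1d(0.0, 10.0, 5)
# Sample k lands in its own stratum [2k, 2k + 2] (tolerance for float rounding).
assert all(2 * k - 1e-9 <= x <= 2 * (k + 1) + 1e-9 for k, x in enumerate(samples))
print(samples)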
1 change: 0 additions & 1 deletion analogainas/search_spaces/train.py
@@ -22,7 +22,6 @@
continue_analog = True
device = 'cuda' if torch.cuda.is_available() else 'cpu'


def create_rpu_config(g_max=25,
                      tile_size=256,
                      dac_res=256,
1 change: 0 additions & 1 deletion analogainas/utils.py
@@ -5,7 +5,6 @@

ALPHA = 0.05


def accuracy_mse(prediction, target, scale=100.0):
    prediction = prediction.detach() * scale
    target = target * scale
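Note: the hunk cuts off before the return, so the exact value is not shown; assuming accuracy_mse returns the mean squared error of the rescaled tensors (an assumption, suggested by the name and its use in mlp.py), a call would look like:

import torch
from analogainas.utils import accuracy_mse

pred = torch.tensor([0.91, 0.88], requires_grad=True)  # e.g. predicted accuracies
target = torch.tensor([0.90, 0.85])
print(accuracy_mse(pred, target, scale=100.0))  # hypothetical: a scalar MSE tensor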
