Skip to content
Snippets Groups Projects
Commit 391e7e92 authored by Nikolai's avatar Nikolai
Browse files

option to evaluate before activation

parent 290a8013
No related branches found
No related tags found
No related merge requests found
......@@ -38,6 +38,7 @@ from keras.callbacks import History, EarlyStopping, CSVLogger, ModelCheckpoint,
from keras.optimizers import SGD
import keras.optimizers
from keras.utils.vis_utils import model_to_dot
from keras import backend as K
import matplotlib.pyplot as plt
from .utils import WeightedRobustScaler, weighted_quantile, poisson_asimov_significance
......@@ -896,24 +897,42 @@ class ClassificationProject(object):
self._write_info("epochs", self.total_epochs)
def evaluate_train_test(self, do_train=True, do_test=True, mode=None):
    """Recompute classifier scores for the train and/or test samples.

    :param do_train: if True, recompute and dump ``self.scores_train``
    :param do_test: if True, recompute and dump ``self.scores_test``
    :param mode: forwarded to ``predict``; ``None`` gives the normal model
        output, ``"skip_activation"`` the pre-activation output. When given,
        the mode is also persisted via ``_write_info`` so it is recorded how
        the stored scores were produced.
    """
    logger.info("Reloading (and re-transforming) unshuffled training data")
    self.load(reload=True)

    if mode is not None:
        self._write_info("scores_mode", mode)

    logger.info("Create/Update scores for train/test sample")
    if do_test:
        self.scores_test = self.predict(self.x_test, mode=mode)
        self._dump_to_hdf5("scores_test")
    if do_train:
        self.scores_train = self.predict(self.x_train, mode=mode)
        self._dump_to_hdf5("scores_train")
def predict(self, x, mode=None):
    """Run the trained model on ``x``.

    :param x: input array in the format the model expects
    :param mode: ``None`` returns the usual model output (after the output
        layer's activation function); ``"skip_activation"`` returns the
        output *before* the final activation is applied, i.e. the weighted
        sum + bias feeding the output activation, obtained by evaluating
        the op input of the model's output tensor in the Keras session.
    :raises ValueError: for any other value of ``mode``
    """
    if mode is None:
        # normal output - after activation function of the output layer
        return self.model.predict(x)
    elif mode == "skip_activation":
        # output before applying the activation function
        # (after weighted sum + bias of the last hidden layer)
        return K.get_session().run(
            self.model.output.op.inputs[0],
            feed_dict={self.model.input : x}
        )
    else:
        raise ValueError("Unknown mode {}".format(mode))
def evaluate(self, x_eval, mode=None):
    """Scale ``x_eval`` with the fitted scaler and return the model scores.

    :param x_eval: raw (untransformed) input array
    :param mode: forwarded to ``predict`` (``None`` or ``"skip_activation"``)
    """
    logger.debug("Evaluate score for {}".format(x_eval))
    x_eval = self.scaler.transform(x_eval)
    logger.debug("Evaluate for transformed array: {}".format(x_eval))
    return self.predict(x_eval, mode=mode)
def write_friend_tree(self, score_name,
......@@ -1623,17 +1642,25 @@ class ClassificationProjectRNN(ClassificationProject):
return x_val_input, y_val, w_val
def evaluate_train_test(self, do_train=True, do_test=True, batch_size=10000, mode=None):
    """Recompute scores for the train/test samples, batched to limit memory.

    :param do_train: if True, recompute and dump ``self.scores_train``
    :param do_test: if True, recompute and dump ``self.scores_test``
    :param batch_size: number of events predicted per batch
    :param mode: forwarded to ``predict``; when given, also persisted via
        ``_write_info`` so it is recorded how the scores were produced.
    """
    logger.info("Reloading (and re-transforming) unshuffled training data")
    self.load(reload=True)

    if mode is not None:
        self._write_info("scores_mode", mode)

    def eval_score(data_name):
        # Score one sample ("train" or "test") batch-wise into a
        # pre-allocated flat array, then dump it to hdf5.
        logger.info("Create/Update scores for {} sample".format(data_name))
        n_events = len(getattr(self, "x_"+data_name))
        setattr(self, "scores_"+data_name, np.empty(n_events))
        for start in range(0, n_events, batch_size):
            stop = start+batch_size
            getattr(self, "scores_"+data_name)[start:stop] = (
                self.predict(
                    self.get_input_list(getattr(self, "x_"+data_name)[start:stop]),
                    mode=mode
                ).reshape(-1)
            )
        self._dump_to_hdf5("scores_"+data_name)

    # NOTE(review): the dispatch below was elided at a diff hunk boundary;
    # reconstructed from the surrounding context - confirm against the full file.
    if do_test:
        eval_score("test")
    if do_train:
        eval_score("train")
def evaluate(self, x_eval, mode=None):
    """Scale ``x_eval`` and return the model scores for the RNN project.

    Entries equal to ``self.mask_value`` are temporarily replaced by NaN
    around the scaler transform and restored afterwards - presumably so
    the scaler does not treat masked (padding) entries as real values;
    TODO confirm against the scaler implementation.

    :param x_eval: raw (untransformed) input array; not modified in place
    :param mode: forwarded to ``predict`` (``None`` or ``"skip_activation"``)
    """
    logger.debug("Evaluate score for {}".format(x_eval))
    x_eval = np.array(x_eval) # copy, so the caller's array stays untouched
    x_eval[x_eval==self.mask_value] = np.nan
    x_eval = self.scaler.transform(x_eval)
    x_eval[np.isnan(x_eval)] = self.mask_value
    logger.debug("Evaluate for transformed array: {}".format(x_eval))
    return self.predict(self.get_input_list(x_eval), mode=mode)
if __name__ == "__main__":
......
0% Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment