import logging

import numpy as np

import keras.backend as K

from meme import cache

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def get_single_neuron_function(model, layer, neuron, scaler=None):

    # backend function that evaluates the activation of one neuron in the given layer
    f = K.function([model.input]+[K.learning_phase()], [model.layers[layer].output[:,neuron]])

    def eval_single_neuron(x):
        if scaler is not None:
            x_eval = scaler.transform(x)
        else:
            x_eval = x
        # feed 0 (= test phase) for the learning-phase input
        return f([x_eval, 0])[0]

    return eval_single_neuron
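
# Usage sketch (hypothetical names, assuming a compiled Keras model `model`
# and a 2D numpy array `X` preprocessed like the training data):
#
#   activation = get_single_neuron_function(model, layer=-1, neuron=0)
#   scores = activation(X)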
def create_random_event(ranges):
    # draw each feature uniformly from its (low, high) range
    random_event = np.array([p[0]+(p[1]-p[0])*np.random.rand() for p in ranges])
    # reshape to a batch of one event, shape (1, nfeatures)
    random_event = random_event.reshape(-1, len(random_event))
    return random_event
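
# For example (hypothetical values), ranges = np.array([[0., 1.], [-5., 5.]])
# yields an event of shape (1, 2) with the first feature drawn uniformly from
# [0, 1] and the second from [-5, 5]. Passing ranges as a numpy array (rather
# than a list of tuples) also keeps it hashable for the cache decorator below.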
def max_activation_wrt_input(gradient_function, random_event, threshold=None, maxthreshold=None, maxit=100, step=1, const_indices=()):
    for i in range(maxit):
        loss_value, grads_value = gradient_function([random_event])
        # keep features that should stay constant fixed by zeroing their gradient
        for const_index in const_indices:
            grads_value[0][const_index] = 0
        if threshold is not None:
            if loss_value > threshold and (maxthreshold is None or loss_value < maxthreshold):
                # found an event within the thresholds
                return loss_value, random_event
            elif (maxthreshold is not None and loss_value > maxthreshold):
                # activation above the upper threshold - step back down the gradient
                random_event -= grads_value*step
            else:
                random_event += grads_value*step
        else:
            # no threshold requested - plain gradient ascent
            random_event += grads_value*step
    if threshold is not None:
        # no event found within maxit iterations
        return None
    # if no threshold requested, always return the last status
    return loss_value, random_event
def get_grad_function(model, layer, neuron):

    loss = model.layers[layer].output[:,neuron]
    grads = K.gradients(loss, model.input)[0]

    # normalization trick from https://blog.keras.io/how-convolutional-neural-networks-see-the-world.html
    # (normalize the gradient to unit RMS so the ascent step size stays comparable)
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    return K.function([model.input], [loss, grads])
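
# Usage sketch (assuming the same hypothetical `model` as above):
#
#   grad_fn = get_grad_function(model, layer=-1, neuron=0)
#   loss_value, grads_value = grad_fn([x])   # x has shape (1, nfeatures)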
@cache(useJSON=True,
       argHashFunctions=[
           lambda model: [hash(i.tostring()) for i in model.get_weights()],
           lambda ranges: [hash(i.tostring()) for i in ranges],
       ],
)
def get_max_activation_events(model, ranges, ntries, layer, neuron, seed=42, **kwargs):

    gradient_function = get_grad_function(model, layer, neuron)

    # seed the RNG so the random starting events are reproducible
    np.random.seed(seed)

    events = None
    losses = None

    for i in range(ntries):
        if not (i%100):
            logger.info(i)
        res = max_activation_wrt_input(gradient_function, create_random_event(ranges), **kwargs)
        if res is not None:
            loss, event = res
        else:
            continue
        if events is None:
            events = event
            losses = loss
        else:
            events = np.concatenate([events, event])
            losses = np.concatenate([losses, loss])

    return losses, events
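
if __name__ == "__main__":

    # Minimal self-contained demo - a sketch, not part of the original module.
    # It wires the pieces together directly, bypassing the meme cache (whose
    # on-disk behaviour depends on your setup). The tiny model below is
    # untrained, so the maximising event is meaningless, but the call sequence
    # is the same for a real trained model; get_max_activation_events wraps
    # this loop with caching over many random starting points.
    from keras.models import Sequential
    from keras.layers import Dense

    demo_model = Sequential([
        Dense(8, activation="relu", input_shape=(2,)),
        Dense(1, activation="sigmoid"),
    ])
    demo_model.compile(optimizer="sgd", loss="binary_crossentropy")

    grad_fn = get_grad_function(demo_model, layer=-1, neuron=0)
    start = create_random_event([(0., 1.), (0., 1.)])
    loss_value, event = max_activation_wrt_input(grad_fn, start, step=0.1)
    print(loss_value, event)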