"Helper functions using keras or tensorflow"

import logging

import numpy as np

import keras.backend as K

from meme import cache

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

def get_single_neuron_function(model, layer, neuron, scaler=None):
    "Return a function that evaluates the activation of a single neuron, optionally applying scaler to the inputs first."

    f = K.function([model.input, K.learning_phase()],
                   [model.layers[layer].output[:, neuron]])

    def eval_single_neuron(x):
        if scaler is not None:
            x_eval = scaler.transform(x)
        else:
            x_eval = x
        return f([x_eval, 0])[0]  # 0 = test-time learning phase

    return eval_single_neuron
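
# Example usage (sketch, not part of the original API; `model`, `X` and
# `scaler` are placeholders for a trained Keras model, an input array and an
# optional sklearn-style scaler with a .transform method):
#
#     neuron_fn = get_single_neuron_function(model, layer=-1, neuron=0, scaler=scaler)
#     activations = neuron_fn(X)  # 1D array, one activation per row of X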


def create_random_event(ranges):
    "Draw one uniform random event, one value per (low, high) pair in ranges."
    random_event = np.array([low + (high - low) * np.random.rand() for low, high in ranges])
    random_event = random_event.reshape(-1, len(random_event))
    return random_event
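
# Example (sketch; the literal ranges are placeholders): one (low, high) pair
# per input feature, so a model with three inputs could use
#
#     event = create_random_event([(0., 1.), (-5., 5.), (100., 200.)])
#     event.shape  # -> (1, 3), directly feedable to the model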


def max_activation_wrt_input(gradient_function, random_event, threshold=None,
                             maxthreshold=None, maxit=100, step=1, const_indices=()):
    "Gradient-ascent on the input to drive a neuron activation above threshold (and, if given, below maxthreshold)."
    for _ in range(maxit):
        loss_value, grads_value = gradient_function([random_event])
        # freeze components that should stay constant
        for const_index in const_indices:
            grads_value[0][const_index] = 0
        if threshold is not None:
            if loss_value > threshold and (maxthreshold is None or loss_value < maxthreshold):
                # found an event within the thresholds
                return loss_value, random_event
            elif maxthreshold is not None and loss_value > maxthreshold:
                # overshot - step back down the gradient
                random_event -= grads_value * step
            else:
                random_event += grads_value * step
        else:
            random_event += grads_value * step
    else:
        # the loop has no break, so this runs whenever maxit is exhausted
        # without returning above
        if threshold is not None:
            # no event found within the thresholds
            return None
    # if no threshold was requested, always return the last status
    return loss_value, random_event


def get_grad_function(model, layer, neuron):
    "Return a function mapping a model input to (activation, normalised gradient) for one neuron."

    loss = model.layers[layer].output[:, neuron]

    grads = K.gradients(loss, model.input)[0]

    # trick from https://blog.keras.io/how-convolutional-neural-networks-see-the-world.html
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    return K.function([model.input], [loss, grads])
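
# Sketch of a single maximisation run combining the helpers above (`model` is
# a placeholder; the layer/neuron indices, feature count and thresholds are
# example values, not defaults):
#
#     grad_fn = get_grad_function(model, layer=-1, neuron=0)
#     start = create_random_event([(0., 1.)] * 5)  # assuming 5 input features
#     res = max_activation_wrt_input(grad_fn, start, threshold=0.9, maxit=1000)
#     if res is not None:
#         activation, event = res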


@cache(useJSON=True,
       argHashFunctions=[
           lambda model: [hash(i.tostring()) for i in model.get_weights()],
           lambda ranges: [hash(i.tostring()) for i in ranges],
       ],
)
def get_max_activation_events(model, ranges, ntries, layer, neuron, seed=42, **kwargs):
    "Run ntries gradient-ascent maximisations from random starting events and collect the successful ones."

    gradient_function = get_grad_function(model, layer, neuron)

    events = None
    losses = None
    np.random.seed(seed)
    for i in range(ntries):
        if not i % 100:
            logger.info("Iteration %d", i)
        res = max_activation_wrt_input(gradient_function, create_random_event(ranges), **kwargs)
        if res is None:
            continue
        loss, event = res
        if events is None:
            events = event
            losses = loss
        else:
            events = np.concatenate([events, event])
            losses = np.concatenate([losses, loss])

    return losses, events
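
# Example top-level call (sketch; assumes `model` is defined and `ranges` is a
# numpy array of (low, high) rows, e.g. np.array([[0., 1.], [-5., 5.]]), so
# the meme cache decorator can hash its rows via .tostring()). The threshold
# keyword is forwarded to max_activation_wrt_input:
#
#     losses, events = get_max_activation_events(model, ranges, ntries=1000,
#                                                layer=-1, neuron=0,
#                                                threshold=0.9)
#     # losses: 1D array of final activations; events: 2D array of input events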