From 4fa1fcff31d31c106998ae63ffab7a2f5f0d6909 Mon Sep 17 00:00:00 2001
From: Nikolai Hartmann <Nikolai.Hartmann@physik.uni-muenchen.de>
Date: Fri, 3 Aug 2018 17:56:50 +0200
Subject: [PATCH] Add more configuration options for plotting (step size, iteration count, activation-histogram mode)

---
 plotting.py                   |  4 ++-
 scripts/plot_single_neuron.py | 55 ++++++++++++++++++++++++++++-------
 tfhelpers.py                  |  3 +-
 3 files changed, 50 insertions(+), 12 deletions(-)

diff --git a/plotting.py b/plotting.py
index 0511052..01061b2 100644
--- a/plotting.py
+++ b/plotting.py
@@ -255,6 +255,8 @@ def plot_cond_avg_actmax_2D(plotname, model, layer, neuron, ranges,
                             nbinsx, xmin, xmax, nbinsy, ymin, ymax,
                             scaler=None,
                             ntries=20,
+                            step=1,
+                            maxit=1,
                             **kwargs):
 
     xedges = np.linspace(xmin, xmax, nbinsx)
@@ -273,7 +275,7 @@ def plot_cond_avg_actmax_2D(plotname, model, layer, neuron, ranges,
                 random_event[0][index] = val
             if scaler is not None:
                 random_event = scaler.transform(random_event)
-            act = np.mean([max_activation_wrt_input(gradient_function, random_event, maxit=1, const_indices=[varx_index, vary_index])[0][0] for i in range(ntries)])
+            act = np.mean([max_activation_wrt_input(gradient_function, random_event, maxit=maxit, step=step, const_indices=[varx_index, vary_index])[0][0] for i in range(ntries)])
             hist[ix][iy] = act
 
     hist = hist.T
diff --git a/scripts/plot_single_neuron.py b/scripts/plot_single_neuron.py
index ce6728f..59819d1 100755
--- a/scripts/plot_single_neuron.py
+++ b/scripts/plot_single_neuron.py
@@ -2,6 +2,8 @@
 
 import sys
 import argparse
+import logging
+logging.basicConfig()
 
 import numpy as np
 
@@ -13,7 +15,7 @@ from KerasROOTClassification.plotting import (
     plot_hist_2D_events,
     plot_cond_avg_actmax_2D
 )
-from KerasROOTClassification.tfhelpers import get_single_neuron_function
+from KerasROOTClassification.tfhelpers import get_single_neuron_function, get_max_activation_events
 
 parser = argparse.ArgumentParser(description='Create various 2D plots for a single neuron')
 parser.add_argument("project_dir")
@@ -21,7 +23,7 @@ parser.add_argument("output_filename")
 parser.add_argument("varx")
 parser.add_argument("vary")
 parser.add_argument("-m", "--mode",
-                    choices=["mean_sig", "mean_bkg", "profile_sig", "profile_bkg", "hist_sig", "hist_bkg", "cond_actmax"],
+                    choices=["mean_sig", "mean_bkg", "profile_sig", "profile_bkg", "hist_sig", "hist_bkg", "hist_actmax", "cond_actmax"],
                     default="mean_sig")
 parser.add_argument("-l", "--layer", type=int, help="Layer index (takes last layer by default)")
 parser.add_argument("-n", "--neuron", type=int, default=0, help="Neuron index (takes first neuron by default)")
@@ -32,11 +34,21 @@ parser.add_argument("-x", "--xrange", type=float, nargs="+", help="xrange (low,
 parser.add_argument("-y", "--yrange", type=float, nargs="+", help="yrange (low, high)")
 parser.add_argument("-p", "--profile-metric", help="metric for profile modes", default="average", choices=["mean", "average", "max"])
 parser.add_argument("--ntries-cond-actmax", help="number of random events to be maximised and averaged per bin", default=20, type=int)
+parser.add_argument("--nit-cond-actmax", help="number of iterations for maximisation per bin", default=1, type=int)
+parser.add_argument("--ntries-actmax", help="number of random events to be maximised for hist_actmax", default=10000, type=int)
+parser.add_argument("-t", "--threshold", help="minimum activation threshold", default=0.2, type=float)
+parser.add_argument("-v", "--verbose", action="store_true")
+parser.add_argument("-s", "--step-size", help="step size for activation maximisation", default=1., type=float)
 
 args = parser.parse_args()
 
+if args.verbose:
+    logging.getLogger().setLevel(logging.DEBUG)
+
 c = ClassificationProject(args.project_dir)
 
+plot_vs_activation = (args.vary == "activation")
+
 layer = args.layer
 neuron = args.neuron
 
@@ -44,7 +56,10 @@ if layer is None:
     layer = c.layers
 
 varx_index = c.branches.index(args.varx)
-vary_index = c.branches.index(args.vary)
+if not plot_vs_activation:
+    vary_index = c.branches.index(args.vary)
+else:
+    vary_index = 0 # dummy value in this case
 
 varx_label = args.varx
 vary_label = args.vary
@@ -65,6 +80,8 @@ if args.yrange is not None:
     vary_range = args.yrange
 else:
     vary_range = (percentilesy[0], percentilesy[1], args.nbins)
+    if plot_vs_activation:
+        vary_range = (0, 1, args.nbins)
 
 if args.mode.startswith("mean"):
 
@@ -119,14 +136,30 @@ elif args.mode.startswith("profile"):
 
 elif args.mode.startswith("hist"):
 
-    if args.mode == "hist_sig":
-        class_index = 1
+    if not args.mode == "hist_actmax":
+        if args.mode == "hist_sig":
+            class_index = 1
+        else:
+            class_index = 0
+
+        valsx = c.x_test[c.y_test==class_index][:,varx_index]
+        if not plot_vs_activation:
+            valsy = c.x_test[c.y_test==class_index][:,vary_index]
+        else:
+            valsy = c.scores_test[c.y_test==class_index].reshape(-1)
+        weights = c.w_test[c.y_test==class_index]
     else:
-        class_index = 0
-
-    valsx = c.x_test[c.y_test==class_index][:,varx_index]
-    valsy = c.x_test[c.y_test==class_index][:,vary_index]
-    weights = c.w_test[c.y_test==class_index]
+        # ranges in which to sample the random events
+        x_test_scaled = c.scaler.transform(c.x_test)
+        ranges = [np.percentile(x_test_scaled[:,var_index], [1,99]) for var_index in range(len(c.branches))]
+        losses, events = get_max_activation_events(c.model, ranges, ntries=args.ntries_actmax, step=args.step_size, layer=layer, neuron=neuron, threshold=args.threshold)
+        events = c.scaler.inverse_transform(events)
+        valsx = events[:,varx_index]
+        if not plot_vs_activation:
+            valsy = events[:,vary_index]
+        else:
+            valsy = losses
+        weights = None
 
     plot_hist_2D_events(
         args.output_filename,
@@ -154,6 +187,8 @@ elif args.mode.startswith("cond_actmax"):
         ymin=vary_range[0], ymax=vary_range[1], nbinsy=vary_range[2],
         scaler=c.scaler,
         ntries=args.ntries_cond_actmax,
+        maxit=args.nit_cond_actmax,
+        step=args.step_size,
         varx_label=varx_label, vary_label=vary_label,
         log=args.log,
     )
diff --git a/tfhelpers.py b/tfhelpers.py
index d31fe79..8f3c7b4 100644
--- a/tfhelpers.py
+++ b/tfhelpers.py
@@ -72,12 +72,13 @@ def get_grad_function(model, layer, neuron):
            lambda ranges: [hash(i.tostring()) for i in ranges],
        ],
 )
-def get_max_activation_events(model, ranges, ntries, layer, neuron, **kwargs):
+def get_max_activation_events(model, ranges, ntries, layer, neuron, seed=42, **kwargs):
 
     gradient_function = get_grad_function(model, layer, neuron)
 
     events = None
     losses = None
+    np.random.seed(seed)
     for i in range(ntries):
         if not (i%100):
             logger.info(i)
-- 
GitLab