diff --git a/toolkit.py b/toolkit.py
index 5fd767425e5fff17aea994b7ce47a86aae3817e0..5280d07aedec891605415b0283fd12882bbc1017 100755
--- a/toolkit.py
+++ b/toolkit.py
@@ -45,21 +45,8 @@ import matplotlib.pyplot as plt
 from .utils import WeightedRobustScaler, weighted_quantile, poisson_asimov_significance
 from .plotting import save_show
 
-# configure number of cores
-# this doesn't seem to work, but at least with these settings keras only uses 4 processes
-# import tensorflow as tf
-# from keras import backend as K
-# num_cores = 1
-# config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,
-#                         inter_op_parallelism_threads=num_cores,
-#                         allow_soft_placement=True,
-#                         device_count = {'CPU': num_cores})
-# session = tf.Session(config=config)
-# K.set_session(session)
-
 import ROOT
 
-
 def byteify(input):
     "From stackoverflow https://stackoverflow.com/a/13105359"
     if isinstance(input, dict):
@@ -1328,7 +1315,7 @@ class ClassificationProject(object):
         return lambda y : np.log(y/(1-y))
 
 
-    def plot_significance(self, significance_function=None, maxsteps=1000, lumifactor=1., vectorized=False, invert_activation=False):
+    def plot_significance(self, significance_function=None, maxsteps=None, lumifactor=1., vectorized=False, invert_activation=False):
         """
         Plot the significance when cutting on all possible thresholds, plotted against the signal efficiency.
         """
@@ -1353,7 +1340,10 @@ class ClassificationProject(object):
         ):
             scores = trf(scores)
             s_sumws, s_errs, b_sumws, b_errs, thresholds = self.calc_s_ds_b_db(scores, y, w)
-            stepsize = int(len(s_sumws))/int(maxsteps)
+            if maxsteps is not None:
+                stepsize = len(s_sumws) // maxsteps  # floor division gives an integer slice step (roughly maxsteps points kept)
+            else:
+                stepsize = 1
             if stepsize == 0:
                 stepsize = 1
             s_sumws = s_sumws[::stepsize]*lumifactor*self.step_signal
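
As a side note on the maxsteps change in plot_significance above: below is a minimal standalone sketch of the thinning logic with the new None default. The helper name thin_thresholds and the toy arrays are illustrative assumptions, not code from the project:

import numpy as np

def thin_thresholds(s_sumws, b_sumws, maxsteps=None):
    """Thin the per-threshold arrays to roughly maxsteps points; maxsteps=None keeps everything."""
    if maxsteps is not None:
        # floor division so the result can be used directly as a slice step
        stepsize = len(s_sumws) // maxsteps
    else:
        stepsize = 1
    if stepsize == 0:
        # fewer thresholds than maxsteps: keep every point
        stepsize = 1
    return s_sumws[::stepsize], b_sumws[::stepsize]

# toy per-threshold signal/background sums (illustrative only)
s_sumws = np.linspace(0.0, 10.0, 5000)
b_sumws = np.linspace(0.0, 100.0, 5000)

s_thin, b_thin = thin_thresholds(s_sumws, b_sumws, maxsteps=1000)
print(len(s_thin), len(b_thin))                    # 1000 1000
print(len(thin_thresholds(s_sumws, b_sumws)[0]))   # 5000 (None keeps all points)

With maxsteps=None every threshold is kept, which matches the new default behaviour in the hunk above.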
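
The significance_function argument and the poisson_asimov_significance import suggest that a Z-value is evaluated from the per-threshold signal and background sums. As an illustration only (the real signature and behaviour of poisson_asimov_significance are not shown in this diff), a vectorized significance in the standard Asimov approximation could look like:

import numpy as np

def asimov_significance(s, b):
    """Median discovery significance Z_A = sqrt(2*((s+b)*ln(1+s/b) - s)),
    vectorized over numpy arrays of signal (s) and background (b) sums."""
    s = np.asarray(s, dtype=float)
    b = np.asarray(b, dtype=float)
    return np.sqrt(2.0 * ((s + b) * np.log1p(s / b) - s))

print(asimov_significance(10.0, 100.0))  # roughly 0.98 for s=10, b=100

Whether this matches the interface expected by plot_significance (and whether it should be passed with vectorized=True) is an assumption, not something the diff confirms.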