diff --git a/toolkit.py b/toolkit.py
index c6a2a917fae4e471b18ae21feb8ad362f0e98d05..6f2437876daf24ef80489dd2065c4a380336426e 100755
--- a/toolkit.py
+++ b/toolkit.py
@@ -108,7 +108,7 @@ class ClassificationProject(object):
 
     :param layers: number of layers in the neural network
 
-    :param nodes: number of nodes in each layer
+    :param nodes: list of the number of nodes in each layer. If only a single number is given, it is used for every layer
 
     :param dropout: dropout fraction after each hidden layer. Set to None for no Dropout
 
@@ -236,6 +236,12 @@ class ClassificationProject(object):
         self.identifiers = identifiers
         self.layers = layers
         self.nodes = nodes
+        if not isinstance(self.nodes, list):
+            self.nodes = [self.nodes] * self.layers
+        if len(self.nodes) != self.layers:
+            self.layers = len(self.nodes)
+            logger.warning("Number of layers does not match the length of "
+                           "the given nodes list - adjusting layers to %s", self.layers)
         self.dropout = dropout
         self.batch_size = batch_size
         self.validation_split = validation_split
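
A minimal standalone sketch of the normalization introduced in the constructor above; `normalize_nodes` is a hypothetical helper name used only for illustration (the project does this inline in `__init__`):

```python
import logging

logger = logging.getLogger(__name__)

def normalize_nodes(nodes, layers):
    """Expand a scalar `nodes` to one entry per layer and reconcile `layers`."""
    if not isinstance(nodes, list):
        nodes = [nodes] * layers
    if len(nodes) != layers:
        layers = len(nodes)
        logger.warning("Number of layers does not match the length of "
                       "the given nodes list - adjusting layers to %s", layers)
    return nodes, layers

print(normalize_nodes(64, 3))        # ([64, 64, 64], 3)
print(normalize_nodes([64, 32], 3))  # ([64, 32], 2) plus a warning
```
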
@@ -583,10 +589,10 @@ class ClassificationProject(object):
             self._model = Sequential()
 
             # first hidden layer
-            self._model.add(Dense(self.nodes, input_dim=len(self.fields), activation=self.activation_function))
+            self._model.add(Dense(self.nodes[0], input_dim=len(self.fields), activation=self.activation_function))
             # the other hidden layers
-            for layer_number in range(self.layers-1):
-                self._model.add(Dense(self.nodes, activation=self.activation_function))
+            for node_count in self.nodes[1:]:
+                self._model.add(Dense(node_count, activation=self.activation_function))
                 if self.dropout is not None:
                     self._model.add(Dropout(rate=self.dropout))
             # last layer is one neuron (binary classification)
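
For context, a self-contained Keras sketch of how the per-layer nodes list now drives the hidden layers; the input dimension, activation and dropout values below are illustrative stand-ins, not the project's actual configuration:

```python
from keras.models import Sequential
from keras.layers import Dense, Dropout

nodes = [64, 32, 16]   # one entry per hidden layer
n_inputs = 10          # stand-in for len(self.fields)
dropout = 0.3

model = Sequential()
# the first hidden layer fixes the input dimension
model.add(Dense(nodes[0], input_dim=n_inputs, activation="relu"))
# the remaining hidden layers take their width from the list
for node_count in nodes[1:]:
    model.add(Dense(node_count, activation="relu"))
    if dropout is not None:
        model.add(Dropout(rate=dropout))
# single sigmoid output for binary classification
model.add(Dense(1, activation="sigmoid"))
model.summary()
```
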
diff --git a/utils.py b/utils.py
index adf213f7edb71086fa8e9148a77062565f098f0e..1da306ca79f3b350d9e12708c942254ddebea067 100644
--- a/utils.py
+++ b/utils.py
@@ -6,6 +6,7 @@ import numpy as np
 
 import keras.backend as K
 from sklearn.preprocessing import RobustScaler
+from sklearn.preprocessing.data import _handle_zeros_in_scale
 
 from meme import cache
 
@@ -140,5 +141,6 @@ class WeightedRobustScaler(RobustScaler):
             wqs = np.array([weighted_quantile(X[:,i], [0.25, 0.5, 0.75], sample_weight=weights) for i in range(X.shape[1])])
             self.center_ = wqs[:,1]
             self.scale_ = wqs[:,2]-wqs[:,0]
+            self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
             return self
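
The added `_handle_zeros_in_scale` call guards against features whose weighted IQR is zero (e.g. a constant column), which would otherwise cause a division by zero in `transform()`. A standalone illustration of the problem, using a local `handle_zeros` that mirrors what sklearn's private helper is understood to do (replace zero scales by 1 so such features pass through unscaled):

```python
import numpy as np

def handle_zeros(scale):
    """Replace zero entries of a scale vector by 1.0 to avoid division by zero."""
    scale = np.asarray(scale, dtype=float).copy()
    scale[scale == 0.0] = 1.0
    return scale

X = np.array([[1.0, 5.0],
              [2.0, 5.0],
              [3.0, 5.0]])   # second column is constant -> IQR of 0
q25, q75 = np.percentile(X, [25, 75], axis=0)
scale = q75 - q25            # [1.0, 0.0]
print((X - np.median(X, axis=0)) / handle_zeros(scale))  # finite everywhere
```
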