From ab3785adc4d72179e0c59f6adfa51d4376a31c07 Mon Sep 17 00:00:00 2001
From: Nikolai Hartmann <Nikolai.Hartmann@physik.uni-muenchen.de>
Date: Wed, 19 Sep 2018 13:43:41 +0200
Subject: [PATCH] Switch to the Keras functional API and make use of bias
 vectors configurable

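Replace the Sequential model construction with the Keras functional
API: the input, hidden and output layers are now wired up explicitly
and combined via Model. A new use_bias option controls whether the
hidden Dense layers use bias terms; it accepts a single boolean or a
list of booleans, one entry per hidden layer.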
---
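Note: the functional-API code relies on Input and Model. This patch
does not touch the import block, so it assumes toolkit.py already
imports them; if it does not, the imports would have to be extended
along these lines (a sketch, assuming plain Keras rather than
tf.keras):

    from keras.models import Model
    from keras.layers import Input, Dense, Dropout
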
 toolkit.py | 40 +++++++++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 13 deletions(-)
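
Example of the new option (a sketch: the project name and the layers
argument are assumptions based on the surrounding code, not part of
this patch):

    # one boolean: applied to every hidden layer
    project = ClassificationProject("my_project", layers=3, use_bias=False)

    # one boolean per hidden layer; a list whose length does not match
    # the number of layers raises a ValueError
    project = ClassificationProject("my_project", layers=3,
                                    use_bias=[True, False, True])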

diff --git a/toolkit.py b/toolkit.py
index 65c526f..b224fed 100755
--- a/toolkit.py
+++ b/toolkit.py
@@ -124,6 +124,8 @@ class ClassificationProject(object):
 
     :param dropout_input: dropout fraction for the input layer. Set to None for no Dropout.
 
+    :param use_bias: use a bias term in each neuron (default: True). Can also be given as a list of booleans, one entry per hidden layer.
+
     :param batch_size: size of the training batches
 
     :param validation_split: split off this fraction of training events for loss evaluation
@@ -229,6 +231,7 @@ class ClassificationProject(object):
                         nodes=64,
                         dropout=None,
                         dropout_input=None,
+                        use_bias=True,
                         batch_size=128,
                         validation_split=0.33,
                         kfold_splits=None,
@@ -288,9 +291,14 @@ class ClassificationProject(object):
         self.dropout = dropout
         if not isinstance(self.dropout, list):
             self.dropout = [self.dropout for i in range(self.layers)]
+        self.dropout_input = dropout_input
+        self.use_bias = use_bias
+        if not isinstance(self.use_bias, list):
+            self.use_bias = [self.use_bias for i in range(self.layers)]
         if len(self.dropout) != self.layers:
             raise ValueError("List of dropout fractions has to be of equal size as the number of layers!")
-        self.dropout_input = dropout_input
+        if len(self.use_bias) != self.layers:
+            raise ValueError("List of biases has to be of equal size as the number of layers!")
         self.batch_size = batch_size
         self.validation_split = validation_split
         self.kfold_splits = kfold_splits
@@ -703,23 +711,29 @@ class ClassificationProject(object):
 
         if self._model is None:
 
-            self._model = Sequential()
+            # input layer
+            input_layer = Input((len(self.fields),))
 
+            # optional dropout on inputs
             if self.dropout_input is None:
-                self._model.add(Dense(self.nodes[0], input_dim=len(self.fields), activation=self.activation_function))
-                # in case of no Dropout we already have the first hidden layer
-                start_layer = 1
+                hidden_layer = input_layer
             else:
-                self._model.add(Dropout(rate=self.dropout_input, input_shape=(len(self.fields),)))
-                start_layer = 0
-            # the (other) hidden layers
-            for node_count, dropout_fraction in zip(self.nodes[start_layer:], self.dropout[start_layer:]):
-                self._model.add(Dense(node_count, activation=self.activation_function))
+                hidden_layer = Dropout(rate=self.dropout_input)(input_layer)
+
+            # densely connected hidden layers
+            for node_count, dropout_fraction, use_bias in zip(
+                    self.nodes,
+                    self.dropout,
+                    self.use_bias,
+            ):
+                hidden_layer = Dense(node_count, activation=self.activation_function, use_bias=use_bias)(hidden_layer)
                 if (dropout_fraction is not None) and (dropout_fraction > 0):
-                    self._model.add(Dropout(rate=dropout_fraction))
-            # last layer is one neuron (binary classification)
-            self._model.add(Dense(1, activation=self.activation_function_output))
+                    hidden_layer = Dropout(rate=dropout_fraction)(hidden_layer)
+
+            # one output node for binary classification
+            output_layer = Dense(1, activation=self.activation_function_output)(hidden_layer)
 
+            self._model = Model(inputs=[input_layer], outputs=[output_layer])
             self._compile_or_load_model()
 
         return self._model
-- 
GitLab