diff --git a/keras-visualize-activations/.gitignore b/keras-visualize-activations/.gitignore
deleted file mode 100644
index b9d6d01b6f198cfe96bae995139f4e6d13966973..0000000000000000000000000000000000000000
--- a/keras-visualize-activations/.gitignore
+++ /dev/null
@@ -1,103 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-.DS_Store
-.idea/
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-env/
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-
-# PyInstaller
-#  Usually these files are written by a python script from a template
-#  before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-.hypothesis/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# pyenv
-.python-version
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# dotenv
-.env
-
-# virtualenv
-.venv
-venv/
-ENV/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
diff --git a/keras-visualize-activations/LICENSE b/keras-visualize-activations/LICENSE
deleted file mode 100644
index 8dada3edaf50dbc082c9a125058f25def75e625a..0000000000000000000000000000000000000000
--- a/keras-visualize-activations/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/keras-visualize-activations/README.md b/keras-visualize-activations/README.md
deleted file mode 100644
index db3ead1bb98663fa32d7bc31a62a4d3124f76f11..0000000000000000000000000000000000000000
--- a/keras-visualize-activations/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# Extract the activation maps of your Keras models
-[![license](https://img.shields.io/badge/License-Apache_2.0-brightgreen.svg)](https://github.com/philipperemy/keras-attention-mechanism/blob/master/LICENSE) [![dep1](https://img.shields.io/badge/Tensorflow-1.2+-blue.svg)](https://www.tensorflow.org/) [![dep2](https://img.shields.io/badge/Keras-2.0+-blue.svg)](https://keras.io/) 
-
-*Short code and useful examples to show how to get the activations for each layer for Keras.*
-
-**-> Works for any kind of model (recurrent, convolutional, residuals...). Not only for images!**
-
-## Example of MNIST
-
-Shapes of the activations (one sample) on Keras CNN MNIST:
-```
------ activations -----
-(1, 26, 26, 32)
-(1, 24, 24, 64)
-(1, 12, 12, 64)
-(1, 12, 12, 64)
-(1, 9216)
-(1, 128)
-(1, 128)
-(1, 10) # softmax output!
-```
-
-Shapes of the activations (batch of 200 samples) on Keras CNN MNIST:
-```
------ activations -----
-(200, 26, 26, 32)
-(200, 24, 24, 64)
-(200, 12, 12, 64)
-(200, 12, 12, 64)
-(200, 9216)
-(200, 128)
-(200, 128)
-(200, 10)
-```
-
-<p align="center">
-  <img src="assets/0.png" width="50">
-  <br><i>A random seven from MNIST</i>
-</p>
-
-
-<p align="center">
-  <img src="assets/1.png">
-  <br><i>Activation map of CONV1 of LeNet</i>
-</p>
-
-<p align="center">
-  <img src="assets/2.png" width="200">
-  <br><i>Activation map of FC1 of LeNet</i>
-</p>
-
-
-<p align="center">
-  <img src="assets/3.png" width="300">
-  <br><i>Activation map of Softmax of LeNet. <b>Yes it's a seven!</b></i>
-</p>
-
-<hr/>
-
-The function for visualizing the activations is in the script [read_activations.py](https://github.com/philipperemy/keras-visualize-activations/blob/master/read_activations.py)
-
-Inputs:
-- `model`: Keras model
-- `model_inputs`: Model inputs for which we want to get the activations (for example 200 MNIST images)
-- `print_shape_only`: If set to True, will print only the shapes of the activations; if set to False (the default), will print the entire activation arrays (might be very verbose!)
-- `layer_name`: Will retrieve the activations of a specific layer, if the name matches one of the existing layers of the model.
-
-Outputs:
-- returns a list of each layer (by order of definition) and its corresponding activations.
-
-I provide a simple example to see how it works with the MNIST model. I separated the training and the visualizations because if the two were to be done sequentially, we would have to re-train the model every time we would like to visualize the activations! Not very practical! Here are the main steps:
-
-Running `python model_train.py` will do:
-
-- define the model
-- if no checkpoints are detected:
-  - train the model
-  - save the best model in checkpoints/
-- load the model from the best checkpoint
-- read the activations
-
-`model_multi_inputs_train.py` contains very simple examples to visualize activations with multi inputs models. 
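
The deleted README above documents the `get_activations` interface (`model`, `model_inputs`, `print_shape_only`, `layer_name`). A minimal usage sketch, assuming the module remains importable under the new `keras_visualize_activations` package name and using a toy model instead of the repo's MNIST example:

```python
# Minimal sketch of the get_activations API described in the README above.
# Assumptions: Keras 2.x with a TensorFlow backend; the module is importable
# from the renamed keras_visualize_activations package; the toy model below is
# illustrative and not part of the repository.
import numpy as np
from keras.layers import Dense
from keras.models import Sequential

from keras_visualize_activations.read_activations import get_activations

model = Sequential([
    Dense(16, activation='relu', input_shape=(10,)),
    Dense(1, activation='sigmoid', name='output'),
])
model.compile(optimizer='adam', loss='binary_crossentropy')

x = np.random.uniform(size=(4, 10))  # batch of 4 samples

# One entry per layer, in definition order; print shapes only.
activations = get_activations(model, x, print_shape_only=True)

# Restrict the output to a single named layer.
out_only = get_activations(model, x, print_shape_only=True, layer_name='output')
```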
diff --git a/keras-visualize-activations/assets/0.png b/keras-visualize-activations/assets/0.png
deleted file mode 100644
index bb63ae85c5e043c1e637304e220c0bff8d1e1ea3..0000000000000000000000000000000000000000
Binary files a/keras-visualize-activations/assets/0.png and /dev/null differ
diff --git a/keras-visualize-activations/assets/1.png b/keras-visualize-activations/assets/1.png
deleted file mode 100644
index b42ea9bfe6f93884ba9566ab2914952eef4bbf07..0000000000000000000000000000000000000000
Binary files a/keras-visualize-activations/assets/1.png and /dev/null differ
diff --git a/keras-visualize-activations/assets/2.png b/keras-visualize-activations/assets/2.png
deleted file mode 100644
index 9d8650e740c2c9dca5e73a2cc99c8bf1ad879f34..0000000000000000000000000000000000000000
Binary files a/keras-visualize-activations/assets/2.png and /dev/null differ
diff --git a/keras-visualize-activations/assets/3.png b/keras-visualize-activations/assets/3.png
deleted file mode 100644
index d13d209a8b436b336ce4ec8e66b325344f520172..0000000000000000000000000000000000000000
Binary files a/keras-visualize-activations/assets/3.png and /dev/null differ
diff --git a/keras-visualize-activations/checkpoints/model_09_0.990.h5 b/keras-visualize-activations/checkpoints/model_09_0.990.h5
deleted file mode 100644
index 0ba2c701786c2e104b2a736d48f22adadc8dbbae..0000000000000000000000000000000000000000
Binary files a/keras-visualize-activations/checkpoints/model_09_0.990.h5 and /dev/null differ
diff --git a/keras-visualize-activations/data.py b/keras-visualize-activations/data.py
deleted file mode 100644
index 76d5e78e52d465ad89bb420600f5da8e14873389..0000000000000000000000000000000000000000
--- a/keras-visualize-activations/data.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import keras
-import keras.backend as K
-from keras.datasets import mnist
-
-# input image dimensions
-img_rows, img_cols = 28, 28
-input_shape = (img_rows, img_cols, 1)
-num_classes = 10
-
-
-def get_mnist_data():
-
-    # the data, shuffled and split between train and test sets
-    (x_train, y_train), (x_test, y_test) = mnist.load_data()
-
-    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
-    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
-
-    x_train = x_train.astype('float32')
-    x_test = x_test.astype('float32')
-    x_train /= 255
-    x_test /= 255
-    print('x_train shape:', x_train.shape)
-    print(x_train.shape[0], 'train samples')
-    print(x_test.shape[0], 'test samples')
-
-    # convert class vectors to binary class matrices
-    y_train = keras.utils.to_categorical(y_train, num_classes)
-    y_test = keras.utils.to_categorical(y_test, num_classes)
-    return x_train, y_train, x_test, y_test
\ No newline at end of file
diff --git a/keras-visualize-activations/model_multi_inputs_train.py b/keras-visualize-activations/model_multi_inputs_train.py
deleted file mode 100644
index d47c2ae537c2831cd6c4086da4b50ad2950fd930..0000000000000000000000000000000000000000
--- a/keras-visualize-activations/model_multi_inputs_train.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import keras.backend as K
-import numpy as np
-from keras.layers import merge, Dense
-from keras.models import Input, Model, Sequential
-
-
-def get_multi_inputs_model():
-    a = Input(shape=(10,))
-    b = Input(shape=(10,))
-    c = merge([a, b], mode='mul')
-    c = Dense(1, activation='sigmoid', name='only_this_layer')(c)
-    m_multi = Model(inputs=[a, b], outputs=c)
-    return m_multi
-
-
-def get_single_inputs_model():
-    m_single = Sequential()
-    m_single.add(Dense(1, activation='sigmoid', input_shape=(10,)))
-    return m_single
-
-
-if __name__ == '__main__':
-
-    m = get_multi_inputs_model()
-    m.compile(optimizer='adam',
-              loss='binary_crossentropy')
-
-    inp_a = np.random.uniform(size=(100, 10))
-    inp_b = np.random.uniform(size=(100, 10))
-    inp_o = np.random.randint(low=0, high=2, size=(100, 1))
-    m.fit([inp_a, inp_b], inp_o)
-
-    from read_activations import *
-
-    get_activations(m, [inp_a[0:1], inp_b[0:1]], print_shape_only=True)
-    get_activations(m, [inp_a[0:1], inp_b[0:1]], print_shape_only=True, layer_name='only_this_layer')
-
-    m2 = get_single_inputs_model()
-    m2.compile(optimizer='adam',
-               loss='binary_crossentropy')
-    m2.fit([inp_a], inp_o)
-
-    get_activations(m2, [inp_a[0]], print_shape_only=True)
diff --git a/keras-visualize-activations/model_train.py b/keras-visualize-activations/model_train.py
deleted file mode 100644
index 62454722d2e75320501f3342fda6404ee77fc84e..0000000000000000000000000000000000000000
--- a/keras-visualize-activations/model_train.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from __future__ import print_function
-
-from glob import glob
-
-import keras
-from keras.callbacks import ModelCheckpoint
-from keras.layers import Conv2D, MaxPooling2D
-from keras.layers import Dense, Dropout, Flatten
-from keras.models import Sequential
-
-from data import get_mnist_data, num_classes, input_shape
-from read_activations import get_activations, display_activations
-
-if __name__ == '__main__':
-
-    checkpoints = glob('checkpoints/*.h5')
-    # pip3 install natsort
-    from natsort import natsorted
-
-    from keras.models import load_model
-
-    if len(checkpoints) > 0:
-
-        checkpoints = natsorted(checkpoints)
-        assert len(checkpoints) != 0, 'No checkpoints found.'
-        checkpoint_file = checkpoints[-1]
-        print('Loading [{}]'.format(checkpoint_file))
-        model = load_model(checkpoint_file)
-
-        model.compile(optimizer='adam',
-                      loss='categorical_crossentropy',
-                      metrics=['accuracy'])
-
-        print(model.summary())
-
-        x_train, y_train, x_test, y_test = get_mnist_data()
-
-        # checking that the accuracy is the same as before 99% at the first epoch.
-        test_loss, test_acc = model.evaluate(x_test, y_test, verbose=1, batch_size=128)
-        print('')
-        assert test_acc > 0.98
-
-        a = get_activations(model, x_test[0:1], print_shape_only=True)  # with just one sample.
-        display_activations(a)
-
-        get_activations(model, x_test[0:200], print_shape_only=True)  # with 200 samples.
-
-        # import numpy as np
-        # import matplotlib.pyplot as plt
-        # plt.imshow(np.squeeze(x_test[0:1]), interpolation='None', cmap='gray')
-    else:
-        x_train, y_train, x_test, y_test = get_mnist_data()
-
-        model = Sequential()
-        model.add(Conv2D(32, kernel_size=(3, 3),
-                         activation='relu',
-                         input_shape=input_shape))
-        model.add(Conv2D(64, (3, 3), activation='relu'))
-        model.add(MaxPooling2D(pool_size=(2, 2)))
-        model.add(Dropout(0.25))
-        model.add(Flatten())
-        model.add(Dense(128, activation='relu'))
-        model.add(Dropout(0.5))
-        model.add(Dense(num_classes, activation='softmax'))
-
-        model.compile(loss=keras.losses.categorical_crossentropy,
-                      optimizer=keras.optimizers.Adadelta(),
-                      metrics=['accuracy'])
-
-        # Change starts here
-        import shutil
-        import os
-
-        # delete folder and its content and creates a new one.
-        try:
-            shutil.rmtree('checkpoints')
-        except:
-            pass
-        os.mkdir('checkpoints')
-
-        checkpoint = ModelCheckpoint(monitor='val_acc',
-                                     filepath='checkpoints/model_{epoch:02d}_{val_acc:.3f}.h5',
-                                     save_best_only=True)
-
-        model.fit(x_train, y_train,
-                  batch_size=128,
-                  epochs=12,
-                  verbose=1,
-                  validation_data=(x_test, y_test),
-                  callbacks=[checkpoint])
-
-        # Change finishes here
-
-        score = model.evaluate(x_test, y_test, verbose=0)
-        print('Test loss:', score[0])
-        print('Test accuracy:', score[1])
diff --git a/keras-visualize-activations/read_activations.py b/keras-visualize-activations/read_activations.py
deleted file mode 100644
index 0e4641ac32bfcd48aa42cefb676d021e27697310..0000000000000000000000000000000000000000
--- a/keras-visualize-activations/read_activations.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import keras.backend as K
-
-
-def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
-    print('----- activations -----')
-    activations = []
-    inp = model.input
-
-    model_multi_inputs_cond = True
-    if not isinstance(inp, list):
-        # only one input! let's wrap it in a list.
-        inp = [inp]
-        model_multi_inputs_cond = False
-
-    outputs = [layer.output for layer in model.layers if
-               layer.name == layer_name or layer_name is None]  # all layer outputs
-
-    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions
-
-    if model_multi_inputs_cond:
-        list_inputs = []
-        list_inputs.extend(model_inputs)
-        list_inputs.append(0.)
-    else:
-        list_inputs = [model_inputs, 0.]
-
-    # Learning phase. 0 = Test mode (no dropout or batch normalization)
-    # layer_outputs = [func([model_inputs, 0.])[0] for func in funcs]
-    layer_outputs = [func(list_inputs)[0] for func in funcs]
-    for layer_activations in layer_outputs:
-        activations.append(layer_activations)
-        if print_shape_only:
-            print(layer_activations.shape)
-        else:
-            print(layer_activations)
-    return activations
-
-
-def display_activations(activation_maps):
-    import numpy as np
-    import matplotlib.pyplot as plt
-    """
-    (1, 26, 26, 32)
-    (1, 24, 24, 64)
-    (1, 12, 12, 64)
-    (1, 12, 12, 64)
-    (1, 9216)
-    (1, 128)
-    (1, 128)
-    (1, 10)
-    """
-    batch_size = activation_maps[0].shape[0]
-    assert batch_size == 1, 'One image at a time to visualize.'
-    for i, activation_map in enumerate(activation_maps):
-        print('Displaying activation map {}'.format(i))
-        shape = activation_map.shape
-        if len(shape) == 4:
-            activations = np.hstack(np.transpose(activation_map[0], (2, 0, 1)))
-        elif len(shape) == 2:
-            # try to make it square as much as possible. we can skip some activations.
-            activations = activation_map[0]
-            num_activations = len(activations)
-            if num_activations > 1024:  # too hard to display it on the screen.
-                square_param = int(np.floor(np.sqrt(num_activations)))
-                activations = activations[0: square_param * square_param]
-                activations = np.reshape(activations, (square_param, square_param))
-            else:
-                activations = np.expand_dims(activations, axis=0)
-        else:
-            raise Exception('len(shape) = 3 has not been implemented.')
-        plt.imshow(activations, interpolation='None', cmap='jet')
-        plt.show()
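
At its core, `get_activations` above builds one backend evaluation function per layer output and calls it with the model inputs plus the learning-phase flag (0 = test mode, so dropout and batch normalization behave as at inference). A stripped-down sketch of that single-layer case, assuming a compiled single-input Keras model `model` and a NumPy batch `x_batch`:

```python
# Stripped-down sketch of the per-layer evaluation that get_activations builds.
# Assumptions: `model` is a compiled single-input Keras 2.x model and `x_batch`
# is a NumPy array shaped like one batch of its inputs.
import keras.backend as K

layer = model.layers[0]  # any layer of interest
func = K.function(model.inputs + [K.learning_phase()], [layer.output])

# Learning phase 0 = test mode (no dropout, batch norm in inference mode).
layer_activations = func([x_batch, 0.])[0]
print(layer_activations.shape)
```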
diff --git a/keras-visualize-activations/requirements.txt b/keras-visualize-activations/requirements.txt
deleted file mode 100644
index 7fdf5d210f59e02ad3a757c0c7416be9c7e3dfcd..0000000000000000000000000000000000000000
--- a/keras-visualize-activations/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-Keras==2.0.2
-natsort==5.0.2
-numpy==1.12.1
-h5py
\ No newline at end of file
diff --git a/keras_visualize_activations/__init__.py b/keras_visualize_activations/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
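
The new, empty `__init__.py` turns the underscored `keras_visualize_activations/` directory into an importable package; the old hyphenated name could never be imported, because hyphens are not valid in Python module names. A sketch of the import this enables, assuming the package sits next to `plotting.py` inside the same parent package (as the relative import in the next hunk suggests):

```python
# The hyphenated directory name was not importable:
#   from .keras-visualize-activations.read_activations import get_activations  # SyntaxError
# With the underscored package and its __init__.py, the relative import works:
from .keras_visualize_activations.read_activations import get_activations
```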
diff --git a/plotting.py b/plotting.py
index ecad66023599bc44be1917d0476c44dabe958378..210cab1db657852fc0634f1443239baee4097111 100644
--- a/plotting.py
+++ b/plotting.py
@@ -9,7 +9,7 @@ from matplotlib.ticker import LogFormatter
 from mpl_toolkits.axes_grid1 import ImageGrid
 import numpy as np
 
-from .keras-visualize-activations.read_activations import get_activations
+from .keras_visualize_activations.read_activations import get_activations
 
 import meme
 
@@ -39,6 +39,7 @@ def plot_NN_vs_var_1D(plotname, means, scorefun, var_index, var_range, var_label
         ax.set_xlabel(var_label)
     ax.set_ylabel("NN output")
     fig.savefig(plotname)
+    plt.close(fig)
 
 
 def plot_NN_vs_var_2D(plotname, means,
@@ -98,6 +99,7 @@ def plot_NN_vs_var_2D(plotname, means,
     if var2_label is not None:
         ax.set_ylabel(var2_label)
     fig.savefig(plotname)
+    plt.close(fig)
 
 
 def plot_NN_vs_var_2D_all(plotname, model, means,
@@ -120,8 +122,8 @@ def plot_NN_vs_var_2D_all(plotname, model, means,
     events = np.tile(means, len(var1_vals)*len(var2_vals)).reshape(len(var2_vals), len(var1_vals), -1)
     for i, y in enumerate(var2_vals):
         for j, x in enumerate(var1_vals):
-            events[i][j][index1] = x
-            events[i][j][index2] = y
+            events[i][j][var1_index] = x
+            events[i][j][var2_index] = y
 
     # convert back into 1d array
     events = events.reshape(-1, len(means))
@@ -157,6 +159,7 @@ def plot_NN_vs_var_2D_all(plotname, model, means,
             ax.text(0., 0.5, "{}, {}".format(layer, neuron), transform=ax.transAxes)
 
     fig.savefig(plotname, bbox_inches='tight')
+    plt.close(fig)
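
The three `plt.close(fig)` lines added after `fig.savefig(...)` follow the usual matplotlib pattern for scripts that write many plots to disk: closing each figure releases its memory and keeps open figures from accumulating (with the associated "more than 20 figures" warning) when the plotting functions are called in a loop. A minimal sketch of the pattern; the helper and file names here are illustrative, not from this repository:

```python
# Illustrative sketch of the save-then-close pattern used in plotting.py above.
import matplotlib
matplotlib.use('Agg')  # assumption: non-interactive backend for batch plotting
import matplotlib.pyplot as plt
import numpy as np

def save_plot(plotname, y):
    """Save one figure to disk and free it immediately."""
    fig, ax = plt.subplots()
    ax.plot(y)
    fig.savefig(plotname)
    plt.close(fig)  # release the figure so repeated calls don't accumulate

for i in range(100):
    save_plot('plot_{:03d}.png'.format(i), np.random.randn(50))
```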