Nikolai.Hartmann / KerasROOTClassification

Compare revisions: 413f92b18b2f08fa087184529ec4603b7346b99f to master
Changes are shown as if the source revision was being merged into the target revision.

Source: Nikolai.Hartmann/KerasROOTClassification, revision master
Target: Eric.Schanet/KerasROOTClassification, revision 413f92b18b2f08fa087184529ec4603b7346b99f
Commits on Source (3)

ef262112: adding option for leaky relu (Nikolai.Hartmann, authored 6 years ago)
79807735: fix WeightedRobustScaler (Nikolai Hartmann, authored 6 years ago)
367e8290: fix plot_input for usage with training batches (Nikolai.Hartmann, authored 6 years ago)
Showing 2 changed files with 31 additions and 6 deletions:

toolkit.py: 13 additions, 0 deletions
utils.py: 18 additions, 6 deletions
toolkit.py (view file @ 367e8290)
@@ -38,6 +38,7 @@ from keras.models import Sequential, Model, model_from_json
 from keras.layers import Dense, Dropout, Input, Masking, GRU, LSTM, concatenate, SimpleRNN
 from keras.callbacks import History, EarlyStopping, CSVLogger, ModelCheckpoint, TensorBoard
 from keras.optimizers import SGD
+from keras.activations import relu
 import keras.initializers
 import keras.optimizers
 from keras.utils.vis_utils import model_to_dot
@@ -161,6 +162,8 @@ class ClassificationProject(object):
     :param activation_function_output: activation function in the output layer
+    :param leaky_relu_alpha: set this to a non-zero value to use the LeakyReLU variant with a slope in the negative part
     :param out_dir: base directory in which the project directories should be stored
     :param scaler_type: sklearn scaler class name to transform the data before training (options: "StandardScaler", "RobustScaler")
@@ -269,6 +272,7 @@ class ClassificationProject(object):
                  kfold_splits=None,
                  kfold_index=0,
                  activation_function='relu',
+                 leaky_relu_alpha=None,
                  activation_function_output='sigmoid',
                  scaler_type="WeightedRobustScaler",
                  step_signal=2,
@@ -344,6 +348,9 @@ class ClassificationProject(object):
         self.kfold_splits = kfold_splits
         self.kfold_index = kfold_index
         self.activation_function = activation_function
+        self.leaky_relu_alpha = leaky_relu_alpha
+        if self.activation_function == "relu" and self.leaky_relu_alpha:
+            self.activation_function = lambda x: relu(x, alpha=self.leaky_relu_alpha)
         self.activation_function_output = activation_function_output
         self.scaler_type = scaler_type
         self.step_signal = step_signal
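The lambda above relies on keras.activations.relu accepting an alpha argument for the slope in the negative part. A minimal sketch of the resulting behaviour, assuming the standalone Keras 2.x API that this file imports:

import numpy as np
import keras.backend as K
from keras.activations import relu

leaky = lambda x: relu(x, alpha=0.1)              # as constructed in the hunk above
x = K.constant(np.array([-2.0, -0.5, 0.0, 1.0]))
print(K.eval(leaky(x)))                           # [-0.2  -0.05  0.    1.  ]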
@@ -644,6 +651,7 @@ class ClassificationProject(object):
         elif self.scaler_type == "WeightedRobustScaler":
             self._scaler = WeightedRobustScaler()
             scaler_fit_kwargs["weights"] = self.w_train_tot
+            scaler_fit_kwargs["mask_value"] = self.mask_value
         else:
             raise ValueError("Scaler type {} unknown".format(self.scaler_type))
         logger.info("Fitting {} to training data".format(self.scaler_type))
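Presumably (not shown in this hunk) the collected kwargs are then forwarded when the scaler is fitted, which is how the new mask_value entry reaches WeightedRobustScaler.fit in utils.py; a sketch of that pattern, with assumed attribute names:

# Hypothetical call site following this hunk (names are assumptions):
self._scaler.fit(self.x_train, **scaler_fit_kwargs)
# i.e. WeightedRobustScaler.fit(X, weights=..., mask_value=...)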
@@ -805,6 +813,7 @@ class ClassificationProject(object):
         if self._model is None:
             # input
             input_layer = Input((len(self.fields),))
@@ -1326,6 +1335,10 @@ class ClassificationProject(object):
                 break
         if self.target_fields:
             y = y[0]
+        try:
+            x = self.get_input_flat(x)
+        except NameError:
+            pass
         bkg_batch = x[:,var_index][y==0]
         sig_batch = x[:,var_index][y==1]
         bkg_weights_batch = w[y==0]
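The background/signal selection in this hunk is plain NumPy boolean indexing on the label array; a self-contained sketch:

import numpy as np

x = np.array([[0.1, 1.0], [0.2, 2.0], [0.3, 3.0]])
y = np.array([0, 1, 0])
var_index = 1

bkg_batch = x[:, var_index][y == 0]  # column values where the label is background
sig_batch = x[:, var_index][y == 1]  # column values where the label is signal
print(bkg_batch)                     # [1. 3.]
print(sig_batch)                     # [2.]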
utils.py (view file @ 367e8290)
@@ -197,14 +197,26 @@ def weighted_quantile(values, quantiles, sample_weight=None, values_sorted=False
 class WeightedRobustScaler(RobustScaler):
-    def fit(self, X, y=None, weights=None):
-        if not np.isnan(X).any():
+    def fit(self, X, y=None, weights=None, mask_value=None):
+        if not np.isnan(X).any() and mask_value is not None and weights is None:
             # these checks don't work for nan values
-            super(WeightedRobustScaler, self).fit(X, y)
-            if weights is None:
-                return self
+            return super(WeightedRobustScaler, self).fit(X, y)
         else:
-            wqs = np.array([weighted_quantile(X[:,i][~np.isnan(X[:,i])], [0.25, 0.5, 0.75], sample_weight=weights) for i in range(X.shape[1])])
+            if weights is None:
+                weights = np.ones(len(self.X))
+            wqs = []
+            for i in range(X.shape[1]):
+                mask = ~np.isnan(X[:,i])
+                if mask_value is not None:
+                    mask &= (X[:,i] != mask_value)
+                wqs.append(
+                    weighted_quantile(
+                        X[:,i][mask],
+                        [0.25, 0.5, 0.75],
+                        sample_weight=weights[mask]
+                    )
+                )
+            wqs = np.array(wqs)
             self.center_ = wqs[:,1]
             self.scale_ = wqs[:,2] - wqs[:,0]
             self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
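A hedged usage sketch of the new fit signature (assumes utils.py's imports of RobustScaler, _handle_zeros_in_scale, and the weighted_quantile helper shown in the hunk header; the mask value -999.0 is illustrative). Weights are passed explicitly here, which routes fit into the weighted-quantile branch:

import numpy as np

X = np.array([
    [1.0,    5.0],
    [2.0, -999.0],   # -999.0 stands in for a masked/padded entry
    [3.0,    7.0],
    [4.0,    9.0],
])
w = np.array([1.0, 1.0, 2.0, 1.0])   # per-event weights

scaler = WeightedRobustScaler()
scaler.fit(X, weights=w, mask_value=-999.0)
# Per column: masked and NaN entries are dropped, then the weighted
# 25%/50%/75% quantiles define center_ and scale_:
print(scaler.center_)  # weighted medians per column
print(scaler.scale_)   # weighted interquartile ranges per column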