refactor training - separate staggered training; make differences as small as possible
@@ -3,7 +3,6 @@ from collections import namedtuple
import keras
from keras.engine import Input, Model as KerasModel
from keras.layers import Activation, Conv1D, Dense, Dropout, Embedding, GlobalMaxPooling1D, TimeDistributed
from keras.regularizers import l2

import dataset
@@ -58,7 +57,7 @@ def get_model(cnnDropout, flow_features, domain_features, window_size, domain_le
    # remove temporal dimension by global max pooling
    y = GlobalMaxPooling1D()(y)
    y = Dropout(cnnDropout)(y)
    y = Dense(dense_dim, kernel_regularizer=l2(0.1), activation='relu')(y)
    y = Dense(dense_dim, activation='relu')(y)
    out_client = Dense(1, activation='sigmoid', name="client")(y)
    out_server = Dense(1, activation='sigmoid', name="server")(y)
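For context, here is a minimal runnable sketch of the two-headed model this hunk builds, with the shared trunk compiled against one binary cross-entropy loss per named output. The input shape, filter size, and the Conv1D front end are assumptions for illustration; the diff only shows the pooling, dense, and output layers.

import keras
from keras.engine import Input, Model as KerasModel
from keras.layers import Conv1D, Dense, Dropout, GlobalMaxPooling1D
from keras.regularizers import l2

# Hypothetical values; the real ones come from get_model's arguments.
window_size, flow_features, dense_dim, cnnDropout = 10, 4, 128, 0.5

# Assumed front end: the hunk starts after the convolutional stack.
flow_input = Input(shape=(window_size, flow_features), name="flow_input")
y = Conv1D(filters=128, kernel_size=3, activation='relu')(flow_input)

# remove temporal dimension by global max pooling
y = GlobalMaxPooling1D()(y)
y = Dropout(cnnDropout)(y)
y = Dense(dense_dim, kernel_regularizer=l2(0.1), activation='relu')(y)
y = Dense(dense_dim, activation='relu')(y)
out_client = Dense(1, activation='sigmoid', name="client")(y)
out_server = Dense(1, activation='sigmoid', name="server")(y)

model = KerasModel(inputs=flow_input, outputs=[out_client, out_server])
# One loss per named head; both heads train jointly through the shared trunk.
model.compile(optimizer='adam',
              loss={'client': 'binary_crossentropy',
                    'server': 'binary_crossentropy'})

If the staggered training named in the commit message means fitting the heads in separate stages, one way to do that with this graph is to compile two single-output KerasModel instances that share these layers and fit them alternately; the part of the diff shown here does not include that logic.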