# Model construction helpers and custom Keras metrics (precision/recall/F-score).
import keras.backend as K
|
|
|
|
from . import pauls_networks
|
|
from . import renes_networks
|
|
|
|
|
|
def get_models_by_params(params: dict):
    """Build the embedding and prediction models described by ``params``.

    The network implementation is selected via ``params["type"]``
    ("rene" -> renes_networks, anything else -> pauls_networks).

    Returns:
        tuple: ``(embedding_model, predict_model, new_model)``.
    """
    # pick the implementation module based on the configured network type
    impl = renes_networks if params.get("type") == "rene" else pauls_networks

    # dropout is shared by the embedding and both prediction models
    dropout = params.get("dropout")

    # embedding sub-model
    embedding_model = impl.get_embedding(
        params.get("embedding_size"),
        params.get("input_length"),
        params.get("filter_embedding"),
        params.get("kernel_embedding"),
        params.get("hidden_embedding"),
        dropout,
    )

    # both prediction models take the same argument list
    # NOTE(review): the config key is "kernels_main" (plural) while the
    # embedding key is "kernel_embedding" -- kept as-is to match existing configs
    main_args = (
        dropout,
        params.get("flow_features"),
        params.get("domain_features"),
        params.get("window_size"),
        params.get("domain_length"),
        params.get("filter_main"),
        params.get("kernels_main"),
        params.get("dense_main"),
        embedding_model,
        params.get("model_output", "both"),
    )

    predict_model = impl.get_model(*main_args)
    new_model = impl.get_new_model(*main_args)

    return embedding_model, predict_model, new_model
|
|
|
|
|
|
def get_metrics():
    """Return a mapping from metric name to the custom Keras metric function.

    Returns:
        dict: keys "precision", "recall", "f1_score" mapped to the
        module-level metric callables.
    """
    # dict literal instead of dict([...]) -- same contents, idiomatic form
    return {
        "precision": precision,
        "recall": recall,
        "f1_score": f1_score,
    }
|
|
|
|
|
|
def get_metric_functions():
    """Return the custom Keras metric callables as a list."""
    metric_funcs = [precision, recall]
    metric_funcs.append(f1_score)
    return metric_funcs
|
|
|
|
|
|
def precision(y_true, y_pred):
    """Batch-wise precision: true positives over predicted positives.

    Epsilon is added to the denominator to avoid division by zero when
    nothing is predicted positive.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (pred_pos + K.epsilon())
|
|
|
|
|
|
def recall(y_true, y_pred):
    """Batch-wise recall: true positives over actual positives.

    Epsilon is added to the denominator to avoid division by zero when
    there are no positive labels.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
|
|
|
|
|
|
def f1_score(y_true, y_pred):
    """F1 score: the F-beta score with beta = 1."""
    f1 = f_score(1)
    return f1(y_true, y_pred)
|
|
|
|
|
|
def f05_score(y_true, y_pred):
    """F0.5 score: the F-beta score with beta = 0.5 (weights precision higher)."""
    f05 = f_score(0.5)
    return f05(y_true, y_pred)
|
|
|
|
|
|
def f_score(beta):
    """Build an F-beta metric function for the given ``beta``.

    Returns a callable ``(y_true, y_pred) -> fbeta`` computed from the
    module-level ``precision`` and ``recall`` metrics, with epsilon in the
    denominator to avoid division by zero.
    """
    beta_sq = beta ** 2

    def _fbeta(y_true, y_pred):
        prec = precision(y_true, y_pred)
        rec = recall(y_true, y_pred)
        return (1 + beta_sq) * (prec * rec) / (beta_sq * prec + rec + K.epsilon())

    return _fbeta
|