reformat code
This commit is contained in:
parent 24d677e101
commit 87b927cdc9
@@ -1,33 +1,21 @@
 # -*- coding: utf-8 -*-
+import joblib
+import keras
+import numpy as np
+import tensorflow as tf
+from keras.layers import Dense, Dropout, Conv1D, GlobalMaxPooling1D, Reshape
+from keras.layers import Input
+from keras.models import Model
+from keras.utils import np_utils
 from tqdm import tqdm
 
-import tensorflow as tf
+import stackedNeuralModels as stackedNeuralModels
 
 config = tf.ConfigProto(log_device_placement=True)
 config.gpu_options.per_process_gpu_memory_fraction = 0.5
 config.gpu_options.allow_growth = True
 session = tf.Session(config=config)
 
-from pymongo import MongoClient
-import joblib
-import pickle
-import numpy as np
-
-import ciscoProcessing as ciscoProcessing
-import stackedNeuralModels as stackedNeuralModels
-
-from sklearn.metrics import precision_recall_curve
-from sklearn.metrics import auc, roc_curve
-import matplotlib.pyplot as plt
-
-import keras
-from keras.models import Sequential
-from keras.layers import Dense, Activation,LSTM,Embedding,Dropout,Conv1D, GlobalMaxPooling1D, Merge, Reshape, Lambda
-from keras.layers import Convolution1D
-from keras.layers import Input
-from keras.models import Model
-from keras.utils import np_utils
-
 if __name__ == "__main__":
     # parameter
     innerCNNFilters = 512
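The hunk above only reorders imports; the TF 1.x-style GPU session setup is kept as-is. As a self-contained reference, here is a minimal sketch of that configuration pattern on its own, with the values from the diff (under TensorFlow 2.x the same calls live under tf.compat.v1):

import tensorflow as tf

# Log which device each op is placed on, cap this process at 50% of GPU memory,
# and let the allocation grow lazily instead of grabbing the whole card up front.
config = tf.ConfigProto(log_device_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.5
config.gpu_options.allow_growth = True
session = tf.Session(config=config)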
@@ -67,14 +55,16 @@ if __name__ == "__main__":
     if 'testDFs' not in locals():
         tmpLoad = joblib.load(testDataPath)
 
-
-    sharedCNNFun = stackedNeuralModels.getCNNWitoutLastLayerFunctional(len(characterDict)+1,embeddingSize,maxLen,domainFeatures,kernel_size,domainFeatures,0.5)
+    sharedCNNFun = stackedNeuralModels.getCNNWitoutLastLayerFunctional(len(characterDict) + 1, embeddingSize, maxLen,
+                                                                       domainFeatures, kernel_size, domainFeatures, 0.5)
 
     domainLists = []
     dfLists = []
     for i in tqdm(np.arange(len(trainDFs)), miniters=10):
         (domainListsTmp, dfListsTmp) = stackedNeuralModels.getChunksFromUserDataFrame(trainDFs[i],
-            windowSize=windowSize,overlapping=False,maxLengthInSeconds=maxLengthInSeconds)
+                                                                                      windowSize=windowSize,
+                                                                                      overlapping=False,
+                                                                                      maxLengthInSeconds=maxLengthInSeconds)
         domainLists += domainListsTmp
         dfLists += dfListsTmp
         if i == 100:
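getCNNWitoutLastLayerFunctional and getChunksFromUserDataFrame are project helpers in stackedNeuralModels, and their bodies are not part of this diff. Purely as a hypothetical illustration of what a "CNN without the last layer" returned by such a helper usually looks like in the Keras functional API (argument order guessed from the call site: vocabulary size, embedding size, max length, filters, kernel size, hidden units, dropout; none of this is the repository's actual implementation):

from keras.layers import Input, Embedding, Conv1D, GlobalMaxPooling1D, Dense, Dropout
from keras.models import Model

def shared_char_cnn(vocab_size, embedding_size, max_len, filters, kernel_size, hidden, dropout):
    # Character-level encoder that stops before the classification layer, so the
    # returned Model can be applied to many inputs with shared weights.
    chars = Input(shape=(max_len,))
    x = Embedding(vocab_size, embedding_size, input_length=max_len)(chars)
    x = Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(hidden, activation='relu')(x)
    x = Dropout(dropout)(x)
    return Model(inputs=chars, outputs=x)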
@@ -89,14 +79,12 @@ if __name__ == "__main__":
     useIDs = np.where(testLabel == 1.0)[0]
     useIDs = np.concatenate([useIDs, np.where(testLabel == 0.0)[0]])
-
-
     testLabel = testLabel[useIDs]
     testHits = testHits[useIDs]
     testNames = testNames[useIDs]
     for i in range(len(testData)):
         testData[i] = testData[i][useIDs]
 
 
     inputList = []
     encodedList = []
     numFeatures = flowFeatures
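The block above reorders the evaluation set so the positive samples come first and then applies the same index array to every parallel array. A minimal sketch of that NumPy pattern with toy values (the variable names are taken from the diff; the data is made up):

import numpy as np

testLabel = np.array([1.0, 0.0, 1.0, 0.0])
testData = [np.arange(4), np.arange(4) * 10]  # two parallel per-sample arrays

useIDs = np.where(testLabel == 1.0)[0]                            # positive rows first
useIDs = np.concatenate([useIDs, np.where(testLabel == 0.0)[0]])  # then negative rows

testLabel = testLabel[useIDs]
testData = [arr[useIDs] for arr in testData]  # keep every array aligned with the labels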
@@ -110,7 +98,6 @@ if __name__ == "__main__":
         merge_layer_input.append(encodedList[i])
         merge_layer_input.append(inputList[(2 * i) + 1])
-
 
     # We can then concatenate the two vectors:
     merged_vector = keras.layers.concatenate(merge_layer_input, axis=-1)
     reshape = Reshape((windowSize, domainFeatures + numFeatures))(merged_vector)
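The concatenate/Reshape step above flattens the per-window feature vectors into one long vector and then folds them back into a (windowSize, features) sequence. A small self-contained sketch of that shape manipulation, with made-up sizes rather than the repository's configuration:

import keras
from keras.layers import Input, Reshape
from keras.models import Model

windowSize, domainFeatures, numFeatures = 10, 32, 4

# One (domainFeatures + numFeatures)-wide vector per window; in the diff these come
# from the shared domain CNN plus the raw flow features, here they are plain Inputs.
merge_layer_input = [Input(shape=(domainFeatures + numFeatures,)) for _ in range(windowSize)]

merged_vector = keras.layers.concatenate(merge_layer_input, axis=-1)          # (batch, windowSize * width)
reshape = Reshape((windowSize, domainFeatures + numFeatures))(merged_vector)  # (batch, windowSize, width)
model = Model(inputs=merge_layer_input, outputs=reshape)
model.summary()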
@@ -133,11 +120,8 @@ if __name__ == "__main__":
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
-
-
-
     epochNumber = 0
     trainLabel = np_utils.to_categorical(testLabel, 2)
     model.fit(x=testData, y=trainLabel,
               epochs=epochNumber + 1, shuffle=True, initial_epoch=epochNumber)  # ,
     # validation_data=(testData,testLabel))
 
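For reference, the one-hot encoding plus single-epoch fit shown in the last hunk, exercised end to end against a stand-in model and random data (the Sequential model and the shapes are assumptions for illustration; only the to_categorical/fit pattern is taken from the diff):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils

# Stand-in two-class classifier; the repository's real model is the windowed CNN above.
model = Sequential([Dense(2, activation='softmax', input_shape=(8,))])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

testData = np.random.rand(32, 8)
testLabel = np.random.randint(0, 2, size=32)
trainLabel = np_utils.to_categorical(testLabel, 2)  # (32, 2) one-hot targets

epochNumber = 0
model.fit(x=testData, y=trainLabel,
          epochs=epochNumber + 1, shuffle=True, initial_epoch=epochNumber)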