# -*- coding: utf-8 -*-
"""Build per-user, windowed feature datasets from DNS/flow log DataFrames.

Each user's flows are cut into windows of up to ``windowSize`` rows; every
window yields one character-encoded feature matrix per domain plus one
numeric flow-feature row, and a label derived from the window's hit count.
"""
import string

import numpy as np
import pandas as pd

try:
    from tqdm import tqdm
except ImportError:  # optional dependency: degrade to a plain pass-through
    def tqdm(iterable, **kwargs):
        return iterable


def get_character_dict():
    """Map each lowercase ASCII letter and punctuation character to a
    unique integer index (used to encode domain names)."""
    return {char: idx
            for idx, char in enumerate(string.ascii_lowercase
                                       + string.punctuation)}


def get_user_chunks(dataFrame, windowSize=10, overlapping=False,
                    maxLengthInSeconds=300):
    """Split one user's flow DataFrame into windows of up to ``windowSize`` rows.

    Args:
        dataFrame: flows of a single user; must have 'domain' and
            'timeStamp' (milliseconds) columns.
        windowSize: maximum number of rows per window.
        overlapping: if False, consecutive disjoint windows; if True, a
            sliding window advancing one row at a time.
        maxLengthInSeconds: drop rows more than this many seconds after the
            window's first flow; -1 disables the time cutoff.

    Returns:
        (outDomainLists, outDFFrames): per window, the list of domains and
        the corresponding DataFrame slice.
    """
    maxMilliSeconds = maxLengthInSeconds * 1000
    outDomainLists = []
    outDFFrames = []
    userIDs = np.arange(len(dataFrame))

    # Both modes reduce to a list of index windows; the per-window work
    # below is shared (the original duplicated it in each branch).
    if not overlapping:
        numBlocks = int(np.ceil(float(len(dataFrame)) / float(windowSize)))
        starts = [blockID * windowSize for blockID in range(numBlocks)]
    else:
        numBlocks = len(dataFrame) + 1 - windowSize
        starts = list(range(numBlocks))

    for start in starts:
        curIDs = userIDs[start:start + windowSize]
        useData = dataFrame.iloc[curIDs]
        if maxLengthInSeconds != -1:
            # Keep only rows within maxLengthInSeconds of the window's
            # earliest flow.  BUGFIX: np.where returns a tuple, so the
            # original compared len(tuple) == 1 against len(curIDs); take
            # element [0] so the guard actually detects timed-out rows.
            curMinMilliSeconds = np.min(useData['timeStamp']) + maxMilliSeconds
            underTimeOutIDs = np.where(
                np.array(useData['timeStamp']) <= curMinMilliSeconds)[0]
            if len(underTimeOutIDs) != len(curIDs):
                curIDs = curIDs[underTimeOutIDs]
                useData = dataFrame.iloc[curIDs]
        outDomainLists.append(list(useData['domain']))
        outDFFrames.append(useData)
    return (outDomainLists, outDFFrames)


def getFeatureVecForDomain(domain, characterDict, maxLen=40):
    """Encode a domain as a fixed-length vector of character indices.

    Position ``j`` holds the index of the j-th character counted from the
    END of the domain (TLD first); characters not in ``characterDict`` and
    unused positions stay 0.
    """
    curFeature = np.zeros([maxLen, ])
    for j in range(min(len(domain), maxLen)):
        # BUGFIX: the original used domain[-j]; at j == 0 that is
        # domain[0] (since -0 == 0), so the FIRST character leaked into
        # position 0.  domain[-(j + 1)] walks from the end as intended.
        curCharacter = domain[-(j + 1)]
        if curCharacter in characterDict:
            curFeature[j] = characterDict[curCharacter]
    return curFeature


def getFlowFeatures(curDataLine):
    """Return log1p-transformed [duration, bytes_down, bytes_up] for one
    flow row; a missing or non-numeric field leaves its slot at 0."""
    useKeys = ['duration', 'bytes_down', 'bytes_up']
    curFeature = np.zeros([len(useKeys), ])
    for i, curKey in enumerate(useKeys):
        try:
            curFeature[i] = np.log1p(curDataLine[curKey]).astype(float)
        except (KeyError, TypeError, ValueError):
            # best-effort: keep 0 for fields that are absent or unparsable
            pass
    return curFeature


def getCiscoFeatures(curDataLine, urlSIPDict):
    """Look up the 30 per-(domain, server_ip) Cisco features for one flow
    row, log1p-transformed; return zeros when the key is absent."""
    numCiscoFeatures = 30
    try:
        key = str(curDataLine['domain']) + str(curDataLine['server_ip'])
        ciscoFeatures = urlSIPDict[key]
        return np.log1p(ciscoFeatures).astype(float).ravel()
    except (KeyError, TypeError, ValueError):
        # unknown (domain, server_ip) pair -> neutral all-zero features
        return np.zeros([numCiscoFeatures, ]).ravel()


def create_dataset_from_flows(user_flow_df, char_dict, maxLen, threshold=3,
                              windowSize=10):
    """Chunk per-user flows into windows and build the training dataset.

    NOTE(review): only the first 11 users are processed (the ``i >= 10``
    break below) — presumably a debugging cap; confirm before production use.
    """
    domainLists = []
    dfLists = []
    print("get chunks from user data frames")
    for i, user_flow in enumerate(get_flow_per_user(user_flow_df)):
        (domainListsTmp, dfListsTmp) = get_user_chunks(
            user_flow, windowSize=windowSize,
            overlapping=False, maxLengthInSeconds=-1)
        domainLists += domainListsTmp
        dfLists += dfListsTmp
        if i >= 10:
            break
    print("create training dataset")
    return create_dataset_from_lists(
        domainLists=domainLists, dfLists=dfLists,
        charachterDict=char_dict,
        maxLen=maxLen, threshold=threshold,
        flagUseCiscoFeatures=False, urlSIPDIct=dict(),
        windowSize=windowSize)


def create_dataset_from_lists(domainLists, dfLists, charachterDict, maxLen,
                              threshold=3, flagUseCiscoFeatures=False,
                              urlSIPDIct=dict(), windowSize=10):
    """Convert windowed domain lists / DataFrames into model input arrays.

    Returns:
        (outputFeatures, label, hits, trainNames) where outputFeatures is a
        list of 2*windowSize arrays (domain matrix and flow-feature row,
        alternating, one pair per window slot), label is 1.0 for windows
        with max hits >= threshold, -1.0 for unlabeled (-1), -2.0 for
        0 < hits < threshold, else 0.0.
    """
    if 'hits' in dfLists[0].keys():
        hitName = 'hits'
    elif 'virusTotalHits' in dfLists[0].keys():
        hitName = 'virusTotalHits'
    else:
        # BUGFIX: hitName was left unbound here (latent NameError below)
        raise ValueError(
            "dfLists entries need a 'hits' or 'virusTotalHits' column")
    numFlowFeatures = 3
    numCiscoFeatures = 30
    numFeatures = numFlowFeatures
    if flagUseCiscoFeatures:
        numFeatures += numCiscoFeatures

    outputFeatures = []
    label = []
    hits = []
    trainNames = []
    # One (domain matrix, flow-feature matrix) pair per window slot.
    for i in range(windowSize):
        outputFeatures.append(np.zeros([len(domainLists), maxLen]))
        outputFeatures.append(np.zeros([len(domainLists), numFeatures]))

    for i in tqdm(np.arange(len(domainLists)), miniters=10):
        curCounter = 0
        for j in range(min(windowSize, len(domainLists[i]))):
            outputFeatures[curCounter][i, :] = getFeatureVecForDomain(
                domainLists[i][j], charachterDict, maxLen)
            curCounter += 1
            if flagUseCiscoFeatures:
                outputFeatures[curCounter][i, 0:numFlowFeatures] = \
                    getFlowFeatures(dfLists[i].iloc[j])
                outputFeatures[curCounter][i, numFlowFeatures:] = \
                    getCiscoFeatures(dfLists[i].iloc[j], urlSIPDIct)
            else:
                outputFeatures[curCounter][i, :] = getFlowFeatures(
                    dfLists[i].iloc[j])
            curCounter += 1
        # hoist the repeated column max out of the label cascade
        maxHits = np.max(dfLists[i][hitName])
        curLabel = 0.0
        if maxHits >= threshold:
            curLabel = 1.0
        elif maxHits == -1:
            curLabel = -1.0
        elif 0 < maxHits < threshold:
            curLabel = -2.0
        label.append(curLabel)
        hits.append(maxHits)
        trainNames.append(np.unique(dfLists[i]['user_hash']))
    return (outputFeatures, np.array(label), np.array(hits),
            np.array(trainNames))


def get_user_flow_data():
    """Load the per-flow training data CSV and index it by user_hash.

    Originally created with createTrainDataMultipleTaskLearning.py
    (joblib); changed to a csv file.
    """
    trainDFs = pd.read_csv("data/rk_data.csv.gz")
    # BUGFIX: positional axis argument was removed in pandas 2.x;
    # also drop the CSV's stale positional index column explicitly.
    trainDFs.drop("Unnamed: 0", axis=1, inplace=True)
    trainDFs.set_index(keys=['user_hash'], drop=False, inplace=True)
    # (removed: unused `users`/`u0` locals that did a dead .loc scan)
    return trainDFs


def get_flow_per_user(df):
    """Yield one sub-DataFrame per distinct user_hash, in first-seen order."""
    for user in df['user_hash'].unique().tolist():
        yield df.loc[df.user_hash == user]