# -*- coding: utf-8 -*-
import string

import numpy as np
import pandas as pd
from tqdm import tqdm


def get_character_dict():
    # map every lowercase letter and punctuation character to an integer index
    return dict((char, idx) for (idx, char) in
                enumerate(string.ascii_lowercase + string.punctuation))


def get_user_chunks(dataFrame, windowSize=10, overlapping=False,
                    maxLengthInSeconds=300):
    maxMilliSeconds = maxLengthInSeconds * 1000
    outDomainLists = []
    outDFFrames = []
    userIDs = np.arange(len(dataFrame))
    # non-overlapping windows step by windowSize; overlapping windows step by 1
    if not overlapping:
        numBlocks = int(np.ceil(len(dataFrame) / float(windowSize)))
        blockStarts = np.arange(numBlocks) * windowSize
    else:
        numBlocks = max(len(dataFrame) + 1 - windowSize, 0)
        blockStarts = np.arange(numBlocks)
    for blockStart in blockStarts:
        curIDs = userIDs[blockStart:blockStart + windowSize]
        useData = dataFrame.iloc[curIDs]
        curDomains = useData['domain']
        if maxLengthInSeconds != -1:
            # drop flows that fall outside the chunk's time-out window;
            # np.where returns a tuple, so take [0] to get the index array
            curMinMilliSeconds = np.min(useData['timeStamp']) + maxMilliSeconds
            underTimeOutIDs = np.where(
                np.array(useData['timeStamp']) <= curMinMilliSeconds)[0]
            if len(underTimeOutIDs) != len(curIDs):
                curIDs = curIDs[underTimeOutIDs]
                useData = dataFrame.iloc[curIDs]
                curDomains = useData['domain']
        outDomainLists.append(list(curDomains))
        outDFFrames.append(useData)
    return (outDomainLists, outDFFrames)


def get_domain_features(domain, vocab, max_length=40):
    curFeature = np.zeros([max_length, ])
    for j in range(np.min([len(domain), max_length])):
        # encode the domain back to front; -(j + 1) fixes the off-by-one of
        # domain[-j], which re-read the first character at j == 0
        curCharacter = domain[-(j + 1)]
        if curCharacter in vocab:
            curFeature[j] = vocab[curCharacter]
    return curFeature


def get_flow_features(flow):
    keys = ['duration', 'bytes_down', 'bytes_up']
    features = np.zeros([len(keys), ])
    for i, key in enumerate(keys):
        # TODO: check whether downstream code still works after an exception;
        # missing or malformed values silently default to zero here, which can
        # hide inconsistent feature lengths
        try:
            features[i] = np.log1p(flow[key]).astype(float)
        except (KeyError, TypeError, ValueError):
            pass
    return features
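
# Illustrative usage sketch (not part of the original pipeline): encoding one
# domain and one flow row with the helpers above. The literal values are
# made up; the keys follow the ones used in get_flow_features.
#
#     vocab = get_character_dict()
#     domain_vec = get_domain_features("example.com", vocab, max_length=40)
#     flow_vec = get_flow_features({'duration': 2.5,
#                                   'bytes_down': 1024,
#                                   'bytes_up': 256})
#     # domain_vec.shape == (40,), flow_vec.shape == (3,)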

def get_cisco_features(curDataLine, urlSIPDict):
    numCiscoFeatures = 30
    try:
        ciscoFeatures = urlSIPDict[str(curDataLine['domain']) +
                                   str(curDataLine['server_ip'])]
        # log transform
        ciscoFeatures = np.log1p(ciscoFeatures).astype(float)
        return ciscoFeatures.ravel()
    except KeyError:
        # unknown (domain, server_ip) pairs get an all-zero feature vector
        return np.zeros([numCiscoFeatures, ]).ravel()


def create_dataset_from_flows(user_flow_df, char_dict, maxLen, windowSize=10,
                              use_cisco_features=False):
    domainLists = []
    dfLists = []
    print("get chunks from user data frames")
    for i, user_flow in enumerate(get_flow_per_user(user_flow_df)):
        (domainListsTmp, dfListsTmp) = get_user_chunks(user_flow,
                                                       windowSize=windowSize,
                                                       overlapping=True,
                                                       maxLengthInSeconds=-1)
        domainLists += domainListsTmp
        dfLists += dfListsTmp
        # TODO: development shortcut that only keeps the first users; remove later
        if i >= 10:
            break
    print("create training dataset")
    return create_dataset_from_lists(
        domains=domainLists, dfs=dfLists, vocab=char_dict,
        maxLen=maxLen, use_cisco_features=use_cisco_features,
        urlSIPDict=dict(), window_size=windowSize)


def create_dataset_from_lists(domains, dfs, vocab, maxLen,
                              use_cisco_features=False, urlSIPDict=dict(),
                              window_size=10):
    # TODO: check for hits vs virusTotalHits consistency
    if 'hits' in dfs[0].keys():
        hitName = 'hits'
    elif 'virusTotalHits' in dfs[0].keys():
        hitName = 'virusTotalHits'
    else:
        raise ValueError("expected a 'hits' or 'virusTotalHits' column")
    numFlowFeatures = 3
    numCiscoFeatures = 30
    numFeatures = numFlowFeatures
    if use_cisco_features:
        numFeatures += numCiscoFeatures
    Xs = []
    hits = []
    names = []
    servers = []
    trusted_hits = []
    # two input matrices per window position: domain features and flow features
    for i in range(window_size):
        Xs.append(np.zeros([len(domains), maxLen]))
        Xs.append(np.zeros([len(domains), numFeatures]))
    for i in tqdm(np.arange(len(domains)), miniters=10):
        ctr = 0
        for j in range(np.min([window_size, len(domains[i])])):
            Xs[ctr][i, :] = get_domain_features(domains[i][j], vocab, maxLen)
            ctr += 1
            if use_cisco_features:
                Xs[ctr][i, 0:numFlowFeatures] = get_flow_features(dfs[i].iloc[j])
                Xs[ctr][i, numFlowFeatures:] = get_cisco_features(dfs[i].iloc[j],
                                                                  urlSIPDict)
            else:
                Xs[ctr][i, :] = get_flow_features(dfs[i].iloc[j])
            ctr += 1
        hits.append(np.max(dfs[i][hitName]))
        names.append(np.unique(dfs[i]['user_hash']))
        servers.append(np.max(dfs[i]['serverLabel']))
        trusted_hits.append(np.max(dfs[i]['trustedHits']))
    return (Xs, np.array(hits), np.array(names), np.array(servers),
            np.array(trusted_hits))


def discretize_label(values, threshold):
    maxVal = np.max(values)
    if maxVal >= threshold:
        return 1.0    # malicious
    elif maxVal == -1:
        return -1.0   # unknown
    elif 0 < maxVal < threshold:
        return -2.0   # some hits, but below the threshold
    else:
        return 0.0    # benign


def get_user_flow_data():
    df = pd.read_csv("data/rk_data.csv.gz")
    # drop the stray CSV index column and index the frame by user
    df.drop(columns=["Unnamed: 0"], inplace=True)
    df.set_index(keys=['user_hash'], drop=False, inplace=True)
    return df


def get_flow_per_user(df):
    users = df['user_hash'].unique().tolist()
    for user in users:
        yield df.loc[df.user_hash == user]
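

if __name__ == "__main__":
    # Minimal end-to-end sketch of the pipeline above. Assumes
    # data/rk_data.csv.gz exists with the columns used by this module
    # (user_hash, domain, timeStamp, duration, bytes_down, bytes_up,
    # serverLabel, trustedHits, and a hits/virusTotalHits column); the
    # threshold of 3 hits is an illustrative choice, not fixed by this code.
    char_dict = get_character_dict()
    user_flow_df = get_user_flow_data()
    Xs, hits, names, servers, trusted_hits = create_dataset_from_flows(
        user_flow_df, char_dict, maxLen=40, windowSize=10)
    # one label per window; hits[i] is already the window-wise maximum
    labels = np.array([discretize_label([h], threshold=3) for h in hits])
    print("created %d windows" % len(hits))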