2017-06-27 20:29:19 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2017-07-12 10:25:55 +02:00
|
|
|
import logging
|
2017-06-30 09:04:24 +02:00
|
|
|
import string
|
2017-07-16 09:42:52 +02:00
|
|
|
from multiprocessing import Pool
|
2017-06-30 09:04:24 +02:00
|
|
|
|
2017-07-08 17:46:07 +02:00
|
|
|
import h5py
|
2017-09-08 19:10:23 +02:00
|
|
|
import joblib
|
2017-06-29 09:19:36 +02:00
|
|
|
import numpy as np
|
2017-06-30 09:04:24 +02:00
|
|
|
import pandas as pd
|
2017-06-29 09:19:36 +02:00
|
|
|
from tqdm import tqdm
|
2017-06-27 20:29:19 +02:00
|
|
|
|
2017-10-19 17:39:37 +02:00
|
|
|
# Shared module logger for the cisco pipeline.
logger = logging.getLogger('cisco_logger')

# Vocabulary: map every allowed character to a positive integer id.
# Id 0 is reserved for "unknown character" (see encode_char below).
_alphabet = string.ascii_lowercase + string.punctuation + string.digits + " "
char2idx = {character: position + 1 for position, character in enumerate(_alphabet)}

# Inverse mapping: integer id back to its character.
idx2char = {index: character for character, index in char2idx.items()}
|
2017-07-04 09:18:50 +02:00
|
|
|
|
2017-06-27 20:29:19 +02:00
|
|
|
|
2017-06-30 09:04:24 +02:00
|
|
|
def get_character_dict():
    """Return the module-wide character-to-index vocabulary mapping."""
    return char2idx
|
2017-07-04 09:18:50 +02:00
|
|
|
|
|
|
|
|
2017-07-30 13:47:11 +02:00
|
|
|
def get_vocab_size():
    """Vocabulary size, including the reserved id 0 for unknown characters."""
    return 1 + len(char2idx)
|
2017-07-30 13:47:11 +02:00
|
|
|
|
|
|
|
|
2017-07-04 09:18:50 +02:00
|
|
|
def encode_char(c):
    """Map a single character to its integer id; unknown characters map to 0."""
    try:
        return char2idx[c]
    except KeyError:
        return 0
|
|
|
|
|
|
|
|
|
|
|
|
def decode_char(i):
    """Map an integer id back to its character; unknown ids decode to ""."""
    try:
        return idx2char[i]
    except KeyError:
        return ""
|
2017-07-04 09:18:50 +02:00
|
|
|
|
|
|
|
|
|
|
|
# Rebind the scalar helpers as numpy-vectorized versions so they can be
# applied element-wise to arrays; the scalar functions defined above are
# shadowed from this point on.
encode_char = np.vectorize(encode_char)
decode_char = np.vectorize(decode_char)
|
|
|
|
|
|
|
|
|
|
|
|
def encode_domain(domain: str):
    """Encode a domain name as an array of character ids.

    :param domain: domain name string (previous annotation ``string`` was
        the stdlib module, not a type — corrected to ``str``)
    :return: numpy array of integer ids, one per character (0 for unknown)
    """
    return encode_char(list(domain))
|
|
|
|
|
|
|
|
|
|
|
|
def decode_domain(domain):
    """Decode a sequence of character ids back into a domain string."""
    characters = decode_char(domain)
    return "".join(characters)
|
2017-06-30 09:04:24 +02:00
|
|
|
|
|
|
|
|
2017-07-16 09:42:52 +02:00
|
|
|
def get_user_chunks(user_flow, window=10):
    """Split one user's flow data frame into consecutive full windows.

    :param user_flow: data frame of flows belonging to a single user
    :param window: number of rows per window
    :return: list of data frames, each exactly ``window`` rows long;
             trailing rows that do not fill a complete window are dropped

    Note: the original ended with a dead ``pop()`` of a short last chunk —
    floor division already guarantees every produced chunk is full.
    """
    num_windows = len(user_flow) // window
    return [user_flow.iloc[i * window:(i + 1) * window] for i in range(num_windows)]
|
2017-06-30 09:04:24 +02:00
|
|
|
|
|
|
|
|
2017-11-04 12:47:08 +01:00
|
|
|
def get_domain_features(domain: str, max_length=40):
    """Encode a domain as a fixed-length, right-aligned vector of char ids.

    The last ``max_length`` characters of the domain are written into the
    tail of the vector (preserving order); shorter domains are left-padded
    with 0.  The previous annotation ``string`` was the stdlib module, not
    a type — corrected to ``str``.

    :param domain: domain name string
    :param max_length: length of the output vector
    :return: numpy float array of shape (max_length,)
    """
    encoding = np.zeros((max_length,))
    # walk the domain from its end, filling the vector from its end
    for j in range(min(len(domain), max_length)):
        c = domain[len(domain) - 1 - j]
        encoding[max_length - 1 - j] = encode_char(c)
    return encoding
|
2017-06-30 09:04:24 +02:00
|
|
|
|
|
|
|
|
2017-07-11 13:46:25 +02:00
|
|
|
def get_all_flow_features(features):
    """Stack the numeric flow columns of every window and log-scale them.

    :param features: iterable of window data frames with columns
        duration, bytes_up, bytes_down
    :return: log1p-scaled array of shape (windows, rows, 3)
    """
    selected = [window[["duration", "bytes_up", "bytes_down"]] for window in features]
    stacked = np.stack(selected)
    # log1p compresses the heavy-tailed byte/duration counts
    return np.log1p(stacked)
|
|
|
|
|
|
|
|
|
2017-09-08 17:11:13 +02:00
|
|
|
def filter_window_dataset_by_hits(domain, flow, name, hits, trusted_hits, server):
    """Keep only windows with a definite label and derive the client label.

    Positive windows have a VirusTotal hit (``hits == 1.0``) or at least one
    trusted hit; negative windows have ``hits == 0.0``.  Everything else is
    discarded.

    :return: tuple (domain, flow, name, client, server) restricted to the
        selected windows, where ``client`` is a fresh 1.0/0.0 label array
    """
    positives = np.where((hits == 1.0) | (trusted_hits >= 1.0))[0]
    negatives = np.where(hits == 0.0)[0]
    selection = np.concatenate((positives, negatives))

    # client label: positives come first in `selection`, so marking the
    # prefix with 1.0 labels exactly the positive windows
    client = np.zeros(selection.shape, dtype=float)
    client[:len(positives)] = 1.0

    return (domain[selection], flow[selection], name[selection],
            client, server[selection])
|
2017-07-08 15:04:58 +02:00
|
|
|
|
2017-07-05 21:19:19 +02:00
|
|
|
|
2017-10-19 17:39:37 +02:00
|
|
|
def create_raw_dataset_from_flows(user_flow_df, max_len, window_size=10):
    """Build the raw (unfiltered) training dataset from a flow data frame.

    Splits every user's flows into windows of ``window_size`` rows (one
    process-pool task per user), extracts domain and flow features from the
    windows and discretizes the VirusTotal hit counts.

    :param user_flow_df: data frame with one row per flow, incl. user_hash
    :param max_len: maximum encoded domain length
    :param window_size: number of flows per window
    :return: tuple (domain, flow, name, hits, trusted_hits, server)
    """
    logger.info("get chunks from user data frames")
    with Pool() as pool:
        results = []
        for user_flow in tqdm(get_flow_per_user(user_flow_df), total=len(user_flow_df['user_hash'].unique().tolist())):
            results.append(pool.apply_async(get_user_chunks, (user_flow, window_size)))
        # collect windows inside the ``with`` block so the pool is still
        # alive while the async results are fetched
        windows = [window for res in results for window in res.get()]
    logger.info("create training dataset")
    domain, flow, hits, name, server, trusted_hits = create_dataset_from_windows(chunks=windows,
                                                                                 max_len=max_len)
    # make client labels discrete with 4 different values
    hits = np.apply_along_axis(lambda x: make_label_discrete(x, 3), 0, np.atleast_2d(hits))
    return domain, flow, name, hits, trusted_hits, server
|
2017-09-08 13:55:13 +02:00
|
|
|
|
|
|
|
|
|
|
|
def store_h5dataset(path, data: dict):
    """Write every array in ``data`` to ``<path>.h5``, one dataset per key.

    Uses a context manager so the file is closed even if a write raises
    (the original left the handle open on error).

    :param path: file path without the ``.h5`` suffix
    :param data: mapping from dataset name to array-like value
    """
    with h5py.File(path + ".h5", "w") as f:
        for key, val in data.items():
            f.create_dataset(key, data=val)
|
|
|
|
|
|
|
|
|
2017-09-08 19:10:23 +02:00
|
|
|
def check_h5dataset(path):
    """Raise ``FileNotFoundError`` if ``<path>.h5`` does not exist.

    The original returned an open file handle that no caller in this module
    used, leaking the descriptor; the handle is now closed immediately.

    :param path: file path without the ``.h5`` suffix
    :raises FileNotFoundError: if the h5 file is missing
    """
    with open(path + ".h5", "r"):
        pass
|
|
|
|
|
|
|
|
|
2017-07-09 23:58:08 +02:00
|
|
|
def load_h5dataset(path):
    """Open ``<path>.h5`` and return its datasets keyed by name.

    NOTE(review): the returned values are lazy h5py dataset handles, not
    in-memory arrays, so the file handle is deliberately left open to keep
    them readable.
    """
    f = h5py.File(path + ".h5", "r")
    return {key: f[key] for key in f.keys()}
|
2017-07-09 23:58:08 +02:00
|
|
|
|
|
|
|
|
2017-10-19 17:39:37 +02:00
|
|
|
def create_dataset_from_windows(chunks, max_len):
    """
    combines domain and feature windows to sequential training data
    :param chunks: list of flow feature windows (data frames with columns
        domain, user_hash, virusTotalHits, serverLabel, trustedHits)
    :param max_len: maximum encoded domain length
    :return: tuple (domain_features, flow_features, hits, names, servers,
        trusted_hits)
    """

    def get_domain_features_reduced(d):
        # apply_along_axis passes 1-element slices; encode the single domain
        return get_domain_features(d[0], max_len)

    logger.info(" compute domain features")
    domain_features = []
    for ds in tqdm(map(lambda f: f.domain, chunks)):
        domain_features.append(np.apply_along_axis(get_domain_features_reduced, 2, np.atleast_3d(ds)))
    domain_features = np.concatenate(domain_features, 0)
    logger.info(" compute flow features")
    flow_features = get_all_flow_features(chunks)
    logger.info(" select hits")
    # one label per window: the maximum hit count over its rows
    hits = np.max(np.stack(map(lambda f: f.virusTotalHits, chunks)), axis=1)
    logger.info(" select names")
    names = np.stack(map(lambda f: f.user_hash, chunks))
    # every row of a window must belong to the same user
    # NOTE(review): the repeat count 10 hard-codes the window size — confirm
    # it matches the window parameter used upstream
    assert (names[:, :1].repeat(10, axis=1) == names).all()
    names = names[:, 0]
    logger.info(" select servers")
    servers = np.stack(map(lambda f: f.serverLabel, chunks))
    logger.info(" select trusted hits")
    trusted_hits = np.max(np.stack(map(lambda f: f.trustedHits, chunks)), axis=1)
    return (domain_features, flow_features,
            hits, names, servers, trusted_hits)
|
2017-06-30 17:19:04 +02:00
|
|
|
|
|
|
|
|
2017-09-08 17:11:13 +02:00
|
|
|
def make_label_discrete(values, threshold):
    """Collapse a window of hit counts into one of four discrete labels.

    1.0  -> some count reaches ``threshold``
    -1.0 -> the maximum count is exactly -1
    -2.0 -> the maximum is positive but below ``threshold``
    0.0  -> otherwise (no hits)
    """
    peak = np.max(values)
    if peak >= threshold:
        return 1.0
    if peak == -1:
        return -1.0
    if 0 < peak < threshold:
        return -2.0
    return 0.0
|
2017-06-30 09:04:24 +02:00
|
|
|
|
|
|
|
|
2017-07-05 21:19:19 +02:00
|
|
|
def get_user_flow_data(csv_file):
    """Load the flow CSV and keep only the columns the pipeline uses.

    NOTE(review): the dtypes below document the expected column types only —
    they are never passed to ``read_csv``, so pandas still infers the actual
    types; confirm whether that was intentional.

    :param csv_file: path or file-like object readable by pandas
    :return: data frame restricted to the known columns, in this order
    """
    columns = {
        "duration": int,
        "bytes_down": int,
        "bytes_up": int,
        "domain": object,
        "timeStamp": float,
        "http_method": object,
        "server_ip": object,
        "user_hash": float,
        "virusTotalHits": int,
        "serverLabel": int,
        "trustedHits": int,
    }
    raw = pd.read_csv(csv_file, index_col=False)
    return raw[list(columns)]
|
2017-06-30 09:04:24 +02:00
|
|
|
|
|
|
|
|
|
|
|
def get_flow_per_user(df):
    """Yield one data frame of complete rows per distinct user hash."""
    for user in df['user_hash'].unique().tolist():
        # drop rows with any missing value for this user's flows
        yield df.loc[df.user_hash == user].dropna(axis=0, how="any")
|
2017-07-14 14:58:17 +02:00
|
|
|
|
|
|
|
|
2017-11-07 20:47:41 +01:00
|
|
|
def load_or_generate_h5data(train_data, domain_length, window_size):
    """Load the filtered training set from h5, generating it on first use.

    :param train_data: path of the flow csv file (also the h5 cache prefix)
    :param domain_length: maximum encoded domain length
    :param window_size: number of flows per window
    :return: tuple (domain, flow, name, client, server) of h5py datasets
    """
    logger.info(f"check for h5data {train_data}")
    try:
        check_h5dataset(train_data)
    except FileNotFoundError:
        logger.info("load raw training dataset")
        domain, flow, name, hits, trusted_hits, server = load_or_generate_raw_h5data(train_data, domain_length,
                                                                                     window_size)
        logger.info("filter training dataset")
        # read the h5 datasets into memory before filtering;
        # dataset[()] replaces the .value attribute removed in h5py 3.0
        domain, flow, name, client, server = filter_window_dataset_by_hits(domain[()], flow[()],
                                                                           name[()], hits[()],
                                                                           trusted_hits[()], server[()])
        logger.info("store training dataset as h5 file")
        data = {
            "domain": domain.astype(np.int8),
            "flow": flow,
            "name": name,
            # np.bool was removed from numpy; the builtin bool is equivalent
            "client": client.astype(bool),
            "server": server.astype(bool)
        }
        store_h5dataset(train_data, data)
    logger.info("load h5 dataset")
    data = load_h5dataset(train_data)
    return data["domain"], data["flow"], data["name"], data["client"], data["server"]
|
2017-07-29 10:43:59 +02:00
|
|
|
|
|
|
|
|
2017-11-07 20:47:41 +01:00
|
|
|
def load_or_generate_raw_h5data(train_data, domain_length, window_size):
    """Load the raw training set from ``<train_data>_raw.h5``, generating it
    from the csv on first use.

    :param train_data: path of the flow csv file
    :param domain_length: maximum encoded domain length
    :param window_size: number of flows per window
    :return: tuple (domain, flow, name, hits_vt, hits_trusted, server)
    """
    h5data = train_data + "_raw"
    logger.info(f"check for h5data {h5data}")
    try:
        check_h5dataset(h5data)
    except FileNotFoundError:
        logger.info("h5 data not found - load csv file")
        user_flow_df = get_user_flow_data(train_data)
        logger.info("create raw training dataset")
        domain, flow, name, hits, trusted_hits, server = create_raw_dataset_from_flows(user_flow_df, domain_length,
                                                                                       window_size)
        logger.info("store raw training dataset as h5 file")
        data = {
            "domain": domain.astype(np.int8),
            "flow": flow,
            "name": name,
            "hits_vt": hits.astype(np.int8),
            # BUG FIX: previously stored `hits` again, so the trusted-hit
            # labels on disk were a copy of the VirusTotal labels
            "hits_trusted": trusted_hits.astype(np.int8),
            # np.bool was removed from numpy; the builtin bool is equivalent
            "server": server.astype(bool)
        }
        store_h5dataset(h5data, data)
    logger.info("load h5 dataset")
    data = load_h5dataset(h5data)
    return data["domain"], data["flow"], data["name"], data["hits_vt"], data["hits_trusted"], data["server"]
|
|
|
|
|
|
|
|
|
2017-09-02 16:02:48 +02:00
|
|
|
def generate_names(train_data, window_size):
    """Return the user hash of every window built from the csv at ``train_data``."""
    user_flow_df = get_user_flow_data(train_data)
    user_count = len(user_flow_df['user_hash'].unique().tolist())
    with Pool() as pool:
        pending = [
            pool.apply_async(get_user_chunks, (user_flow, window_size))
            for user_flow in tqdm(get_flow_per_user(user_flow_df), total=user_count)
        ]
        windows = [window for task in pending for window in task.get()]
    # all rows of a window share one user hash; keep the first per window
    names = np.stack([window.user_hash for window in windows])
    return names[:, 0]
|
|
|
|
|
|
|
|
|
2017-07-29 10:43:59 +02:00
|
|
|
def load_or_generate_domains(train_data, domain_length):
    """Load (or build and cache) per-domain features and labels.

    Aggregates the flow csv to one row per domain with boolean server and
    client labels, caches the table as ``<train_data>_domains.gz`` and
    encodes every domain name.

    :param train_data: path of the flow csv file
    :param domain_length: maximum encoded domain length
    :return: tuple (domain encodings, domain series, bool label matrix with
        columns [clientLabel, serverLabel])
    """
    fn = f"{train_data}_domains.gz"

    try:
        logger.info(f"Load file {fn}.")
        user_flow_df = pd.read_csv(fn)
        logger.info(f"File successfully loaded.")
    except FileNotFoundError:
        logger.info(f"File {fn} not found, recreate.")
        user_flow_df = get_user_flow_data(train_data)
        user_flow_df = user_flow_df[["domain", "serverLabel", "trustedHits", "virusTotalHits"]].dropna(axis=0,
                                                                                                       how="any")
        # average the labels/hits over all flows of each domain
        user_flow_df = user_flow_df.groupby(user_flow_df.domain).mean()
        user_flow_df.reset_index(inplace=True)

        # a domain is client-malicious if it ever had a trusted hit or
        # at least 3 VirusTotal hits on average
        user_flow_df["clientLabel"] = np.where(
            np.logical_or(user_flow_df.trustedHits > 0, user_flow_df.virusTotalHits >= 3), True, False)
        user_flow_df[["serverLabel", "clientLabel"]] = user_flow_df[["serverLabel", "clientLabel"]].astype(bool)
        user_flow_df = user_flow_df[["domain", "serverLabel", "clientLabel"]]

        user_flow_df.to_csv(fn, compression="gzip")

    logger.info(f"Extract features from domains")
    domain_encs = user_flow_df.domain.apply(lambda d: get_domain_features(d, domain_length))
    domain_encs = np.stack(domain_encs)

    # .values replaces DataFrame.as_matrix(), which was removed from pandas
    return domain_encs, user_flow_df.domain, user_flow_df[["clientLabel", "serverLabel"]].values.astype(bool)
|
2017-07-30 15:49:37 +02:00
|
|
|
|
|
|
|
|
2017-09-08 19:10:23 +02:00
|
|
|
def save_predictions(path, results):
    """Persist model results to ``<path>/results.joblib`` (compressed)."""
    target = path + "/results.joblib"
    joblib.dump(results, target, compress=3)
|
2017-07-30 15:49:37 +02:00
|
|
|
|
|
|
|
|
|
|
|
def load_predictions(path):
    """Load the results previously stored by ``save_predictions``."""
    source = path + "/results.joblib"
    return joblib.load(source)
|