PIBConv-main/cnn/utils.py
import os
import numpy as np
import pandas as pd
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from torchvision.datasets.utils import check_integrity, \
    extract_archive, verify_str_arg, download_and_extract_archive
from torchvision.datasets.folder import default_loader
from torch.utils.data import Dataset
from ADP_utils.classesADP import classesADP
from typing import Any, Tuple
import pickle
import re


class AverageMeter(object):

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.sum_accuracy = 0
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        self.val = val
        self.sum_accuracy += val
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt


def accuracy(output, target, topk=(1,)):
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].contiguous().view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


# for ADP dataset (also used for BCSS dataset)
def accuracyADP(preds, targets):
    acc5 = 0
    targets_all = targets.data.int()
    acc1 = torch.sum(preds == targets_all)
    preds_cpu = preds.cpu()
    targets_all_cpu = targets_all.cpu()
    for i, pred_sample in enumerate(preds_cpu):
        labelv = targets_all_cpu[i]
        # torch.bitwise_and/_or replace the original np.bitwise_and/_or,
        # which are unreliable when applied directly to torch tensors
        numerator = torch.sum(torch.bitwise_and(pred_sample, labelv))
        denominator = torch.sum(torch.bitwise_or(pred_sample, labelv))
        acc5 += (numerator.double() / denominator.double())
    return acc1, acc5


class Cutout(object):
    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        mask = np.ones((h, w), np.float32)
        y = np.random.randint(h)
        x = np.random.randint(w)

        y1 = np.clip(y - self.length // 2, 0, h)
        y2 = np.clip(y + self.length // 2, 0, h)
        x1 = np.clip(x - self.length // 2, 0, w)
        x2 = np.clip(x + self.length // 2, 0, w)

        mask[y1:y2, x1:x2] = 0.
        mask = torch.from_numpy(mask)
        mask = mask.expand_as(img)
        img *= mask
        return img


class ColorDistortion:
    def __init__(self, distortion):
        self.distortion = distortion

    def __call__(self, image):
        color_jitter = transforms.ColorJitter(0.8 * self.distortion,
                                              0.8 * self.distortion,
                                              0.8 * self.distortion,
                                              0.2 * self.distortion)
        rnd_color_jitter = transforms.RandomApply([color_jitter], p=1.0)
        rnd_gray = transforms.RandomGrayscale(p=0.2)  # currently unused
        color_distort = transforms.Compose([
            rnd_color_jitter,
            # rnd_gray
        ])
        transformed_image = color_distort(image)
        return transformed_image


def _data_transforms_cifar10(args):
    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform


# From https://github.com/chenxin061/pdarts/
def _data_transforms_cifar100(args):
    CIFAR_MEAN = [0.5071, 0.4867, 0.4408]
    CIFAR_STD = [0.2675, 0.2565, 0.2761]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform


# for ADP dataset
def _data_transforms_adp(args):
    ADP_MEAN = [0.81233799, 0.64032477, 0.81902153]
    ADP_STD = [0.18129702, 0.25731668, 0.16800649]
    degrees = 45
    horizontal_shift, vertical_shift = 0.1, 0.1

    # train transform
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomAffine(degrees=degrees,
                                translate=(horizontal_shift, vertical_shift)),
        transforms.ToTensor(),
        transforms.Normalize(ADP_MEAN, ADP_STD)
    ])
    if args.color_aug:
        ColorAugmentation = ColorDistortion(args.color_distortion)
        train_transform.transforms.insert(3, ColorAugmentation)
    if args.image_size != 272:
        train_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    # valid transform
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(ADP_MEAN, ADP_STD)
    ])
    if args.image_size != 272:
        valid_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    return train_transform, valid_transform


# for BCSS dataset
def _data_transforms_bcss(args):
    BCSS_MEAN = [0.7107, 0.4878, 0.6726]
    BCSS_STD = [0.1788, 0.2152, 0.1615]
    degrees = 45
    horizontal_shift, vertical_shift = 0.1, 0.1

    # train transform
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomAffine(degrees=degrees,
                                translate=(horizontal_shift, vertical_shift)),
        transforms.ToTensor(),
        transforms.Normalize(BCSS_MEAN, BCSS_STD)
    ])
    if args.color_aug:
        ColorAugmentation = ColorDistortion(args.color_distortion)
        train_transform.transforms.insert(3, ColorAugmentation)
    if args.image_size != 272:
        train_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    # valid transform
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(BCSS_MEAN, BCSS_STD)
    ])
    if args.image_size != 272:
        valid_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    return train_transform, valid_transform


# for CRC dataset
def _data_transforms_crc(args):
    CRC_MEAN = [0.6976, 0.5340, 0.6687]
    CRC_STD = [0.2272, 0.2697, 0.2247]
    degrees = 45
    horizontal_shift, vertical_shift = 0.1, 0.1

    # train transform
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomAffine(degrees=degrees,
                                translate=(horizontal_shift, vertical_shift)),
        transforms.ToTensor(),
        transforms.Normalize(CRC_MEAN, CRC_STD)
    ])
    if args.color_aug:
        ColorAugmentation = ColorDistortion(args.color_distortion)
        train_transform.transforms.insert(3, ColorAugmentation)
    if args.image_size != 272:
        train_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    # valid transform
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CRC_MEAN, CRC_STD)
    ])
    if args.image_size != 272:
        valid_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    return train_transform, valid_transform


# for BACH dataset
def _data_transforms_bach(args):
    BACH_MEAN = [0.6880, 0.5881, 0.8209]
    BACH_STD = [0.1632, 0.1841, 0.1175]
    degrees = 45
    horizontal_shift, vertical_shift = 0.1, 0.1

    # train transform
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomAffine(degrees=degrees,
                                translate=(horizontal_shift, vertical_shift)),
        transforms.ToTensor(),
        transforms.Normalize(BACH_MEAN, BACH_STD)
    ])
    if args.color_aug:
        ColorAugmentation = ColorDistortion(args.color_distortion)
        train_transform.transforms.insert(3, ColorAugmentation)
    if args.image_size != 272:
        train_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    # valid transform
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(BACH_MEAN, BACH_STD)
    ])
    if args.image_size != 272:
        valid_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    return train_transform, valid_transform


# for OS dataset
def _data_transforms_os(args):
    OS_MEAN = [0.8414, 0.6492, 0.7377]
    OS_STD = [0.1379, 0.2508, 0.1979]
    degrees = 45
    horizontal_shift, vertical_shift = 0.1, 0.1

    # train transform
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomAffine(degrees=degrees,
                                translate=(horizontal_shift, vertical_shift)),
        transforms.ToTensor(),
        transforms.Normalize(OS_MEAN, OS_STD)
    ])
    if args.color_aug:
        ColorAugmentation = ColorDistortion(args.color_distortion)
        train_transform.transforms.insert(3, ColorAugmentation)
    if args.image_size != 272:
        train_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    # valid transform
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(OS_MEAN, OS_STD)
    ])
    if args.image_size != 272:
        valid_transform.transforms.insert(0, transforms.Resize(
            (args.image_size, args.image_size),
            interpolation=transforms.functional.InterpolationMode.BICUBIC))
    return train_transform, valid_transform


# for ADP dataset
class ADP_dataset(Dataset):
    db_name = 'ADP V1.0 Release'
    ROI = 'img_res_1um_bicubic'
    csv_file = 'ADP_EncodedLabels_Release1_Flat.csv'

    def __init__(self, level, transform, root, split='train',
                 portion=0.5, loader=default_loader):
        '''
        Args:
            level (str): a string corresponding to a dict defined in
                "ADP_scripts/classes/classesADP.py"; defines the hierarchy
                to be trained on
            transform (callable, optional): A function/transform that takes in
                a PIL image and returns a transformed version.
                E.g, ``transforms.RandomCrop``
            root (string): Root directory of the ADP dataset.
            split (string, optional): The dataset split, supports ``train``,
                ``valid``, or ``test``.
            loader (callable, optional): A function to load an image given
                its path. Defaults to default_loader defined in torchvision

        Attributes:
            self.full_image_paths (list): a list of image paths
            self.class_labels (np.ndarray): a numpy array of class labels
                (num_samples, num_classes)
        '''
        self.root = root
        self.split = verify_str_arg(
            split, "split",
            ("train", "valid", "test", "train_search", "valid_search"))
        self.transform = transform
        self.loader = loader
        self.portion = portion

        # getting paths:
        csv_file_path = os.path.join(self.root, self.db_name, self.csv_file)
        # reads data and returns a pd.DataFrame; rows are integers starting
        # from 0, columns are strings: e.g. "Patch Names", "E", ...
        ADP_data = pd.read_csv(filepath_or_buffer=csv_file_path, header=0)

        split_folder = os.path.join(self.root, self.db_name, 'splits')
        if self.split == "train":
            train_inds = np.load(os.path.join(split_folder, 'train.npy'))
            out_df = ADP_data.loc[train_inds, :]
        elif self.split == "valid":
            valid_inds = np.load(os.path.join(split_folder, 'valid.npy'))
            out_df = ADP_data.loc[valid_inds, :]
        elif self.split == "test":
            test_inds = np.load(os.path.join(split_folder, 'test.npy'))
            out_df = ADP_data.loc[test_inds, :]
        # for darts search
        elif self.split == "train_search":
            train_inds = np.load(os.path.join(split_folder, 'train.npy'))
            train_search_inds = train_inds[:int(
                np.floor(self.portion * len(train_inds)))]
            out_df = ADP_data.loc[train_search_inds, :]
        elif self.split == "valid_search":
            train_inds = np.load(os.path.join(split_folder, 'train.npy'))
            valid_search_inds = train_inds[int(
                np.floor(self.portion * len(train_inds))):]
            out_df = ADP_data.loc[valid_search_inds, :]

        self.full_image_paths = [
            os.path.join(self.root, self.db_name, self.ROI, image_name)
            for image_name in out_df['Patch Names']]
        self.class_labels = out_df[
            classesADP[level]['classesNames']].to_numpy(dtype=np.float32)

    def __getitem__(self, idx) -> Tuple[Any, torch.Tensor]:
        path = self.full_image_paths[idx]
        label = self.class_labels[idx]
        sample = self.loader(path)  # loading image
        if self.transform is not None:  # PyTorch implementation
            sample = self.transform(sample)
        return sample, torch.tensor(label)

    def __len__(self) -> int:
        return len(self.full_image_paths)


# for BCSS dataset
class BCSSDataset(Dataset):
    db_name = 'BCSS_transformed'

    def __init__(self, root, split="train", transform=None,
                 loader=default_loader, multi_labelled=True) -> None:
        """
        Retrieved from: https://bcsegmentation.grand-challenge.org/
        Args:
            root (string): Directory of the transformed dataset,
                e.g. "/home/BCSS_transformed"
            split (string, optional): The dataset split, supports ``train``,
                ``valid``, or ``test``.
            transform (callable, optional): A function/transform that takes in
                a PIL image and returns a transformed version.
                E.g, ``transforms.RandomCrop``
            loader (callable, optional): A function to load an image given
                its path. Defaults to default_loader defined in torchvision
            multi_labelled (bool): controls whether the output labels are a
                multi-labelled array or an index corresponding to the
                single label
        """
        self.root = root
        self.split = verify_str_arg(split, "split", ("train", "valid", "test"))
        self.transform = transform
        self.loader = loader

        # getting samples from the preprocessed csv files
        if multi_labelled:
            df = pd.read_csv(os.path.join(
                self.root, self.db_name, self.split + ".csv"),
                index_col="image")
        else:
            df = pd.read_csv(os.path.join(
                self.root, self.db_name, self.split + "_with_norm_mass.csv"),
                index_col="image")
        self.samples = [(image.replace('\\', '/'), label)
                        for image, label in zip(df.index,
                                                df.to_records(index=False))]
        if multi_labelled:
            self.samples = [(os.path.join(self.root, self.db_name, path),
                             list(label)) for path, label in self.samples]
        else:
            self.samples = [(os.path.join(self.root, self.db_name, path),
                             np.argmax(list(label)))
                            for path, label in self.samples]
        self.class_to_idx = {cls: idx for idx, cls in enumerate(df.columns)}
        self.class_labels = df.to_numpy(dtype=np.float32)

    def __getitem__(self, idx) -> Tuple[Any, torch.Tensor]:
        path, label = self.samples[idx]
        sample = self.loader(path)  # loading image
        if self.transform is not None:  # PyTorch implementation
            sample = self.transform(sample)
        return sample, torch.tensor(label, dtype=torch.int64)

    def __len__(self) -> int:
        return len(self.samples)


# for CRC dataset
class CRC_transformed(Dataset):
    db_name = 'CRC_transformed'

    def __init__(self, root, split="train", transform=None,
                 loader=default_loader) -> None:
        """
        Args:
            root (string): Directory of the transformed dataset,
                e.g. /home/CRC_transformed
            split (string, optional): The dataset split, supports ``train``,
                ``valid``, or ``test``.
            transform (callable, optional): A function/transform that takes in
                a PIL image and returns a transformed version.
                E.g, ``transforms.RandomCrop``
            loader (callable, optional): A function to load an image given
                its path. Defaults to default_loader defined in torchvision
        """
        self.root = root
        self.split = verify_str_arg(split, "split", ("train", "valid", "test"))
        self.transform = transform
        self.loader = loader

        # getting samples from the preprocessed pickle file
        with open(os.path.join(self.root, self.db_name,
                               self.split + ".pickle"), "rb") as f:
            self.samples = pickle.load(f)
        self.samples = [(os.path.join(self.root, self.db_name, path), label)
                        for path, label in self.samples]
        with open(os.path.join(self.root, self.db_name,
                               "class_to_idx.pickle"), "rb") as f:
            self.class_to_idx = pickle.load(f)

    def __getitem__(self, idx) -> Tuple[Any, torch.Tensor]:
        path, label = self.samples[idx]
        sample = self.loader(path)  # loading image
        if self.transform is not None:  # PyTorch implementation
            sample = self.transform(sample)
        return sample, torch.tensor(label)

    def __len__(self) -> int:
        return len(self.samples)


# for BACH dataset
class BACH_transformed(Dataset):
    db_name = 'BACH_transformed'

    def __init__(self, root, split="train", transform=None,
                 loader=default_loader) -> None:
        """
        Args:
            root (string): Directory of the transformed dataset,
                e.g. /home/BACH_transformed
            split (string, optional): The dataset split, supports ``train``,
                ``valid``, or ``test``.
            transform (callable, optional): A function/transform that takes in
                a PIL image and returns a transformed version.
                E.g, ``transforms.RandomCrop``
            loader (callable, optional): A function to load an image given
                its path. Defaults to default_loader defined in torchvision
        """
        self.root = root
        self.split = verify_str_arg(split, "split", ("train", "valid", "test"))
        self.transform = transform
        self.loader = loader

        # getting samples from the preprocessed pickle file
        with open(os.path.join(self.root, self.db_name,
                               self.split + ".pickle"), "rb") as f:
            self.samples = pickle.load(f)
        self.samples = [(os.path.join(self.root, self.db_name, path), label)
                        for path, label in self.samples]
        with open(os.path.join(self.root, self.db_name,
                               "class_to_idx.pickle"), "rb") as f:
            self.class_to_idx = pickle.load(f)

    def __getitem__(self, idx) -> Tuple[Any, torch.Tensor]:
        path, label = self.samples[idx]
        sample = self.loader(path)  # loading image
        if self.transform is not None:  # PyTorch implementation
            sample = self.transform(sample)
        return sample, torch.tensor(label)

    def __len__(self) -> int:
        return len(self.samples)


# for OS dataset
class OS_transformed(Dataset):
    db_name = 'OS_transformed'

    def __init__(self, root, split="train", transform=None,
                 loader=default_loader) -> None:
        """
        Args:
            root (string): Directory of the transformed dataset,
                e.g. /home/OS_transformed
            split (string, optional): The dataset split, supports ``train``,
                ``valid``, or ``test``.
            transform (callable, optional): A function/transform that takes in
                a PIL image and returns a transformed version.
                E.g, ``transforms.RandomCrop``
            loader (callable, optional): A function to load an image given
                its path. Defaults to default_loader defined in torchvision
        """
        self.root = root
        self.split = verify_str_arg(split, "split", ("train", "valid", "test"))
        self.transform = transform
        self.loader = loader

        # getting samples from the preprocessed pickle file
        with open(os.path.join(self.root, self.db_name,
                               self.split + ".pickle"), "rb") as f:
            self.samples = pickle.load(f)
        self.samples = [(os.path.join(self.root, self.db_name, path), label)
                        for path, label in self.samples]
        with open(os.path.join(self.root, self.db_name,
                               "class_to_idx.pickle"), "rb") as f:
            self.class_to_idx = pickle.load(f)

    def __getitem__(self, idx) -> Tuple[Any, torch.Tensor]:
        path, label = self.samples[idx]
        sample = self.loader(path)  # loading image
        if self.transform is not None:  # PyTorch implementation
            sample = self.transform(sample)
        return sample, torch.tensor(label)

    def __len__(self) -> int:
        return len(self.samples)


def count_parameters_in_MB(model):
    return sum(np.prod(v.size()) for name, v in model.named_parameters()
               if "auxiliary" not in name) / 1e6


def save_checkpoint(state, is_best, save):
    filename = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, filename)
    if is_best:
        best_filename = os.path.join(save, 'model_best.pth.tar')
        shutil.copyfile(filename, best_filename)


def save(model, model_path):
    torch.save(model.state_dict(), model_path)


def load(model, model_path):
    model.load_state_dict(torch.load(model_path))


def drop_path(x, drop_prob):
    if drop_prob > 0.:
        keep_prob = 1. - drop_prob
        # per-sample Bernoulli mask; assumes x lives on the GPU
        mask = Variable(torch.cuda.FloatTensor(
            x.size(0), 1, 1, 1).bernoulli_(keep_prob))
        x.div_(keep_prob)
        x.mul_(mask)
    return x


def create_exp_dir(path, scripts_to_save=None):
    if not os.path.exists(path):
        os.mkdir(path)
    print('Experiment dir : {}'.format(path))

    if scripts_to_save is not None:
        os.mkdir(os.path.join(path, 'scripts'))
        for script in scripts_to_save:
            dst_file = os.path.join(path, 'scripts', os.path.basename(script))
            shutil.copyfile(script, dst_file)


def get_channel_size(path, model):
    f_cell = os.path.join(path, 'cell_info.txt')
    with open(f_cell, 'a') as fh:
        for i, cell in enumerate(model.cells):
            fh.write(f"{i} Cell Info: {cell}")
            fh.write("----------------------\n Intermediary Tensors ----------------------")
            # for index, op in enumerate(cell.ops):

    f_layer = os.path.join(path, 'layer_info.txt')
    cell_mem = np.zeros(len(model.cells))
    cell_name_pat = r"cells\.([0-9]+)\..*"
    for name, v in model.named_parameters():
        m = re.match(cell_name_pat, name)
        print(f"match {m}")
        if m is not None:
            cell_id = int(m.group(1))
            print(f"cell_id {cell_id}")
            cell_mem[cell_id] += np.prod(v.size()) / 1e6
    with open(f_layer, 'a') as fh:
        for i in range(0, len(model.cells)):
            fh.write(f"Cell{i} mem_size: {cell_mem[i]} \n")
            # fh.write(f"param name:{name} shape:{v.size()} mem:{np.prod(v.size())/1e6}")
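A minimal sketch of how the two ADP metrics behave (not from the repo; the batch values are illustrative, and it assumes the repo's imports resolve so that utils is importable):

import torch
from utils import accuracyADP

# hypothetical multi-label batch: 2 samples, 4 classes, already binarized
preds = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 0]], dtype=torch.int32)
targets = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=torch.int32)

acc1, acc5 = accuracyADP(preds, targets)
# acc1 counts element-wise label matches (7 of 8 here); acc5 accumulates the
# per-sample Jaccard overlap |pred AND target| / |pred OR target| = 0.5 + 1.0
print(acc1.item(), acc5.item())  # 7 1.5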
PIBConv-main/cnn/model.py
import torch
import torch.nn as nn
from operations import *
from torch.autograd import Variable
from utils import drop_path


class Cell(nn.Module):

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(Cell, self).__init__()
        print(C_prev_prev, C_prev, C)

        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)

        if reduction:
            op_names, indices = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            op_names, indices = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction)

    def _compile(self, C, op_names, indices, concat, reduction):
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)

        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            stride = 2 if reduction and index < 2 else 1
            op = OPS[name](C, stride, True)
            self._ops += [op]
        self._indices = indices

    def forward(self, s0, s1, drop_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[2 * i]]
            h2 = states[self._indices[2 * i + 1]]
            op1 = self._ops[2 * i]
            op2 = self._ops[2 * i + 1]
            h1 = op1(h1)
            h2 = op2(h2)
            if self.training and drop_prob > 0.:
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            s = h1 + h2
            states += [s]
        return torch.cat([states[i] for i in self._concat], dim=1)


class AuxiliaryHeadCIFAR(nn.Module):

    def __init__(self, C, num_classes):
        """assuming input size 8x8"""
        super(AuxiliaryHeadCIFAR, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            # image size = 2 x 2
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        # print('before', x.shape)
        x = self.features(x)
        # print('after', x.shape)
        x = self.classifier(x.view(x.size(0), -1))
        return x


class AuxiliaryHeadImageNet(nn.Module):

    def __init__(self, C, num_classes):
        """assuming input size 14x14"""
        super(AuxiliaryHeadImageNet, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            # NOTE: This batchnorm was omitted in my earlier implementation
            # due to a typo. Commenting it out for consistency with the
            # experiments in the paper.
            # nn.BatchNorm2d(768),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x.view(x.size(0), -1))
        return x


class AuxiliaryHeadADP(nn.Module):

    def __init__(self, C, num_classes):
        """assuming input size 17x17"""
        super(AuxiliaryHeadADP, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(2),  # image size = 2 x 2
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        # print('before', x.shape)
        x = self.features(x)
        # print('after', x.shape)
        x = self.classifier(x.view(x.size(0), -1))
        return x


class NetworkCIFAR(nn.Module):

    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkCIFAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary

        stem_multiplier = 3
        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev,
                        C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadCIFAR(
                C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input):
        logits_aux = None
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            # drop_path_prob is set externally by the training script
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux


class NetworkImageNet(nn.Module):

    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkImageNet, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary

        self.stem0 = nn.Sequential(
            nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        C_prev_prev, C_prev, C_curr = C, C, C
        self.cells = nn.ModuleList()
        reduction_prev = True
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev,
                        C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(
                C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input):
        logits_aux = None
        s0 = self.stem0(input)
        s1 = self.stem1(s0)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux


class NetworkADP(nn.Module):

    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkADP, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary

        self.stem = nn.Sequential(
            nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        C_prev_prev, C_prev, C_curr = C, C, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev,
                        C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadADP(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input):
        logits_aux = None
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux
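A minimal smoke-test sketch for NetworkCIFAR (not from the repo): it assumes the DARTS_PseudoInvBn genotype from genotypes.py, a small layer count for speed, and that the PIB primitives in operations.py run on CPU. Note that drop_path_prob is never set in __init__, so it must be assigned before the first forward pass:

import torch
import genotypes
from model import NetworkCIFAR

model = NetworkCIFAR(C=16, num_classes=10, layers=8,
                     auxiliary=False, genotype=genotypes.DARTS_PseudoInvBn)
model.drop_path_prob = 0.0  # required; keep 0 on CPU (drop_path is CUDA-only)
logits, logits_aux = model(torch.randn(2, 3, 32, 32))
print(logits.shape)  # torch.Size([2, 10]); logits_aux is None without --auxiliary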
PIBConv-main/cnn/components.py
""" """ from typing import NamedTuple, List from dataclasses import dataclass from enum import Enum class LayerType(Enum): CONV = 1 FC = 2 NON_CONV = 3 @dataclass class LayerMetrics: rank: float KG: float condition: float @dataclass class ConvLayerMetrics: input_channel: LayerMetrics output_channel: LayerMetrics class LRMetrics(NamedTuple): rank_velocity: List[float] r_conv: List[float]
PIBConv-main/cnn/model_search.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype


class MixedOp(nn.Module):

    def __init__(self, C, stride, learnable_bn):
        super(MixedOp, self).__init__()
        self._ops = nn.ModuleList()
        for primitive in PRIMITIVES:
            op = OPS[primitive](C, stride, learnable_bn)
            self._ops.append(op)

    def forward(self, x, weights, index=None, gumbel=False):
        if gumbel:
            # hard (one-hot) path: only the sampled op is evaluated
            return self._ops[index](x) * weights[index]
        else:
            return sum(w * op(x) for w, op in zip(weights, self._ops))


class Cell(nn.Module):

    def __init__(self, steps, multiplier, C_prev_prev, C_prev, C,
                 reduction, reduction_prev, learnable_bn):
        super(Cell, self).__init__()
        # print(C_prev_prev, C_prev, C)
        self.reduction = reduction

        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
        self._steps = steps
        self._multiplier = multiplier

        self._ops = nn.ModuleList()
        self._bns = nn.ModuleList()
        for i in range(self._steps):
            for j in range(2 + i):
                stride = 2 if reduction and j < 2 else 1
                op = MixedOp(C, stride, learnable_bn)
                self._ops.append(op)

    def forward(self, s0, s1, weights, index=None, gumbel=False):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        offset = 0
        for i in range(self._steps):
            if gumbel:
                s = sum(self._ops[offset + j](h, weights[offset + j],
                                              index[offset + j], gumbel)
                        for j, h in enumerate(states))
            else:
                s = sum(self._ops[offset + j](h, weights[offset + j])
                        for j, h in enumerate(states))
            offset += len(states)
            states.append(s)

        return torch.cat(states[-self._multiplier:], dim=1)


class Network(nn.Module):

    def __init__(self, C, num_classes, layers, criterion, learnable_bn=False,
                 steps=4, multiplier=4, stem_multiplier=3):
        super(Network, self).__init__()
        self._C = C
        self._num_classes = num_classes
        self._layers = layers
        self._criterion = criterion
        self._steps = steps
        self._multiplier = multiplier
        self._learnable_bn = learnable_bn

        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            # nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            # nn.BatchNorm2d(C_curr)
            nn.Conv2d(3, C_curr, 4, stride=4, padding=1, bias=False),
            LayerNorm(C_curr, data_format="channels_first")
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr,
                        reduction, reduction_prev, learnable_bn)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, multiplier * C_curr

        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

        self._initialize_alphas()
        self.tau = 5

    def new(self):
        model_new = Network(self._C, self._num_classes, self._layers,
                            self._criterion, self._learnable_bn,
                            self._steps, self._multiplier).cuda()
        for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
            x.data.copy_(y.data)
        return model_new

    def set_tau(self, tau):
        self.tau = tau

    def get_tau(self):
        return self.tau

    def forward(self, input, gumbel=False):

        def get_gumbel_prob(xins):
            while True:
                gumbels = -torch.empty_like(xins).exponential_().log()
                logits = (xins.log_softmax(dim=1) + gumbels) / self.tau
                probs = nn.functional.softmax(logits, dim=1)
                index = probs.max(-1, keepdim=True)[1]
                one_h = torch.zeros_like(logits).scatter_(-1, index, 1.0)
                # straight-through estimator: hard one-hot in the forward
                # pass, soft probabilities in the backward pass
                hardwts = one_h - probs.detach() + probs
                if (torch.isinf(gumbels).any()) or (torch.isinf(probs).any()) \
                        or (torch.isnan(probs).any()):
                    continue
                else:
                    break
            return hardwts, index

        normal_hardwts, normal_index = get_gumbel_prob(self.alphas_normal)
        reduce_hardwts, reduce_index = get_gumbel_prob(self.alphas_reduce)

        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            index = None
            if cell.reduction:
                if gumbel:
                    weights, index = reduce_hardwts, reduce_index
                else:
                    weights = F.softmax(self.alphas_reduce, dim=-1)
            else:
                if gumbel:
                    weights, index = normal_hardwts, normal_index
                else:
                    weights = F.softmax(self.alphas_normal, dim=-1)
            s0, s1 = s1, cell(s0, s1, weights, index, gumbel)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits

    def _loss(self, input, target, gumbel=False):
        logits = self(input, gumbel)
        return self._criterion(logits, target)

    def _initialize_alphas(self):
        k = sum(1 for i in range(self._steps) for n in range(2 + i))
        num_ops = len(PRIMITIVES)

        self.alphas_normal = nn.Parameter(1e-3 * torch.randn(k, num_ops))
        self.alphas_reduce = nn.Parameter(1e-3 * torch.randn(k, num_ops))
        self._arch_parameters = [
            self.alphas_normal,
            self.alphas_reduce,
        ]

    def arch_parameters(self):
        return self._arch_parameters

    def genotype(self):

        def _parse(weights):
            gene = []
            n = 2
            start = 0
            for i in range(self._steps):
                end = start + n
                W = weights[start:end].copy()
                edges = sorted(
                    range(i + 2),
                    key=lambda x: -max(W[x][k] for k in range(len(W[x]))
                                       if k != PRIMITIVES.index('none')))[:2]
                for j in edges:
                    k_best = None
                    for k in range(len(W[j])):
                        if k != PRIMITIVES.index('none'):
                            if k_best is None or W[j][k] > W[j][k_best]:
                                k_best = k
                    gene.append((PRIMITIVES[k_best], j))
                start = end
                n += 1
            return gene

        gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())
        gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())

        concat = range(2 + self._steps - self._multiplier, self._steps + 2)
        genotype = Genotype(
            normal=gene_normal, normal_concat=concat,
            reduce=gene_reduce, reduce_concat=concat
        )
        return genotype


def test():
    net = Network(C=16, num_classes=10, layers=3,
                  criterion=nn.CrossEntropyLoss(), learnable_bn=False,
                  steps=2, multiplier=2, stem_multiplier=3)
    print(net)


if __name__ == '__main__':
    # guarded so that importing this module no longer runs the smoke test
    test()
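A minimal sketch of one Gumbel-softmax search step (not from the repo; the sizes are illustrative and it assumes the PIB primitives in operations.py run on CPU):

import torch
import torch.nn as nn
from model_search import Network

net = Network(C=16, num_classes=10, layers=5,
              criterion=nn.CrossEntropyLoss())
net.set_tau(5.0)  # Gumbel-softmax temperature, annealed during search
x = torch.randn(2, 3, 32, 32)
y = torch.randint(0, 10, (2,))
loss = net._loss(x, y, gumbel=True)  # hard one-hot forward, straight-through
loss.backward()  # gradients reach the architecture parameters
print([a.grad.shape for a in net.arch_parameters()])  # 2 x [14, 8]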
PIBConv-main/cnn/test_cifar.py
import os
import sys
import glob
import numpy as np
import pandas as pd
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable

from model import NetworkCIFAR as Network

parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data',
                    help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50,
                    help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36,
                    help='num of init channels')
parser.add_argument('--layers', type=int, default=20,
                    help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt',
                    help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False,
                    help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False,
                    help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16,
                    help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2,
                    help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS',
                    help='which architecture to use')
parser.add_argument('--cifar100', action='store_true', default=False,
                    help='search with cifar100 dataset')
args = parser.parse_args()

log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')

if args.cifar100:
    CIFAR_CLASSES = 100
    data_folder = 'cifar-100-python'
else:
    CIFAR_CLASSES = 10
    data_folder = 'cifar-10-batches-py'


def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)
    model = Network(args.init_channels, CIFAR_CLASSES,
                    args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    if args.cifar100:
        _, test_transform = utils._data_transforms_cifar100(args)
    else:
        _, test_transform = utils._data_transforms_cifar10(args)

    if args.cifar100:
        test_data = dset.CIFAR100(
            root=args.data, train=False, download=True, transform=test_transform)
    else:
        test_data = dset.CIFAR10(
            root=args.data, train=False, download=True, transform=test_transform)

    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    test_acc1, test_acc5, test_obj = infer(test_queue, model, criterion)
    logging.info('test_top1_acc %f, test_top5_acc %f', test_acc1, test_acc5)


def infer(test_queue, model, criterion):
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()

    with torch.no_grad():
        for step, (input, target) in enumerate(test_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits, _ = model(input)
            loss = criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

            if step % args.report_freq == 0:
                logging.info('test %03d %e %f %f',
                             step, objs.avg, top1.avg, top5.avg)

    return top1.avg, top5.avg, objs.avg


if __name__ == '__main__':
    main()
PIBConv-main/cnn/plot_alpha.py
import matplotlib.pyplot as plt
import argparse
import os
import pandas as pd
import math
import re
import numpy as np
import glob

N_CELLS = 16
N_EDGES = 14


def create_cell_name_dict(path):
    gpaths = glob.glob(os.path.join(path, 'weights_stat*.xlsx'))
    print(gpaths)
    fname = gpaths[0]
    print(fname)
    print(type(fname))
    df = pd.read_excel(fname, sheet_name=0)
    cell_name = df.columns.str.extract(r'(.*)_epoch0', expand=False).dropna()
    return cell_name


def read_weights(path, n_cells):
    gpaths = glob.glob(os.path.join(path, 'weights_stat*.xlsx'))
    fname = gpaths[0]
    print(fname)
    print(type(fname))
    df = pd.read_excel(fname, sheet_name=0)
    ncols = df.shape[1]  # exclude the first column

    # 3D list: alphas[cell_id][epoch][edge_id]
    alphas = []
    for cell in range(n_cells):
        cell_weight = []
        # one column per (cell, epoch) pair; stride by n_cells to walk epochs
        for col in range(cell + 1, ncols, n_cells):
            edges_weight = []  # edge weights for one cell
            for row in range(N_EDGES):
                edges_weight.append(df.iloc[row, col])
            cell_weight.append(edges_weight)
        alphas.append(cell_weight)
    return alphas


def plot_weights(path, cell_edges_weight, cell_id, cell_name):
    epochs = len(cell_edges_weight)
    edges = len(cell_edges_weight[0])
    cell_edges_weight = np.asarray(cell_edges_weight)
    for edge in range(edges):
        if edge == 0:
            print("=======")
            print(edge)
            print(type(cell_edges_weight))
            print(np.asarray(cell_edges_weight).shape)
        plt.plot(cell_edges_weight[:, edge], label=f'edge{edge}')
    plt.legend(loc="upper left")
    fname = os.path.join(path, f'cell{cell_id}.png')
    print(cell_id)
    curr_node = cell_name[int(cell_id)]
    plt.title(f'Alpha for Node {curr_node}')
    plt.ylabel('Architecture Weight')
    plt.xlabel('Epoch')
    plt.savefig(f'{fname}')
    plt.clf()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='argparser')
    parser.add_argument('-path', action='store', help='e.g. ../save_data#')
    args = parser.parse_args()
    folder_name = args.path

    cell_name = np.array(create_cell_name_dict(folder_name))
    alphas = read_weights(folder_name, n_cells=len(cell_name))
    # print(cell_name[2])
    # print(len(alphas))        # num of cells
    # print(len(alphas[0]))     # num of epochs
    # print(len(alphas[0][0]))  # num of edges per cell
    alphas = np.array(alphas, dtype=object)
    num_cells = len(alphas)
    num_epochs = len(alphas[0])
    num_edges = len(alphas[0][0])
    for ncell in range(num_cells):
        plot_weights(folder_name, alphas[ncell], ncell, cell_name)
PIBConv-main/cnn/test_imagenet.py
import os
import sys
import numpy as np
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable

from model import NetworkImageNet as Network

parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/',
                    help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--report_freq', type=float, default=100,
                    help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=48,
                    help='num of init channels')
parser.add_argument('--layers', type=int, default=14,
                    help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt',
                    help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False,
                    help='use auxiliary tower')
parser.add_argument('--drop_path_prob', type=float, default=0,
                    help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS',
                    help='which architecture to use')
args = parser.parse_args()

log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')

CLASSES = 1000


def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES,
                    args.layers, args.auxiliary, genotype)
    model = model.cuda()
    model.load_state_dict(torch.load(args.model_path)['state_dict'])

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=4)

    model.drop_path_prob = args.drop_path_prob
    valid_acc_top1, valid_acc_top5, valid_obj = infer(
        valid_queue, model, criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)


def infer(valid_queue, model, criterion):
    # utils defines AverageMeter; the original referenced a non-existent
    # utils.AvgrageMeter
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()

    # torch.no_grad() replaces the removed `volatile=True` Variable API, and
    # non_blocking=True replaces `async=True`, which is a Python 3 syntax error
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits, _ = model(input)
            loss = criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f',
                             step, objs.avg, top1.avg, top5.avg)

    return top1.avg, top5.avg, objs.avg


if __name__ == '__main__':
    main()
PIBConv-main/cnn/plot_training.py
import matplotlib.pyplot as plt
import pickle
import glob
import os


def plot(error_dict, dir_path):
    f = open(f"{dir_path}/loss_accuracies.txt", "w")
    num_epochs = len(error_dict['train_acc_1'])
    iters = list(range(num_epochs))

    # Loss
    plt.figure()
    plt.title("Training Curve - Loss")
    plt.plot(iters, error_dict['train_loss'], label="Training")
    plt.plot(iters, error_dict['valid_loss'], label="Validation")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend(loc='best')
    plt.savefig(f"{dir_path}/loss.png")
    f.write("Final Training Loss: {}\n".format(error_dict['train_loss'][-1]))
    f.write("Final Validation Loss: {}\n".format(error_dict['valid_loss'][-1]))
    f.write("\n")

    # Accuracy
    plt.figure()
    plt.title("Training Curve - Accuracy")
    plt.plot(iters, error_dict['train_acc_1'], label="Top 1 - Training")
    plt.plot(iters, error_dict['valid_acc_1'], label="Top 1 - Validation")
    plt.plot(iters, error_dict['train_acc_5'], label="Top 5 - Training")
    plt.plot(iters, error_dict['valid_acc_5'], label="Top 5 - Validation")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.legend(loc='best')
    plt.savefig(f"{dir_path}/accuracy.png")
    f.write("Final Training Accuracy (top 1): {}\n".format(
        error_dict['train_acc_1'][-1]))
    f.write("Final Validation Accuracy (top 1): {}\n".format(
        error_dict['valid_acc_1'][-1]))
    f.write("Final Training Accuracy (top 5): {}\n".format(
        error_dict['train_acc_5'][-1]))
    f.write("Final Validation Accuracy (top 5): {}\n".format(
        error_dict['valid_acc_5'][-1]))
    f.close()


if __name__ == '__main__':
    save_paths = [f for f in glob.glob("github/ConvSearch/save_data*")]
    for path in save_paths:
        error_dict_path = path + "/errors_dict.pkl"
        if os.path.exists(error_dict_path):
            print("Writing to:", path)
            error_dict = {}
            with open(error_dict_path, 'rb') as f:
                error_dict = pickle.load(f)
            plot(error_dict, path)
PIBConv-main/cnn/train_cifar.py
import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable

from model import NetworkCIFAR as Network

parser = argparse.ArgumentParser("cifar")
####################
# Model details
parser.add_argument('--arch', type=str, default='DARTS',
                    help='which architecture to use')  # be careful with this.
parser.add_argument('--layers', type=int, default=20,
                    help='total number of layers')
parser.add_argument('--init_channels', type=int, default=36,
                    help='num of init channels')
####################
# Training details
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--epochs', type=int, default=600,
                    help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.025,
                    help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4,
                    help='weight decay')
parser.add_argument('--auxiliary', action='store_true', default=False,
                    help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4,
                    help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False,
                    help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16,
                    help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2,
                    help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5,
                    help='gradient clipping')
####################
# Datasets
parser.add_argument('--data', type=str, default='../data',
                    help='location of the data corpus')
parser.add_argument('--cifar100', action='store_true', default=False,
                    help='whether to search with cifar100 dataset')
####################
# Others
parser.add_argument('--report_freq', type=float, default=50,
                    help='report frequency')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()

## LOUIS CHANGED##
args.save = 'Eval-{}-arch-{}-{}'.format(
    args.save, args.arch, time.strftime("%Y%m%d-%H%M%S"))
# args.save = 'Eval-{}-data-{}-arch-{}-{}'.format(
#     args.save, args.dataset, args.arch, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

if args.cifar100:
    CIFAR_CLASSES = 100
    data_folder = 'cifar-100-python'
else:
    CIFAR_CLASSES = 10
    data_folder = 'cifar-10-batches-py'


def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)
    model = Network(args.init_channels, CIFAR_CLASSES,
                    args.layers, args.auxiliary, genotype)
    model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # utils.get_channel_size(args.save, model)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    if args.cifar100:
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
    else:
        train_transform, valid_transform = utils._data_transforms_cifar10(args)

    if args.cifar100:
        train_data = dset.CIFAR100(root=args.data, train=True,
                                   download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.data, train=False,
                                   download=True, transform=valid_transform)
    else:
        train_data = dset.CIFAR10(root=args.data, train=True,
                                  download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.data, train=False,
                                  download=True, transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True,
        pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    valid_acc_max = -1000
    for epoch in range(args.epochs):
        # get_last_lr() replaces the deprecated get_lr()
        logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        scheduler.step()

        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        if valid_acc > valid_acc_max:
            utils.save(model, os.path.join(args.save, 'weights.pt'))
            valid_acc_max = valid_acc

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))


def train(train_queue, model, criterion, optimizer):
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.train()

    for step, (input, target) in enumerate(train_queue):
        input = input.cuda()
        target = target.cuda(non_blocking=True)

        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        loss.backward()
        # clip_grad_norm_ replaces the deprecated clip_grad_norm
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)

        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f',
                         step, objs.avg, top1.avg, top5.avg)

    return top1.avg, objs.avg


def infer(valid_queue, model, criterion):
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()

    # torch.no_grad() makes the obsolete volatile Variables unnecessary
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits, _ = model(input)
            loss = criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f',
                             step, objs.avg, top1.avg, top5.avg)

    return top1.avg, objs.avg


if __name__ == '__main__':
    main()
PIBConv-main/cnn/visualize.py
import sys
import os
import genotypes
from graphviz import Digraph

os.environ["PATH"] += os.pathsep + \
    '/fs2/comm/kpgrp/mhosseini/venvs/venv-csearch/lib/python3.9/site-packages/graphviz'
print(os.environ["PATH"])


def plot(genotype, filename):
    g = Digraph(
        format='pdf',
        edge_attr=dict(fontsize='20', fontname="times"),
        node_attr=dict(style='filled', shape='rect', align='center',
                       fontsize='20', height='0.5', width='0.5',
                       penwidth='2', fontname="times"),
        engine='dot')
    g.body.extend(['rankdir=LR'])

    g.node("c_{k-2}", fillcolor='darkseagreen2')
    g.node("c_{k-1}", fillcolor='darkseagreen2')
    assert len(genotype) % 2 == 0
    steps = len(genotype) // 2

    for i in range(steps):
        g.node(str(i), fillcolor='lightblue')

    for i in range(steps):
        for k in [2 * i, 2 * i + 1]:
            op, j = genotype[k]
            if j == 0:
                u = "c_{k-2}"
            elif j == 1:
                u = "c_{k-1}"
            else:
                u = str(j - 2)
            v = str(i)
            g.edge(u, v, label=op, fillcolor="gray")

    g.node("c_{k}", fillcolor='palegoldenrod')
    for i in range(steps):
        g.edge(str(i), "c_{k}", fillcolor="gray")

    g.render(filename, view=True)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("usage:\n python {} ARCH_NAME".format(sys.argv[0]))
        sys.exit(1)

    genotype_name = sys.argv[1]
    try:
        genotype = eval('genotypes.{}'.format(genotype_name))
    except AttributeError:
        print("{} is not specified in genotypes.py".format(genotype_name))
        sys.exit(1)

    plot(genotype.normal, "normal")
    plot(genotype.reduce, "reduction")
PIBConv-main/cnn/genotypes.py
from collections import namedtuple

Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')

PRIMITIVES = [
    'none',
    'max_pool_3x3',
    'avg_pool_3x3',
    'skip_connect',
    'pib_conv_3x3',
    'pib_conv_5x5',
    'dil_conv_3x3',
    'dil_conv_5x5'
]

NASNet = Genotype(
    normal=[
        ('sep_conv_5x5', 1), ('sep_conv_3x3', 0),
        ('sep_conv_5x5', 0), ('sep_conv_3x3', 0),
        ('avg_pool_3x3', 1), ('skip_connect', 0),
        ('avg_pool_3x3', 0), ('avg_pool_3x3', 0),
        ('sep_conv_3x3', 1), ('skip_connect', 1),
    ],
    normal_concat=[2, 3, 4, 5, 6],
    reduce=[
        ('sep_conv_5x5', 1), ('sep_conv_7x7', 0),
        ('max_pool_3x3', 1), ('sep_conv_7x7', 0),
        ('avg_pool_3x3', 1), ('sep_conv_5x5', 0),
        ('skip_connect', 3), ('avg_pool_3x3', 2),
        ('sep_conv_3x3', 2), ('max_pool_3x3', 1),
    ],
    reduce_concat=[4, 5, 6],
)

AmoebaNet = Genotype(
    normal=[
        ('avg_pool_3x3', 0), ('max_pool_3x3', 1),
        ('sep_conv_3x3', 0), ('sep_conv_5x5', 2),
        ('sep_conv_3x3', 0), ('avg_pool_3x3', 3),
        ('sep_conv_3x3', 1), ('skip_connect', 1),
        ('skip_connect', 0), ('avg_pool_3x3', 1),
    ],
    normal_concat=[4, 5, 6],
    reduce=[
        ('avg_pool_3x3', 0), ('sep_conv_3x3', 1),
        ('max_pool_3x3', 0), ('sep_conv_7x7', 2),
        ('sep_conv_7x7', 0), ('avg_pool_3x3', 1),
        ('max_pool_3x3', 0), ('max_pool_3x3', 1),
        ('conv_7x1_1x7', 0), ('sep_conv_3x3', 5),
    ],
    reduce_concat=[3, 4, 6]
)

DARTS_V1 = Genotype(
    normal=[
        ('sep_conv_3x3', 1), ('sep_conv_3x3', 0),
        ('skip_connect', 0), ('sep_conv_3x3', 1),
        ('skip_connect', 0), ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 0), ('skip_connect', 2)
    ],
    normal_concat=[2, 3, 4, 5],
    reduce=[
        ('max_pool_3x3', 0), ('max_pool_3x3', 1),
        ('skip_connect', 2), ('max_pool_3x3', 0),
        ('max_pool_3x3', 0), ('skip_connect', 2),
        ('skip_connect', 2), ('avg_pool_3x3', 0)
    ],
    reduce_concat=[2, 3, 4, 5]
)

DARTS_V2 = Genotype(
    normal=[
        ('sep_conv_3x3', 0), ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 0), ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 1), ('skip_connect', 0),
        ('skip_connect', 0), ('dil_conv_3x3', 2)
    ],
    normal_concat=[2, 3, 4, 5],
    reduce=[
        ('max_pool_3x3', 0), ('max_pool_3x3', 1),
        ('skip_connect', 2), ('max_pool_3x3', 1),
        ('max_pool_3x3', 0), ('skip_connect', 2),
        ('skip_connect', 2), ('max_pool_3x3', 1)
    ],
    reduce_concat=[2, 3, 4, 5]
)

DARTS_ADP_N2 = Genotype(
    normal=[
        ('max_pool_3x3', 1), ('max_pool_3x3', 0),
        ('dil_conv_5x5', 2), ('max_pool_3x3', 1)
    ],
    normal_concat=range(2, 4),
    reduce=[
        ('sep_conv_5x5', 0), ('max_pool_3x3', 1),
        ('max_pool_3x3', 2), ('dil_conv_5x5', 0)
    ],
    reduce_concat=range(2, 4)
)

DARTS_ADP_N3 = Genotype(
    normal=[
        ('max_pool_3x3', 0), ('max_pool_3x3', 1),
        ('sep_conv_5x5', 2), ('max_pool_3x3', 1),
        ('dil_conv_5x5', 3), ('max_pool_3x3', 1)
    ],
    normal_concat=range(2, 5),
    reduce=[
        ('max_pool_3x3', 0), ('dil_conv_5x5', 1),
        ('max_pool_3x3', 0), ('max_pool_3x3', 2),
        ('skip_connect', 1), ('max_pool_3x3', 0)
    ],
    reduce_concat=range(2, 5)
)

DARTS_ADP_N4 = Genotype(
    normal=[
        ('max_pool_3x3', 0), ('max_pool_3x3', 1),
        ('max_pool_3x3', 0), ('skip_connect', 2),
        ('max_pool_3x3', 0), ('max_pool_3x3', 2),
        ('dil_conv_3x3', 4), ('max_pool_3x3', 0)
    ],
    normal_concat=range(2, 6),
    reduce=[
        ('max_pool_3x3', 0), ('max_pool_3x3', 1),
        ('dil_conv_5x5', 2), ('max_pool_3x3', 0),
        ('sep_conv_5x5', 2), ('max_pool_3x3', 0),
        ('dil_conv_3x3', 2), ('max_pool_3x3', 4)
    ],
    reduce_concat=range(2, 6)
)

DARTS_PseudoInvBn = Genotype(
    normal=[
        ('pib_conv_3x3', 0), ('pib_conv_5x5', 1),
        ('pib_conv_3x3', 0), ('pib_conv_3x3', 2),
        ('pib_conv_3x3', 2), ('pib_conv_3x3', 1),
        ('pib_conv_3x3', 1), ('pib_conv_3x3', 3)
    ],
    normal_concat=range(2, 6),
    reduce=[
        ('avg_pool_3x3', 0), ('pib_conv_5x5', 1),
        ('avg_pool_3x3', 0), ('dil_conv_5x5', 2),
        ('avg_pool_3x3', 0), ('skip_connect', 2),
        ('pib_conv_3x3', 3), ('pib_conv_3x3', 0)
    ],
    reduce_concat=range(2, 6)
)
4,906
22.146226
78
py
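A minimal decoding sketch (not part of the file above; it assumes genotypes.py is importable as `genotypes`): in the DARTS genotype convention, each consecutive pair of (op_name, input_node) tuples wires one intermediate node of the cell, with nodes 0 and 1 being the two cell inputs.

# hedged sketch: prints how DARTS_PseudoInvBn's normal cell is wired
from genotypes import DARTS_PseudoInvBn  # assumes genotypes.py is on the path

for node, i in enumerate(range(0, len(DARTS_PseudoInvBn.normal), 2)):
    (op1, in1), (op2, in2) = DARTS_PseudoInvBn.normal[i:i + 2]
    print("node {}: {}(node {}) + {}(node {})".format(node + 2, op1, in1, op2, in2))
print("concat nodes:", list(DARTS_PseudoInvBn.normal_concat))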
PIBConv
PIBConv-main/cnn/operations.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import math

OPS = {
    'none': lambda C, stride, affine: Zero(stride),
    'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
    'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
    'skip_connect': lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
    'pib_conv_3x3': lambda C, stride, affine: PseudoInvBn(C, C, 3, stride, 1, affine=affine),
    'pib_conv_5x5': lambda C, stride, affine: PseudoInvBn(C, C, 5, stride, 2, affine=affine),
    'pib_conv_7x7': lambda C, stride, affine: PseudoInvBn(C, C, 7, stride, 3, affine=affine),
    'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
    'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
    'conv_7x1_1x7': lambda C, stride, affine: nn.Sequential(
        nn.ReLU(inplace=False),
        nn.Conv2d(C, C, (1, 7), stride=(1, stride), padding=(0, 3), bias=False),
        nn.Conv2d(C, C, (7, 1), stride=(stride, 1), padding=(3, 0), bias=False),
        nn.BatchNorm2d(C, affine=affine)
    )
}


class LayerNorm(nn.Module):
    """LayerNorm that supports two data formats: channels_last (default) or
    channels_first, i.e. the ordering of the dimensions in the inputs.
    channels_last corresponds to inputs with shape
    (batch_size, height, width, channels) while channels_first corresponds to
    inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError
        self.normalized_shape = (normalized_shape, )

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight,
                                self.bias, self.eps)
        elif self.data_format == "channels_first":
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
            return x


class ReLUConvBN(nn.Module):
    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(ReLUConvBN, self).__init__()
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
                      padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine)
        )

    def forward(self, x):
        return self.op(x)


class DilConv(nn.Module):
    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation,
                 affine=True):
        super(DilConv, self).__init__()
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, groups=C_in,
                      bias=False),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        )

    def forward(self, x):
        return self.op(x)


class SepConv(nn.Module):
    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(SepConv, self).__init__()
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                      padding=padding, groups=C_in, bias=False),
            nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1,
                      padding=padding, groups=C_in, bias=False),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        )

    def forward(self, x):
        return self.op(x)


class LNormReduce(nn.Module):
    def __init__(self, C_in):
        super(LNormReduce, self).__init__()
        self.op = nn.Sequential(
            LayerNorm(C_in, eps=1e-5, data_format="channels_first"),
            nn.Conv2d(C_in, C_in, kernel_size=2, stride=2, groups=C_in,
                      bias=False)
        )

    def forward(self, x):
        return self.op(x)


class Identity(nn.Module):
    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x


class Zero(nn.Module):
    def __init__(self, stride):
        super(Zero, self).__init__()
        self.stride = stride

    def forward(self, x):
        if self.stride == 1:
            return x.mul(0.)
        return x[:, :, ::self.stride, ::self.stride].mul(0.)


class FactorizedReduce(nn.Module):
    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.relu = nn.ReLU(inplace=False)
        self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0,
                                bias=False)
        self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0,
                                bias=False)
        # self.ln = LayerNorm(C_in, eps=1e-5, data_format="channels_first")
        self.bn = nn.BatchNorm2d(C_out, affine=affine)

    def forward(self, x):
        x = self.relu(x)
        if x.shape[2] % 2 == 0:
            out = torch.cat(
                [self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
        else:
            x2 = F.pad(x[:, :, 1:, 1:], (0, 1, 0, 1), mode='constant', value=0)
            out = torch.cat([self.conv_1(x), self.conv_2(x2)], dim=1)
        out = self.bn(out)
        return out


class PseudoInvBn(nn.Module):
    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(PseudoInvBn, self).__init__()
        self.op = nn.Sequential(
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                      padding=padding, groups=C_in, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
            nn.Conv2d(C_in, C_in*2, kernel_size=1, padding=0, bias=False),
            nn.Conv2d(C_in*2, C_in, kernel_size=kernel_size, stride=1,
                      padding=padding, groups=C_in, bias=False),
            nn.GELU(),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
        )

    def forward(self, x):
        return self.op(x)
6,930
37.082418
115
py
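A minimal smoke test (not part of the file above; it assumes operations.py is importable as `operations`) exercising the PIB op through the OPS registry: `pib_conv_3x3` preserves the channel count and, at stride 1, the spatial size.

# hedged sketch: instantiate a PIB op and check its output shape
import torch
from operations import OPS  # assumes operations.py is on the path

op = OPS['pib_conv_3x3'](16, 1, True)   # C=16, stride=1, affine=True
x = torch.randn(2, 16, 32, 32)
print(op(x).shape)                      # torch.Size([2, 16, 32, 32])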
PIBConv
PIBConv-main/cnn/adas/Adas.py
""" """ from torch.optim.optimizer import Optimizer, required import sys import numpy as np import torch mod_name = vars(sys.modules[__name__])['__name__'] if 'adas.' in mod_name: from .metrics import Metrics else: from .optim.metrics import Metrics class Adas(Optimizer): """ Vectorized SGD from torch.optim.SGD """ def __init__(self, params, lr: float = required, beta: float = 0.8, step_size: int = None, linear: bool = False, gamma: float = 1, momentum: float = 0, dampening: float = 0, weight_decay: float = 0, nesterov: bool = False): if lr is not required and lr < 0.0: raise ValueError("Invalid learning rate: {}".format(lr)) if momentum < 0.0: raise ValueError("Invalid momentum value: {}".format(momentum)) if weight_decay < 0.0: raise ValueError( "Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError( "Nesterov momentum requires a momentum and zero dampening") super(Adas, self).__init__(params, defaults) # Adas Specific stuff (not SGD) if np.less(beta, 0) or np.greater_equal(beta, 1): raise ValueError(f'Invalid beta: {beta}') if np.less(gamma, 0): raise ValueError(f'Invalid gamma: {gamma}') if step_size is not None: if np.less_equal(step_size, 0): raise ValueError(f'Invalid step_size: {step_size}') self.step_size = step_size self.gamma = gamma self.beta = beta self.metrics = metrics = Metrics(params=params, linear=linear) self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params)) self.velocity = np.zeros( len(self.metrics.params) - len(self.metrics.mask)) self.not_ready = list(range(len(self.velocity))) self.init_lr = lr self.zeta = 1. self.KG = 0. def __setstate__(self, state): super(Adas, self).__setstate__(state) for group in self.param_groups: group.setdefault('nesterov', False) def epoch_step(self, epoch: int) -> None: self.metrics() if epoch == 0: velocity = self.init_lr * np.ones(len(self.velocity)) self.KG = self.metrics.KG(epoch) else: KG = self.metrics.KG(epoch) velocity = KG - self.KG self.KG = KG for idx in self.not_ready: if np.isclose(KG[idx], 0.): velocity[idx] = self.init_lr - \ self.beta * self.velocity[idx] else: self.not_ready.remove(idx) if self.step_size is not None: if epoch % self.step_size == 0 and epoch > 0: # self.lr_vector *= self.gamma self.zeta *= self.gamma # Add here: # self.velocity *= self.gamma self.velocity = np.maximum( self.beta * self.velocity + self.zeta * velocity, 0.) count = 0 for i in range(len(self.metrics.params)): if i in self.metrics.mask: self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)] else: self.lr_vector[i] = self.velocity[count] count += 1 def step(self, closure: callable = None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() iteration_group = 0 for group in self.param_groups: iteration_group += 1 weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] for p_index, p in enumerate(group['params']): if p.grad is None: continue d_p = p.grad.data if weight_decay != 0: d_p.add_(p.data, alpha=weight_decay) if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone( d_p).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(d_p, alpha=1 - dampening) if nesterov: d_p = d_p.add(momentum, buf) else: d_p = buf # p.data.add_(-group['lr'], d_p) p.data.add_(d_p, alpha=-self.lr_vector[p_index]) return loss
5,240
34.174497
78
py
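A minimal usage sketch (not part of the file above); the model, loss, and hyperparameters are illustrative. Note that Adas builds per-layer metrics from `params`, so the parameters are passed as a list rather than a generator, and `epoch_step` is called once per epoch on top of the usual per-batch `step`.

# hedged sketch: assumes the cnn/adas package is importable as `adas`
import torch
import torch.nn as nn
from adas import Adas

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.Flatten(),
                      nn.Linear(8 * 8 * 8, 10))
optimizer = Adas(params=list(model.parameters()), lr=0.03, beta=0.8)

for epoch in range(2):
    x, y = torch.randn(4, 3, 8, 8), torch.randint(0, 10, (4,))
    loss = nn.functional.cross_entropy(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()             # per-batch update with per-layer LRs
    optimizer.epoch_step(epoch)  # once per epoch: refresh knowledge-gain metrics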
PIBConv
PIBConv-main/cnn/adas/components.py
""" """ from typing import NamedTuple, List from dataclasses import dataclass from enum import Enum class LayerType(Enum): CONV = 1 DEPTH_CONV = 2 FC = 3 NON_CONV = 4 @dataclass class LayerMetrics: rank: float KG: float condition: float @dataclass class ConvLayerMetrics: input_channel: LayerMetrics output_channel: LayerMetrics class LRMetrics(NamedTuple): rank_velocity: List[float] r_conv: List[float]
457
13.774194
35
py
PIBConv
PIBConv-main/cnn/adas/metrics.py
""" """ from typing import List, Union, Tuple import sys import numpy as np import torch mod_name = vars(sys.modules[__name__])['__name__'] if 'adas.' in mod_name: from .components import LayerMetrics, ConvLayerMetrics from .matrix_factorization import EVBMF else: from optim.components import LayerMetrics, ConvLayerMetrics, LayerType from optim.matrix_factorization import EVBMF class Metrics: def __init__(self, params, linear: bool = False) -> None: ''' parameters: list of torch.nn.Module.parameters() ''' self.params = params self.history = list() mask = list() for param_idx, param in enumerate(params): param_shape = param.shape if not linear: if len(param_shape) != 4: mask.append(param_idx) else: if len(param_shape) != 4 and len(param_shape) != 2: mask.append(param_idx) self.mask = set(mask) def compute_low_rank(self, tensor: torch.Tensor, normalizer: float) -> torch.Tensor: if tensor.requires_grad: tensor = tensor.detach() try: tensor_size = tensor.shape if tensor_size[0] > tensor_size[1]: tensor = tensor.T U_approx, S_approx, V_approx = EVBMF(tensor) except RuntimeError: return None, None, None rank = S_approx.shape[0] / tensor_size[0] # normalizer low_rank_eigen = torch.diag(S_approx).data.cpu().numpy() if len(low_rank_eigen) != 0: condition = low_rank_eigen[0] / low_rank_eigen[-1] sum_low_rank_eigen = low_rank_eigen / \ max(low_rank_eigen) sum_low_rank_eigen = np.sum(sum_low_rank_eigen) else: condition = 0 sum_low_rank_eigen = 0 KG = sum_low_rank_eigen / tensor_size[0] # normalizer return rank, KG, condition def KG(self, epoch: int) -> np.ndarray: KG_list = list() for i, (index, metric) in enumerate(self.history[epoch]): if isinstance(metric, ConvLayerMetrics): KG_list.append((metric.input_channel.KG + metric.output_channel.KG) / 2) elif isinstance(metric, LayerMetrics): KG_list.append(metric.KG) return np.array(KG_list) def __call__(self) -> List[Tuple[int, Union[LayerMetrics, ConvLayerMetrics]]]: ''' Computes the knowledge gain (S) and mapping condition (condition) ''' metrics: List[Tuple[int, Union[LayerMetrics, ConvLayerMetrics]]] = list() # for separable convolution separable_conv = False for layer_index, layer in enumerate(self.params): if layer_index in self.mask: metrics.append((layer_index, None)) continue # if np.less(np.prod(layer.shape), 10_000): # metrics.append((layer_index, None)) if len(layer.shape) == 4: if layer.shape[1] == 1: # depth-wise conv, don't compute metrics # use the following point-wise conv's metrics instead metrics.append((layer_index, None)) separable_conv = True else: # other conv types layer_tensor = layer.data tensor_size = layer_tensor.shape mode_3_unfold = layer_tensor.permute(1, 0, 2, 3) mode_3_unfold = torch.reshape( mode_3_unfold, [tensor_size[1], tensor_size[0] * tensor_size[2] * tensor_size[3]]) mode_4_unfold = layer_tensor mode_4_unfold = torch.reshape( mode_4_unfold, [tensor_size[0], tensor_size[1] * tensor_size[2] * tensor_size[3]]) in_rank, in_KG, in_condition = self.compute_low_rank( mode_3_unfold, tensor_size[1]) if in_rank is None and in_KG is None and in_condition is None: if len(self.history) > 0: in_rank = self.history[-1][ layer_index][1].input_channel.rank in_KG = self.history[-1][ layer_index][1].input_channel.KG in_condition = self.history[-1][ layer_index][1].input_channel.condition else: in_rank = in_KG = in_condition = 0. 
out_rank, out_KG, out_condition = self.compute_low_rank( mode_4_unfold, tensor_size[0]) if out_rank is None and out_KG is None and out_condition is None: if len(self.history) > 0: out_rank = self.history[-1][ layer_index][1].output_channel.rank out_KG = self.history[-1][ layer_index][1].output_channel.KG out_condition = self.history[-1][ layer_index][1].output_channel.condition else: out_rank = out_KG = out_condition = 0. metrics.append((layer_index, ConvLayerMetrics( input_channel=LayerMetrics( rank=in_rank, KG=in_KG, condition=in_condition), output_channel=LayerMetrics( rank=out_rank, KG=out_KG, condition=out_condition)))) if separable_conv: # copy current metrics to preceding depth-wise conv metrics[layer_index-1] = (layer_index-1, metrics[-1][1]) separable_conv = False elif len(layer.shape) == 2: rank, KG, condition = self.compute_low_rank( layer, layer.shape[0]) if rank is None and KG is None and condition is None: if len(self.history) > 0: rank = self.history[-1][layer_index][1].rank KG = self.history[-1][layer_index][1].KG condition = self.history[-1][layer_index][1].condition else: rank = KG = condition = 0. metrics.append((layer_index, LayerMetrics( rank=rank, KG=KG, condition=condition))) else: metrics.append((layer_index, None)) self.history.append(metrics) return metrics
7,146
43.391304
85
py
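A small illustration (not part of the file above) of the two unfoldings used in `__call__` on a conv weight of shape (C_out, C_in, k, k): the mode-3 unfold groups everything but the input channels, and the mode-4 unfold groups everything but the output channels.

# hedged sketch of the mode-3 / mode-4 unfoldings
import torch

W = torch.randn(8, 3, 5, 5)                            # (C_out, C_in, k, k)
mode_3 = W.permute(1, 0, 2, 3).reshape(3, 8 * 5 * 5)   # (C_in,  C_out*k*k)
mode_4 = W.reshape(8, 3 * 5 * 5)                       # (C_out, C_in*k*k)
print(mode_3.shape, mode_4.shape)  # torch.Size([3, 200]) torch.Size([8, 75])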
PIBConv
PIBConv-main/cnn/adas/__init__.py
""" """ from .Adas import Adas
31
7
22
py
PIBConv
PIBConv-main/cnn/adas/matrix_factorization.py
from __future__ import division

import numpy as np
# from scipy.sparse.linalg import svds
from scipy.optimize import minimize_scalar
import torch


def EVBMF(Y, sigma2=None, H=None):
    """Implementation of the analytical solution to Empirical Variational
    Bayes Matrix Factorization.

    This function can be used to calculate the analytical solution to
    empirical VBMF. This is based on the paper and MatLab code by
    Nakajima et al.: "Global analytic solution of fully-observed variational
    Bayesian matrix factorization."

    Notes
    -----
        If sigma2 is unspecified, it is estimated by minimizing the free
        energy. If H is unspecified, it is set to the smallest of the sides
        of the input Y.

    Attributes
    ----------
    Y : numpy-array
        Input matrix that is to be factorized. Y has shape (L,M), where L<=M.

    sigma2 : int or None (default=None)
        Variance of the noise on Y.

    H : int or None (default = None)
        Maximum rank of the factorized matrices.

    Returns
    -------
    U : numpy-array
        Left-singular vectors.

    S : numpy-array
        Diagonal matrix of singular values.

    V : numpy-array
        Right-singular vectors.

    post : dictionary
        Dictionary containing the computed posterior values.

    References
    ----------
    .. [1] Nakajima, Shinichi, et al. "Global analytic solution of
       fully-observed variational Bayesian matrix factorization." Journal of
       Machine Learning Research 14.Jan (2013): 1-37.

    .. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by
       variational Bayesian PCA." Advances in Neural Information Processing
       Systems. 2012.
    """
    L, M = Y.shape  # has to be L<=M

    if H is None:
        H = L

    alpha = L/M
    tauubar = 2.5129*np.sqrt(alpha)

    # SVD of the input matrix, max rank of H
    # U, s, V = np.linalg.svd(Y)
    U, s, V = torch.svd(Y)
    U = U[:, :H]
    s = s[:H]
    V = V[:H].T

    # Calculate residual
    residual = 0.
    if H < L:
        # residual = np.sum(np.sum(Y**2)-np.sum(s**2))
        residual = torch.sum(np.sum(Y**2)-np.sum(s**2))

    # Estimation of the variance when sigma2 is unspecified
    if sigma2 is None:
        xubar = (1+tauubar)*(1+alpha/tauubar)
        eH_ub = int(np.min([np.ceil(L/(1+alpha))-1, H]))-1
        # upper_bound = (np.sum(s**2)+residual)/(L*M)
        # lower_bound = np.max(
        #     [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M])
        upper_bound = (torch.sum(s**2)+residual)/(L*M)
        lower_bound = torch.max(torch.stack(
            [s[eH_ub+1]**2/(M*xubar), torch.mean(s[eH_ub+1:]**2)/M], dim=0))

        scale = 1.  # /lower_bound
        s = s*np.sqrt(scale)
        residual = residual*scale
        lower_bound = lower_bound*scale
        upper_bound = upper_bound*scale

        sigma2_opt = minimize_scalar(
            EVBsigma2,
            args=(L, M, s.cpu().numpy(), residual, xubar),
            bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()],
            method='Bounded')
        sigma2 = sigma2_opt.x

    # Threshold gamma term
    threshold = np.sqrt(M*sigma2*(1+tauubar)*(1+alpha/tauubar))
    # pos = np.sum(s > threshold)
    pos = torch.sum(s > threshold)

    # Formula (15) from [2]
    # d = torch.multiply(s[:pos]/2,
    #                    1-torch.divide(
    #                        torch.tensor((L+M)*sigma2, device=s.device),
    #                        s[:pos]**2) + torch.sqrt((1-torch.divide(
    #                            torch.tensor(
    #                                (L+M)*sigma2, device=s.device),
    #                            s[:pos]**2))**2 -
    #                        4*L*M*sigma2**2/s[:pos]**4))
    # d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt(
    #     (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4))
    d = (s[:pos]/2)*(1-(L+M)*sigma2/s[:pos]**2 +
                     torch.sqrt((1 - (L+M)*sigma2/s[:pos]**2)**2 -
                                4*L*M*sigma2**2/s[:pos]**4))

    # Computation of the posterior
    # post = {}
    # post['ma'] = np.zeros(H)
    # post['mb'] = np.zeros(H)
    # post['sa2'] = np.zeros(H)
    # post['sb2'] = np.zeros(H)
    # post['cacb'] = np.zeros(H)

    # tau = np.multiply(d, s[:pos])/(M*sigma2)
    # delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau)

    # post['ma'][:pos] = np.sqrt(np.multiply(d, delta))
    # post['mb'][:pos] = np.sqrt(np.divide(d, delta))
    # post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])
    # post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
    # post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M))
    # post['sigma2'] = sigma2
    # post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) +
    #                  (residual+np.sum(s**2))/sigma2 + np.sum(
    #     M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau))

    return U[:, :pos], torch.diag(d), V[:, :pos]  # , post


def EVBsigma2(sigma2, L, M, s, residual, xubar):
    H = len(s)

    alpha = L/M
    x = s**2/(M*sigma2)

    z1 = x[x > xubar]
    z2 = x[x <= xubar]
    tau_z1 = tau(z1, alpha)

    term1 = np.sum(z2 - np.log(z2))
    term2 = np.sum(z1 - tau_z1)
    term3 = np.sum(np.log(np.divide(tau_z1+1, z1)))
    term4 = alpha*np.sum(np.log(tau_z1/alpha+1))

    obj = term1+term2+term3+term4 + residual/(M*sigma2) + (L-H)*np.log(sigma2)

    return obj


def phi0(x):
    return x-np.log(x)


def phi1(x, alpha):
    return np.log(tau(x, alpha)+1) + alpha*np.log(tau(x, alpha)/alpha + 1) \
        - tau(x, alpha)


def tau(x, alpha):
    return 0.5 * (x-(1+alpha) + np.sqrt((x-(1+alpha))**2 - 4*alpha))
5,693
30.458564
91
py
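A quick sanity check (not part of the file above; it assumes the module is importable as `matrix_factorization`): EVBMF on a noisy rank-5 matrix should retain roughly 5 singular values, with the exact count depending on the noise level.

# hedged sketch: EVBMF rank recovery on synthetic low-rank data
import torch
from matrix_factorization import EVBMF

torch.manual_seed(0)
Y = torch.randn(32, 5) @ torch.randn(5, 64) + 0.01 * torch.randn(32, 64)
U, S, V = EVBMF(Y)  # Y has shape (L, M) with L <= M
print(S.shape[0])   # estimated rank, ~5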
PIBConv
PIBConv-main/cnn/ADP_utils/classesADP.py
# Classes from https://github.com/mahdihosseini/ADP/blob/5b2508e8c4c513f8a556d57fd312a1222e2dfe77/src/htt_def.py
# Using p classes
# TODO add non-p classes from ADP-Release-Flat

classesADP = {
    "L1": {
        "numClasses": 9,
        "classesNames": ["E", "C", "H", "S", "A", "M", "N", "G", "T"]
    },
    "L2": {
        "numClasses": 26,
        "classesNames": ["E", "E.M", "E.T", "C", "C.D", "C.L", "H", "H.E",
                         "H.K", "H.Y", "S", "S.M", "S.C", "S.R", "A", "A.W",
                         "A.M", "M", "N", "N.P", "N.R", "N.G", "G", "G.O",
                         "G.N", "T"]
    },
    "L3": {
        "numClasses": 33,
        "classesNames": ["E", "E.M", "E.M.S", "E.M.C", "E.T", "E.T.S",
                         "E.T.C", "C", "C.D", "C.D.I", "C.D.R", "C.L", "H",
                         "H.E", "H.K", "H.Y", "S", "S.M", "S.C", "S.R", "A",
                         "A.W", "A.M", "M", "N", "N.P", "N.R", "N.G",
                         "N.G.M", "G", "G.O", "G.N", "T"]
    }
}
1,588
31.428571
112
py
PIBConv
PIBConv-main/cnn/ADP_utils/__init__.py
0
0
0
py
PIBConv
PIBConv-main/cnn/ADP_utils/thresholded_metrics.py
import os

import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from .classesADP import classesADP
from sklearn.metrics import roc_curve
from sklearn.metrics import auc


class Thresholded_Metrics:
    def __init__(self, targets, predictions, level, network, epoch):
        self.target = targets.numpy()
        self.predictions = predictions.numpy()

        # class names
        self.class_names = classesADP[level]['classesNames']

        # path
        cur_path = os.path.abspath(os.path.curdir)
        self.eval_dir = os.path.join(cur_path, 'eval')
        if not os.path.exists(self.eval_dir):
            os.makedirs(self.eval_dir)

        # sess_id
        self.sess_id = 'adp_' + str(network) + '_' + str(level) + \
            '_Epoch_' + str(epoch + 1) + '_Release1_1um_bicubic'

        # Get optimal class thresholds
        self.class_thresholds, self.class_fprs, self.class_tprs, \
            self.auc_measures = self.get_optimal_thresholds()

        # Get thresholded class accuracies
        self.metric_tprs, self.metric_fprs, self.metric_tnrs, \
            self.metric_fnrs, self.metric_accs, self.metric_f1s = \
            self.get_thresholded_metrics()

        # self.auc_measures_U = [self.auc_measures[i]
        #                        for i in self.unaugmented_class_inds]
        # self.auc_measures_U.append(self.auc_measures[-1])

        # Plot ROC curves
        self.plot_rocs()

        # Write metrics to excel
        self.write_to_excel()

    def get_optimal_thresholds(self):
        def get_opt_thresh(tprs, fprs, thresholds):
            return thresholds[np.argmin(abs(tprs - (1 - fprs)))]

        class_fprs = []
        class_tprs = []
        class_thresholds = []
        auc_measures = []
        thresh_rng = [1/3, 1]
        for iter_class in range(self.predictions.shape[1]):
            fprs, tprs, thresholds = \
                roc_curve(self.target[:, iter_class],
                          self.predictions[:, iter_class])
            auc_measure = auc(fprs, tprs)
            opt_thresh = min(max(get_opt_thresh(tprs, fprs, thresholds),
                                 thresh_rng[0]), thresh_rng[1])
            class_thresholds.append(opt_thresh)
            class_fprs.append(fprs)
            class_tprs.append(tprs)
            auc_measures.append(auc_measure)
        auc_measures.append(
            sum(np.sum(self.target, 0) * auc_measures) / np.sum(self.target))
        return class_thresholds, class_fprs, class_tprs, auc_measures

    def get_thresholded_metrics(self):
        predictions_thresholded = self.predictions >= self.class_thresholds

        with np.errstate(divide='ignore', invalid='ignore'):
            # Obtain Metrics
            cond_positive = np.sum(self.target == 1, 0)
            cond_negative = np.sum(self.target == 0, 0)

            true_positive = np.sum((self.target == 1) &
                                   (predictions_thresholded == 1), 0)
            false_positive = np.sum((self.target == 0) &
                                    (predictions_thresholded == 1), 0)
            true_negative = np.sum((self.target == 0) &
                                   (predictions_thresholded == 0), 0)
            false_negative = np.sum((self.target == 1) &
                                    (predictions_thresholded == 0), 0)

            class_tprs = true_positive / cond_positive
            class_fprs = false_positive / cond_negative
            class_tnrs = true_negative / cond_negative
            class_fnrs = false_negative / cond_positive

            class_accs = np.sum(self.target == predictions_thresholded, 0) / \
                predictions_thresholded.shape[0]
            class_f1s = (2 * true_positive) / \
                (2 * true_positive + false_positive + false_negative)

            #
            cond_positive_T = np.sum(self.target == 1)
            cond_negative_T = np.sum(self.target == 0)

            true_positive_T = np.sum((self.target == 1) &
                                     (predictions_thresholded == 1))
            false_positive_T = np.sum((self.target == 0) &
                                      (predictions_thresholded == 1))
            true_negative_T = np.sum((self.target == 0) &
                                     (predictions_thresholded == 0))
            false_negative_T = np.sum((self.target == 1) &
                                      (predictions_thresholded == 0))

            tpr_T = true_positive_T / cond_positive_T
            fpr_T = false_positive_T / cond_negative_T
            tnr_T = true_negative_T / cond_negative_T
            fnr_T = false_negative_T / cond_positive_T

            acc_T = np.sum(self.target == predictions_thresholded) / \
                np.prod(predictions_thresholded.shape)
            f1_T = (2 * true_positive_T) / \
                (2 * true_positive_T + false_positive_T + false_negative_T)

            #
            class_tprs = np.append(class_tprs, tpr_T)
            class_fprs = np.append(class_fprs, fpr_T)
            class_tnrs = np.append(class_tnrs, tnr_T)
            class_fnrs = np.append(class_fnrs, fnr_T)
            class_accs = np.append(class_accs, acc_T)
            class_f1s = np.append(class_f1s, f1_T)

        return class_tprs, class_fprs, class_tnrs, class_fnrs, \
            class_accs, class_f1s

    def plot_rocs(self):
        plt.figure(1)
        plt.plot([0, 1], [0, 1], 'k--')
        for iter_class in range(len(self.class_names)):
            plt.plot(self.class_fprs[iter_class],
                     self.class_tprs[iter_class],
                     label=self.class_names[iter_class])
        plt.xlabel('False positive rate')
        plt.ylabel('True positive rate')
        plt.title('ROC curve')
        plt.legend(loc='best')
        # plt.show()
        plt.savefig(os.path.join(self.eval_dir,
                                 'ROC_' + self.sess_id + '.png'),
                    bbox_inches='tight')
        plt.close()

    def write_to_excel(self):
        sess_xlsx_path = os.path.join(self.eval_dir,
                                      'metrics_' + self.sess_id + '.xlsx')
        df = pd.DataFrame({'HTT': self.class_names + ['Average'],
                           'TPR': list(self.metric_tprs),
                           'FPR': list(self.metric_fprs),
                           'TNR': list(self.metric_tnrs),
                           'FNR': list(self.metric_fnrs),
                           'ACC': list(self.metric_accs),
                           'F1': list(self.metric_f1s),
                           'AUC': self.auc_measures},
                          columns=['HTT', 'TPR', 'FPR', 'TNR', 'FNR',
                                   'ACC', 'F1', 'AUC'])
        df.to_excel(sess_xlsx_path)
6,205
46.738462
146
py
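A small illustration (not part of the file above) of the threshold rule used in get_optimal_thresholds: pick the ROC point where TPR is closest to 1 - FPR, then clamp the threshold to [1/3, 1].

# hedged sketch of the optimal-threshold selection on toy data
import numpy as np
from sklearn.metrics import roc_curve

y_true = np.array([0, 0, 1, 1, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2])
fprs, tprs, thresholds = roc_curve(y_true, y_score)
opt = thresholds[np.argmin(abs(tprs - (1 - fprs)))]
opt = min(max(opt, 1/3), 1)  # clamp to the [1/3, 1] range
print(opt)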
clx-branch-23.04
clx-branch-23.04/examples/run_dga_training.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Example Usage:

python run_dga_training.py \
    --training-data benign_and_dga_domains.csv \
    --output-dir trained_models \
    --batch-size 10000 \
    --epochs 2
"""
import os

import cudf
import torch
import argparse
from datetime import datetime

from clx.analytics.dga_detector import DGADetector

LR = 0.001
N_LAYERS = 4
CHAR_VOCAB = 128
HIDDEN_SIZE = 100
N_DOMAIN_TYPE = 2


def main():
    epochs = int(args["epochs"])
    input_filepath = args["training_data"]
    batch_size = int(args["batch_size"])
    output_dir = args["output_dir"]

    # load input data to gpu memory
    input_df = cudf.read_csv(input_filepath)
    train_data = input_df['domain']
    labels = input_df['type']
    del input_df

    dd = DGADetector(lr=LR)
    dd.init_model(
        n_layers=N_LAYERS,
        char_vocab=CHAR_VOCAB,
        hidden_size=HIDDEN_SIZE,
        n_domain_type=N_DOMAIN_TYPE,
    )
    dd.train_model(train_data, labels, batch_size=batch_size, epochs=epochs,
                   train_size=0.7)

    if not os.path.exists(output_dir):
        print("Creating directory '{}'".format(output_dir))
        os.makedirs(output_dir)

    now = datetime.now()
    model_filename = "rnn_classifier_{}.bin".format(
        now.strftime("%Y-%m-%d_%H_%M_%S"))
    model_filepath = os.path.join(output_dir, model_filename)
    print("Saving trained model to location '{}'".format(model_filepath))
    dd.save_model(model_filepath)


def parse_cmd_args():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser(description="DGA detection model training script")
    ap.add_argument(
        "--training-data", required=True, help="CSV with domain and type fields"
    )
    ap.add_argument(
        "--output-dir", required=True, help="output directory to save new model files"
    )
    ap.add_argument(
        "--batch-size",
        required=True,
        help="Dividing dataset into number of batches or sets or parts",
    )
    ap.add_argument(
        "--epochs",
        required=True,
        help="One epoch is when an entire dataset is passed forward and backward through the neural network only once",
    )
    args = vars(ap.parse_args())
    return args


# execution starts here
if __name__ == "__main__":
    args = parse_cmd_args()
    main()
2,926
31.522222
119
py
clx-branch-23.04
clx-branch-23.04/examples/streamz/python/dga_detection.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import dask

from clx_streamz_tools import utils
from clx_streamz_tools import streamz_workflow


class DGADetectionWorkflow(streamz_workflow.StreamzWorkflow):
    def inference(self, messages_df):
        # Messages will be received and run through DGA inferencing
        worker = dask.distributed.get_worker()
        batch_start_time = int(round(time.time()))
        result_size = messages_df.shape[0]
        print("Processing batch size: " + str(result_size))
        dd = worker.data["dga_detector"]
        preds = dd.predict(messages_df["domain"])
        messages_df["preds"] = preds
        return (messages_df, batch_start_time, result_size)

    def worker_init(self):
        # Initialization for each dask worker
        from clx.analytics.dga_detector import DGADetector

        worker = dask.distributed.get_worker()
        dd = DGADetector()
        print(
            "Initializing Dask worker: "
            + str(worker)
            + " with dga model. Model File: "
            + str(self.args.model)
        )
        dd.load_model(self.args.model)
        # this dict can be used for adding more objects to distributed dask worker
        obj_dict = {"dga_detector": dd}
        worker = utils.init_dask_workers(worker, self.config, obj_dict)


if __name__ == "__main__":
    dga_detection = DGADetectionWorkflow()
    dga_detection.start()
1,968
35.462963
82
py
clx-branch-23.04
clx-branch-23.04/examples/streamz/python/phishing_detection.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import dask
import cudf

from clx_streamz_tools import utils
from clx_streamz_tools import streamz_workflow


class PhisingDetectionWorkflow(streamz_workflow.StreamzWorkflow):
    # note: `self` was missing from both method signatures in the original
    # file even though worker_init references self.args; added here so the
    # methods are valid instance-method overrides of StreamzWorkflow
    def inference(self, messages):
        # Messages will be received and run through sequence classifier
        # inferencing
        worker = dask.distributed.get_worker()
        batch_start_time = int(round(time.time()))
        df = cudf.DataFrame()
        if type(messages) == str:
            df["stream"] = [messages.decode("utf-8")]
        elif type(messages) == list and len(messages) > 0:
            df["stream"] = [msg.decode("utf-8") for msg in messages]
        else:
            print("ERROR: Unknown type encountered in inference")
        result_size = df.shape[0]
        print("Processing batch size: " + str(result_size))
        pred, prob = worker.data["seq_classifier"].predict(df["stream"])
        results_gdf = cudf.DataFrame({"pred": pred, "prob": prob})
        return (results_gdf, batch_start_time, result_size)

    def worker_init(self):
        # Initialization for each dask worker
        from clx.analytics.sequence_classifier import SequenceClassifier

        worker = dask.distributed.get_worker()
        seq_classifier = SequenceClassifier()
        print(
            "Initializing Dask worker: "
            + str(worker)
            + " with sequence classifier model. Model File: "
            + str(self.args.model)
        )
        seq_classifier.init_model(self.args.model)
        # this dict can be used for adding more objects to distributed dask worker
        obj_dict = {"seq_classifier": seq_classifier}
        worker = utils.init_dask_workers(worker, self.config, obj_dict)


if __name__ == "__main__":
    phishing_detection = PhisingDetectionWorkflow()
    phishing_detection.start()
2,406
37.822581
83
py
clx-branch-23.04
clx-branch-23.04/examples/streamz/python/setup.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup

setup(
    name="clx_streamz_tools",
    version="0.1",
    author="NVIDIA Corporation",
    packages=["clx_streamz_tools"],
    include_package_data=True,
)
777
31.416667
74
py
clx-branch-23.04
clx-branch-23.04/examples/streamz/python/cybert.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import dask
import cudf
import pandas as pd

from clx_streamz_tools import utils
from clx_streamz_tools import streamz_workflow


class CybertWorkflow(streamz_workflow.StreamzWorkflow):
    def inference(self, messages):
        # Messages will be received and run through cyBERT inferencing
        worker = dask.distributed.get_worker()
        batch_start_time = int(round(time.time()))
        df = cudf.DataFrame()
        if type(messages) == str:
            df["stream"] = [messages.decode("utf-8")]
        elif type(messages) == list and len(messages) > 0:
            df["stream"] = [msg.decode("utf-8") for msg in messages]
        else:
            print("ERROR: Unknown type encountered in inference")
        result_size = df.shape[0]
        print("Processing batch size: " + str(result_size))
        parsed_df, confidence_df = worker.data["cybert"].inference(df["stream"])
        confidence_df = confidence_df.add_suffix("_confidence")
        parsed_df = pd.concat([parsed_df, confidence_df], axis=1)
        return (parsed_df, batch_start_time, result_size)

    def worker_init(self):
        # Initialization for each dask worker
        from clx.analytics.cybert import Cybert

        worker = dask.distributed.get_worker()
        cy = Cybert()
        print(
            "Initializing Dask worker: "
            + str(worker)
            + " with cybert model. Model File: "
            + str(self.args.model)
            + " Label Map: "
            + str(self.args.label_map)
        )
        cy.load_model(self.args.model, self.args.label_map)
        # this dict can be used for adding more objects to distributed dask worker
        obj_dict = {"cybert": cy}
        worker = utils.init_dask_workers(worker, self.config, obj_dict)


if __name__ == "__main__":
    cybert = CybertWorkflow()
    cybert.start()
2,443
36.030303
82
py
clx-branch-23.04
clx-branch-23.04/examples/streamz/python/clx_streamz_tools/utils.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json
import sys  # needed for sys.exit in init_dask_workers; missing in the original
import time

import yaml
import dask
import argparse
from datetime import datetime
from collections import deque

from distributed import Client
from elasticsearch import helpers
from dask_cuda import LocalCUDACluster

SINK_KAFKA = "kafka"
SINK_FS = "filesystem"
SINK_ES = "elasticsearch"
TIME_FORMAT = "%Y-%m-%d_%H-%M-%S-%f"


def create_dask_client():
    """
    Creates dask client

    :return: LocalCUDACluster: Dask client
    :rtype: object
    """
    print("Creating local cuda cluster as no dask scheduler is provided.")
    cluster = LocalCUDACluster()
    client = Client(cluster)
    print(client)
    return client


def kafka_sink(output_topic, parsed_df):
    """
    Writes cudf to Kafka topic.

    :param output_topic: Kafka topic name
    :type output_topic: str
    :param parsed_df: Parsed/Processed data
    :type parsed_df: cudf
    """
    worker = dask.distributed.get_worker()
    producer = worker.data["sink"]
    json_str = parsed_df.to_json(orient="records", lines=True)
    json_recs = json_str.split("\n")
    for json_rec in json_recs:
        try:
            producer.poll(0)
            producer.produce(output_topic, json_rec)
        except BufferError as be:
            producer.poll(0.1)
            print(be)
    producer.flush()


def fs_sink(config, parsed_df):
    """
    Writes cudf to Filesystem.

    :param config: Configuration which contains file format details
    :type config: dict
    :param parsed_df: Parsed/Processed data
    :type parsed_df: cudf
    """
    filename = datetime.now().strftime(TIME_FORMAT) + config["file_extension"]
    filepath = os.path.join(config["output_dir"], filename)
    parsed_df.to_csv(filepath, sep=config["col_delimiter"], index=False)


def es_sink(config, parsed_df):
    """
    Writes cudf to Elasticsearch.

    :param config: Configuration which contains Elasticsearch cluster details
    :type config: dict
    :param parsed_df: Parsed/Processed data
    :type parsed_df: cudf
    """
    worker = dask.distributed.get_worker()
    es_client = worker.data["sink"]
    parsed_df["_index"] = config["index"]
    json_str = parsed_df.to_json(orient="records")
    docs = json.loads(json_str)
    pb = helpers.parallel_bulk(
        es_client, docs, chunk_size=10000, thread_count=10, queue_size=10
    )
    deque(pb, maxlen=0)


def calc_benchmark(processed_data, size_per_log):
    """
    Calculates benchmark for the streamz workflow

    :param processed_data: cudf dataframe
    :type processed_data: cudf
    :param size_per_log: Average size of a single log, in KB
    :type size_per_log: double
    :return: (time_diff, throughput_mbps, avg_batch_size): Benchmark output
    :rtype: (double, double, double)
    """
    t1 = int(round(time.time() * 1000))
    t2 = 0
    size = 0.0
    batch_count = 0
    # Find min and max time while keeping track of batch count and size
    for result in processed_data:
        (ts1, ts2, result_size) = (result[1], result[2], result[3])
        if ts1 == 0 or ts2 == 0:
            continue
        batch_count = batch_count + 1
        t1 = min(t1, ts1)
        t2 = max(t2, ts2)
        size += result_size * size_per_log
    time_diff = t2 - t1
    throughput_mbps = size / (1024.0 * time_diff) if time_diff > 0 else 0
    avg_batch_size = size / (1024.0 * batch_count) if batch_count > 0 else 0
    return (time_diff, throughput_mbps, avg_batch_size)


def load_yaml(yaml_file):
    """
    Returns a dictionary of a configuration contained in the given yaml file

    :param yaml_file: YAML configuration filepath
    :type yaml_file: str
    :return: config_dict: Configuration dictionary
    :rtype: dict
    """
    with open(yaml_file) as yaml_file:
        config_dict = yaml.safe_load(yaml_file)
    config_dict["sink"] = config_dict["sink"].lower()
    return config_dict


def init_dask_workers(worker, config, obj_dict=None):
    """
    Initialize for all dask workers

    :param worker: Dask worker
    :type worker: object
    :param config: Configuration which contains source and sink details
    :type config: dict
    :param obj_dict: Objects that are required to be present on every dask worker
    :type obj_dict: dict
    :return: worker: Dask worker
    :rtype: object
    """
    if obj_dict is not None:
        for key in obj_dict.keys():
            worker.data[key] = obj_dict[key]
    sink = config["sink"]
    if sink == SINK_KAFKA:
        import confluent_kafka as ck

        producer_conf = config["kafka_conf"]["producer_conf"]
        print("Producer conf: " + str(producer_conf))
        producer = ck.Producer(producer_conf)
        worker.data["sink"] = producer
    elif sink == SINK_ES:
        from elasticsearch import Elasticsearch

        es_conf = config["elasticsearch_conf"]
        if "username" in es_conf and "password" in es_conf:
            es_client = Elasticsearch(
                [
                    es_conf["url"].format(
                        es_conf["username"], es_conf["password"], es_conf["port"]
                    )
                ],
                use_ssl=True,
                verify_certs=True,
                ca_certs=es_conf["ca_file"],
            )
        else:
            es_client = Elasticsearch(
                [{"host": config["elasticsearch_conf"]["url"]}],
                port=config["elasticsearch_conf"]["port"],
            )
        worker.data["sink"] = es_client
    elif sink == SINK_FS:
        print(
            "Streaming process will write the output to location '{}'".format(
                config["output_dir"]
            )
        )
    else:
        print(
            "No valid sink provided in the configuration file. "
            "Please provide kafka/elasticsearch/filesystem"
        )
        sys.exit(-1)
    print("Successfully initialized dask worker " + str(worker))
    return worker


def create_dir(sink, dir_path):
    """
    :param sink: Sink type mentioned in the configuration file
    :type sink: str
    :param dir_path: Directory that needs to be created
    :type dir_path: str
    """
    if sink == SINK_FS and not os.path.exists(dir_path):
        print("Creating directory '{}'".format(dir_path))
        os.makedirs(dir_path)


def parse_arguments():
    """
    Parse script arguments
    """
    parser = argparse.ArgumentParser(
        description="Streamz and Dask. \
                     Data will be read from the input kafka topic, \
                     processed using clx streamz workflows."
    )
    required_args = parser.add_argument_group("required arguments")
    required_args.add_argument(
        "-c", "--conf", help="Source and Sink configuration filepath"
    )
    parser.add_argument("-m", "--model", help="Model filepath")
    parser.add_argument("-l", "--label_map", help="Label map filepath")
    parser.add_argument(
        "--max_batch_size",
        default=1000,
        type=int,
        help="Max batch size to read from kafka",
    )
    required_args.add_argument(
        "--poll_interval", type=str, help="Polling interval (ex: 60s)"
    )
    parser.add_argument(
        "--benchmark",
        help="Captures benchmark, including throughput estimates, with provided avg log size in KB. (ex: 500 or 0.1)",
        type=float,
    )
    args = parser.parse_args()
    return args
7,863
30.709677
118
py
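A minimal sketch (not part of the file above) of the tuple layout that calc_benchmark expects: each result is (dataframe, batch_start_ts, batch_end_ts, record_count) with timestamps in seconds; it assumes the clx_streamz_tools package and its GPU dependencies are installed.

# hedged sketch: calc_benchmark on synthetic batch results
from clx_streamz_tools import utils

processed = [
    (None, 100, 105, 2000),   # batch 1: 2000 records between t=100s and t=105s
    (None, 105, 112, 3000),   # batch 2: 3000 records between t=105s and t=112s
]
time_diff, throughput_mbps, avg_batch_size = utils.calc_benchmark(
    processed, size_per_log=0.5)  # avg log size in KB
print(time_diff, throughput_mbps, avg_batch_size)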
clx-branch-23.04
clx-branch-23.04/examples/streamz/python/clx_streamz_tools/__init__.py
0
0
0
py
clx-branch-23.04
clx-branch-23.04/examples/streamz/python/clx_streamz_tools/streamz_workflow.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import time

import dask
import signal

from clx_streamz_tools import utils
from streamz import Stream
from tornado import ioloop
from abc import ABC, abstractmethod


class StreamzWorkflow(ABC):
    def __init__(self):
        self.args = utils.parse_arguments()
        self.config = utils.load_yaml(self.args.conf)
        self.kafka_conf = self.config["kafka_conf"]
        self.sink_dict = {
            "kafka": self.sink_to_kafka,
            "elasticsearch": self.sink_to_es,
            "filesystem": self.sink_to_fs,
        }

    def sink_to_kafka(self, processed_data):
        """
        Writes the result to kafka topic.

        :param processed_data: Parsed/Processed data.
        :type processed_data: cudf
        :return: processed_data
        :rtype: cudf
        """
        utils.kafka_sink(self.kafka_conf["output_topic"], processed_data[0])
        return processed_data

    def sink_to_es(self, processed_data):
        """
        Writes the result to Elasticsearch.

        :param processed_data: Parsed/Processed data.
        :type processed_data: cudf
        :return: processed_data
        :rtype: cudf
        """
        utils.es_sink(self.config["elasticsearch_conf"], processed_data[0])
        return processed_data

    def sink_to_fs(self, processed_data):
        """
        Writes the result to Filesystem.

        :param processed_data: Parsed/Processed data.
        :type processed_data: cudf
        :return: processed_data
        :rtype: cudf
        """
        utils.fs_sink(self.config, processed_data[0])
        return processed_data

    def signal_term_handler(self, signal, frame):
        """
        Receives signal and calculates benchmark if indicated in argument.
        """
        print("Exiting streamz script...")
        if self.args.benchmark:
            (time_diff, throughput_mbps, avg_batch_size) = utils.calc_benchmark(
                output, self.args.benchmark
            )
            print("*** BENCHMARK ***")
            print(
                "Job duration: {:.3f} secs, Throughput(mb/sec):{:.3f}, Avg. Batch size(mb):{:.3f}".format(
                    time_diff, throughput_mbps, avg_batch_size
                )
            )
        sys.exit(0)

    def _start_stream(self):
        # Define the streaming pipeline.
        if self.config["cudf_engine"]:
            source = Stream.from_kafka_batched(
                self.kafka_conf["input_topic"],
                self.kafka_conf["consumer_conf"],
                poll_interval=self.args.poll_interval,
                # npartitions value varies based on kafka topic partitions configuration.
                npartitions=self.kafka_conf["n_partitions"],
                asynchronous=True,
                dask=True,
                engine="cudf",
                max_batch_size=self.args.max_batch_size,
            )
        else:
            source = Stream.from_kafka_batched(
                self.kafka_conf["input_topic"],
                self.kafka_conf["consumer_conf"],
                poll_interval=self.args.poll_interval,
                # npartitions value varies based on kafka topic partitions configuration.
                npartitions=self.kafka_conf["n_partitions"],
                asynchronous=True,
                dask=True,
                max_batch_size=self.args.max_batch_size,
            )
        sink = self.config["sink"]
        global output
        # If benchmark arg is True, use streamz to compute benchmark
        if self.args.benchmark:
            print("Benchmark will be calculated")
            output = (
                source.map(self.inference)
                .map(lambda x: (x[0], x[1], int(round(time.time())), x[2]))
                .map(self.sink_dict[sink])
                .gather()
                .sink_to_list()
            )
        else:
            output = source.map(self.inference).map(self.sink_dict[sink]).gather()
        source.start()

    def start(self):
        """
        Configure the workflow settings and starts streaming messages
        """
        # create output directory if not exists when sink is set to file system
        utils.create_dir(self.config["sink"], self.config["output_dir"])
        # Handle script exit
        signal.signal(signal.SIGTERM, self.signal_term_handler)
        signal.signal(signal.SIGINT, self.signal_term_handler)
        client = utils.create_dask_client()
        client.run(self.worker_init)
        print("Consumer conf: " + str(self.kafka_conf["consumer_conf"]))
        loop = ioloop.IOLoop.current()
        loop.add_callback(self._start_stream)
        try:
            loop.start()
        except KeyboardInterrupt:
            worker = dask.distributed.get_worker()
            sink = worker.data["sink"]
            if self.config["sink"] == utils.SINK_KAFKA:
                sink.close()
            elif self.config["sink"] == utils.SINK_ES:
                sink.transport.close()
            else:
                pass
            loop.stop()

    @abstractmethod
    def inference(self, message_df):
        pass

    @abstractmethod
    def worker_init(self):
        pass
5,750
33.232143
106
py
clx-branch-23.04
clx-branch-23.04/python/setup.py
from setuptools import setup, find_packages
import versioneer

setup(
    name="clx",
    version=versioneer.get_version(),
    description="CLX",
    author="NVIDIA Corporation",
    packages=find_packages(include=["clx", "clx.*"]),
    package_data={
        "clx.analytics": ["resources/*.txt"],
        "clx.parsers": ["resources/*.yaml"],
        "clx.dns": ["resources/*.txt"],
        "clx.heuristics": ["resources/*.csv"]
    },
    license="Apache",
    cmdclass=versioneer.get_cmdclass()
)
502
22.952381
53
py
clx-branch-23.04
clx-branch-23.04/python/versioneer.py
# Version: 0.18 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. 
To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". 
To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. 
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications that then
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.

[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.

## Updating Versioneer

To upgrade your project to a new release of Versioneer, do the following:

* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
  indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
  `SRC/_version.py`
* commit any changed files

## Future Directions

This tool is designed to be easily extended to other version-control systems:
all VCS-specific components are in separate directories like src/git/ . The
top-level `versioneer.py` script is assembled from these components by running
make-versioneer.py . In the future, make-versioneer.py will take a VCS name as
an argument, and will construct a version of `versioneer.py` that is specific
to the given VCS. It might also take the configuration arguments that are
currently provided manually during installation by editing setup.py .
Alternatively, it might go the other direction and include code from all
supported VCS systems, reducing the number of intermediate scripts.

## License

To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .

"""

from __future__ import print_function
try:
    import configparser
except ImportError:
    import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys


class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to find the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. 
# setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
    0.post.devDISTANCE
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%%d" %% pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%%d" %% pieces["distance"]
    return rendered


def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%%d" %% pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%%s" %% pieces["short"]
    else:
        # exception #1
        rendered = "0.post%%d" %% pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%%s" %% pieces["short"]
    return rendered


def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%%d" %% pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%%d" %% pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered


def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%%s'" %% style)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}


def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". 
tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json

version_json = '''
%s
'''  # END VERSION_JSON


def get_versions():
    return json.loads(version_json)
"""


def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    mo = re.search(r"version_json = '''\n(.*)'''  # END VERSION_JSON",
                   contents, re.M | re.S)
    if not mo:
        mo = re.search(r"version_json = '''\r\n(.*)'''  # END VERSION_JSON",
                       contents, re.M | re.S)
    if not mo:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(mo.group(1))


def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    os.unlink(filename)
    contents = json.dumps(versions, sort_keys=True,
                          indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % contents)

    print("set %s to '%s'" % (filename, versions["version"]))


def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+"


def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered


def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered


def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered


def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags.
    HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}


class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""


def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}


def get_version():
    """Get the short version string for this project."""
    return get_versions()["version"]


def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to its pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52

    cmds = {}

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?
# we override different "build_py" commands for both environments if "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: from py2exe.build_exe import py2exe as _py2exe # py2 class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. 
You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): """Main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. 
do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
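# A minimal sketch of the setup.py that consumes the API above, assuming
# versioneer.py sits beside it and setup.cfg carries the [versioneer] section
# shown in CONFIG_ERROR; "myproject" is a hypothetical name. Kept commented
# out so this module remains importable without side effects:
#
#     import versioneer
#     from setuptools import setup
#
#     setup(
#         name="myproject",
#         version=versioneer.get_version(),    # rendered per cfg.style
#         cmdclass=versioneer.get_cmdclass(),  # adds version/build_py/sdist
#     )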
68,611
36.636862
79
py
clx-branch-23.04
clx-branch-23.04/python/clx/ip.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import numpy as np


def ip_to_int(values):
    """
    Convert string column of IP addresses to integer values.

    **Addresses must be IPv4. IPv6 not yet supported.**

    Parameters
    ----------
    values : cudf.Series, IPv4 address
        IP addresses to be converted

    Returns
    -------
    rtype : cudf.Series, integer
        Integer representations of IP addresses

    Examples
    --------
    >>> import clx.ip
    >>> import cudf
    >>> clx.ip.ip_to_int(cudf.Series(["192.168.0.1","10.0.0.1"]))
    0    3232235521
    1     167772161
    dtype: int64
    """
    return cudf.Series(values.str.ip2int())


def int_to_ip(values):
    """
    Convert integer column to IP addresses.

    **Addresses must be IPv4. IPv6 not yet supported.**

    Parameters
    ----------
    values : cudf.Series, integer
        Integer representations of IP addresses

    Returns
    -------
    rtype : cudf.Series, IPv4 address
        IP addresses to be converted

    Examples
    --------
    >>> import clx.ip
    >>> import cudf
    >>> clx.ip.int_to_ip(cudf.Series([3232235521, 167772161]))
    0    192.168.0.1
    1       10.0.0.1
    dtype: object
    """
    return cudf.Series(values._column.int2ip())


def is_ip(ips):
    """
    Indicates whether each address is a valid IPv4 string.

    **Addresses must be IPv4. IPv6 not yet supported.**

    Parameters
    ----------
    ips : IPv4 address
        IP addresses to be checked

    Returns
    -------
    rtype : cudf.Series, booleans
        Boolean values true or false

    Examples
    --------
    >>> import clx.ip
    >>> import cudf
    >>> clx.ip.is_ip(cudf.Series(["192.168.0.1","10.123.0"]))
    0     True
    1    False
    dtype: bool
    """
    is_ip_REGEX = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
    return ips.str.match(is_ip_REGEX)


def is_reserved(ips):
    """
    Indicates whether each address is reserved.

    **Addresses must be IPv4. IPv6 not yet supported.**

    Parameters
    ----------
    ips : IPv4 address
        IP addresses to be checked

    Returns
    -------
    rtype : cudf.Series, booleans
        Boolean values true or false

    Examples
    --------
    >>> import clx.ip
    >>> import cudf
    >>> clx.ip.is_reserved(cudf.Series(["127.0.0.1","10.0.0.1"]))
    0    False
    1    False
    dtype: bool
    """
    reserved_ipv4_REGEX = r"^(2(4[0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$"
    return ips.str.match(reserved_ipv4_REGEX)


def is_loopback(ips):
    """
    Indicates whether each address is loopback.

    **Addresses must be IPv4. IPv6 not yet supported.**

    Parameters
    ----------
    ips : IPv4 address
        IP addresses to be checked

    Returns
    -------
    rtype : cudf.Series, booleans
        Boolean values true or false

    Examples
    --------
    >>> import clx.ip
    >>> import cudf
    >>> clx.ip.is_loopback(cudf.Series(["127.0.0.1","10.0.0.1"]))
    0     True
    1    False
    dtype: bool
    """
    loopback_ipv4_REGEX = r"^127\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$"
    return ips.str.match(loopback_ipv4_REGEX)


def is_link_local(ips):
    """
    Indicates whether each address is link local.
**Addresses must be IPv4. IPv6 not yet supported.** Parameters ---------- ips : IPv4 address IP addresses to be checked Returns ------- rtype : cudf.Series, booleans Boolean values true or false Examples -------- >>> import clx.ip >>> import cudf >>> clx.ip.is_link_local(cudf.Series(["127.0.0.1","169.254.123.123"])) 0 False 1 True dtype: bool """ link_local_ipv4_REGEX = r"^169\.254\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$" return ips.str.match(link_local_ipv4_REGEX) def is_unspecified(ips): """ Indicates whether each address is unspecified. **Addresses must be IPv4. IPv6 not yet supported.** Parameters ---------- ips : IPv4 address IP addresses to be checked Returns ------- rtype : cudf.Series, booleans Boolean values true or false Examples -------- >>> import clx.ip >>> import cudf >>> clx.ip.is_unspecified(cudf.Series(["127.0.0.1","10.0.0.1"])) 0 False 1 False dtype: bool """ unspecified_REGEX = r"^0\.0\.0\.0$" return ips.str.match(unspecified_REGEX) def is_multicast(ips): """ Indicates whether each address is multicast. **Addresses must be IPv4. IPv6 not yet supported.** Parameters ---------- ips : IPv4 address IP addresses to be checked Returns ------- rtype : cudf.Series, booleans Boolean values true or false Examples -------- >>> import clx.ip >>> import cudf >>> clx.ip.is_multicast(cudf.Series(["127.0.0.1","224.0.0.0"])) 0 False 1 True dtype: bool """ is_multicast_ipv4_REGEX = r"^(2(2[4-9]|3[0-9]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$" return ips.str.match(is_multicast_ipv4_REGEX) def is_private(ips): """ Indicates whether each address is private. **Addresses must be IPv4. 
IPv6 not yet supported.** Parameters ---------- ips : IPv4 address IP addresses to be checked Returns ------- rtype : cudf.Series, booleans Boolean values true or false Examples -------- >>> import clx.ip >>> import cudf >>> clx.ip.is_private(cudf.Series(["127.0.0.1","207.46.13.151"])) 0 True 1 False dtype: bool """ private_REGEX = r"((^0\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^10\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^127\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^169\.254\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^172\.(1[6-9]|2[0-9]|3[0-1])\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^192\.0\.0\.([0-7])$)|(^192\.0\.0\.(1(7[0-1]))$)|(^192\.0\.2\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^192\.168\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^198\.(1[8-9])\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^198\.51\.100\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^203\.0\.113\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^(2(4[0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)|(^255\.255\.255\.255$))" return ips.str.match(private_REGEX) def is_global(ips): """ Indicates whether each address is global. **Addresses must be IPv4. IPv6 not yet supported.** Parameters ---------- ips : IPv4 address IP addresses to be checked Returns ------- rtype : cudf.Series, booleans Boolean values true or false Examples -------- >>> import clx.ip >>> import cudf >>> clx.ip.is_global(cudf.Series(["127.0.0.1","207.46.13.151"])) 0 False 1 True dtype: bool """ is_global_REGEX = r"^(100\.(6[4-9]|[7-9][0-9]|1([0-1][0-9]|2[0-7]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))\.([0-9]|[1-9][0-9]|1([0-9][0-9])|2([0-4][0-9]|5[0-5]))$)" part1 = ips.str.match(is_global_REGEX) part2 = is_private(ips) result = ~part1 & ~part2 return result def _netmask_kernel(idx, out1, out2, out3, out4, kwarg1): for i, ipnum in enumerate(idx): out1[i] = int(kwarg1 / 16777216) % 256 out2[i] = int(kwarg1 / 65536) % 256 out3[i] = int(kwarg1 / 256) % 256 out4[i] = int(kwarg1) % 256 def netmask(ips, prefixlen=16): """ Compute a column of netmasks for a column of IP addresses. **Addresses must be IPv4. 
    IPv6 not yet supported.**

    Parameters
    ----------
    ips : IPv4 address
        IP addresses to be checked
    prefixlen : integer
        Length of the network prefix, in bits, for IPv4 addresses

    Returns
    -------
    rtype : cudf.Series, netmask
        Netmask output for the set of IP addresses

    Examples
    --------
    >>> import clx.ip
    >>> import cudf
    >>> clx.ip.netmask(cudf.Series(["192.168.0.1","10.0.0.1"]), prefixlen=16)
    0    255.255.0.0
    1    255.255.0.0
    Name: net_mask, dtype: object
    """
    _ALL_ONES = (2 ** 32) - 1
    mask_int = _ALL_ONES ^ (_ALL_ONES >> prefixlen)
    df = cudf.DataFrame()
    df["idx"] = ips.index
    x = df.apply_rows(
        _netmask_kernel,
        incols=["idx"],
        outcols=dict(out1=np.int64, out2=np.int64, out3=np.int64, out4=np.int64),
        kwargs=dict(kwarg1=mask_int),
    )
    out1 = x["out1"].astype(str)
    out2 = x["out2"].astype(str)
    out3 = x["out3"].astype(str)
    out4 = x["out4"].astype(str)
    df["net_mask"] = out1.str.cat(out2, sep=".").str.cat(out3, sep=".").str.cat(out4, sep=".")
    return df["net_mask"]


def _hostmask_kernel(idx, out1, out2, out3, out4, kwarg1):
    for i, ipnum in enumerate(idx):
        out1[i] = int(kwarg1 / 16777216) % 256
        out2[i] = int(kwarg1 / 65536) % 256
        out3[i] = int(kwarg1 / 256) % 256
        out4[i] = int(kwarg1) % 256


def hostmask(ips, prefixlen=16):
    """
    Compute a column of hostmasks for a column of IP addresses.

    **Addresses must be IPv4. IPv6 not yet supported.**

    Parameters
    ----------
    ips : IPv4 address
        IP addresses to be checked
    prefixlen : integer
        Length of the network prefix, in bits, for IPv4 addresses

    Returns
    -------
    rtype : cudf.Series, hostmask
        Hostmask output for the set of IP addresses

    Examples
    --------
    >>> import clx.ip
    >>> import cudf
    >>> clx.ip.hostmask(cudf.Series(["192.168.0.1","10.0.0.1"]), prefixlen=16)
    0    0.0.255.255
    1    0.0.255.255
    Name: hostmask, dtype: object
    """
    _ALL_ONES = (2 ** 32) - 1
    host_mask_int = int(_ALL_ONES ^ (_ALL_ONES >> prefixlen)) ^ _ALL_ONES
    df = cudf.DataFrame()
    df["idx"] = ips.index
    x = df.apply_rows(
        _hostmask_kernel,
        incols=["idx"],
        outcols=dict(out1=np.int64, out2=np.int64, out3=np.int64, out4=np.int64),
        kwargs=dict(kwarg1=host_mask_int),
    )
    out1 = x["out1"].astype(str)
    out2 = x["out2"].astype(str)
    out3 = x["out3"].astype(str)
    out4 = x["out4"].astype(str)
    df["hostmask"] = out1.str.cat(out2, sep=".").str.cat(out3, sep=".").str.cat(out4, sep=".")
    return df["hostmask"]


def _mask_kernel(masked_ip_int, out1, out2, out3, out4, kwarg1):
    for i, ipnum in enumerate(masked_ip_int):
        out1[i] = int(ipnum / 16777216) % 256
        out2[i] = int(ipnum / 65536) % 256
        out3[i] = int(ipnum / 256) % 256
        out4[i] = int(ipnum) % 256


def mask(ips, masks):
    """
    Apply a mask to a column of IP addresses.

    **Addresses must be IPv4.
IPv6 not yet supported.** Parameters ---------- ips : IPv4 address IP addresses to be checked masks: Subnet mask value The host or subnet masks to be applied Returns ------- rtype : cudf.Series, masked IPv4 address Masked IP address from list of IPs Examples -------- >>> import clx.ip >>> import cudf >>> input_ips = cudf.Series(["192.168.0.1","10.0.0.1"]) >>> input_masks = cudf.Series(["255.255.0.0", "255.255.0.0"]) >>> clx.ip.mask(input_ips, input_masks) 0 192.168.0.0 1 10.0.0.0 Name: mask, dtype: object """ df = cudf.DataFrame() df["int_mask"] = masks.str.ip2int() df["int_ip"] = ips.str.ip2int() df["masked_ip_int"] = df["int_mask"] & df["int_ip"] x = df.apply_rows( _mask_kernel, incols=["masked_ip_int"], outcols=dict(out1=np.int64, out2=np.int64, out3=np.int64, out4=np.int64), kwargs=dict(kwarg1=0), ) out1 = x["out1"].astype(str) out2 = x["out2"].astype(str) out3 = x["out3"].astype(str) out4 = x["out4"].astype(str) df["mask"] = out1.str.cat(out2, sep=".").str.cat(out3, sep=".").str.cat(out4, sep=".") return df["mask"]
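A short usage sketch tying the three helpers above together, following the calls documented in their docstrings (assumes a CUDA-capable GPU with cudf installed and that this module is importable as clx.ip):

# Masking an address with its own netmask yields the network address.
import cudf
import clx.ip

ips = cudf.Series(["192.168.0.1", "10.0.0.1"])
net = clx.ip.netmask(ips, prefixlen=24)    # 255.255.255.0 for each row
host = clx.ip.hostmask(ips, prefixlen=24)  # 0.0.0.255 for each row
print(clx.ip.mask(ips, net))               # 192.168.0.0, 10.0.0.0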
13,697
28.778261
1,529
py
clx-branch-23.04
clx-branch-23.04/python/clx/features.py
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def binary(dataframe, entity_id, feature_id):
    """
    Create binary feature dataframe using provided dataset, entity, and feature.

    :param dataframe: Input dataframe to create binary features
    :type dataframe: cudf.DataFrame
    :param entity_id: Entity ID. Must be a column within `dataframe`
    :type entity_id: str
    :param feature_id: Feature ID. Must be a column within `dataframe`
    :type feature_id: str
    :return: dataframe
    :rtype: cudf.DataFrame

    Examples
    --------
    >>> import cudf
    >>> import clx.features
    >>> df = cudf.DataFrame(
            {
                "time": [1, 2, 3],
                "user": ["u1", "u2", "u1",],
                "computer": ["c1", "c1", "c3"],
            }
        )
    >>> output = clx.features.binary(df, "user", "computer")
    >>> output
            c1   c3
    user
    u1     1.0  1.0
    u2     1.0  0.0
    """
    if entity_id not in dataframe.columns or feature_id not in dataframe.columns:
        raise Exception(
            "{0} and {1} must be column names in the input dataframe".format(
                entity_id, feature_id
            )
        )
    df_grouped = dataframe.groupby([entity_id, feature_id]).count().reset_index()
    df_output = df_grouped.pivot(index=entity_id, columns=feature_id).fillna(0)
    df_output[df_output != 0] = 1
    return df_output


def frequency(dataframe, entity_id, feature_id):
    """
    Create frequency feature dataframe using provided dataset, entity, and feature.

    :param dataframe: Input dataframe to create frequency features
    :type dataframe: cudf.DataFrame
    :param entity_id: Entity ID. Must be a column within `dataframe`
    :type entity_id: str
    :param feature_id: Feature ID. Must be a column within `dataframe`
    :type feature_id: str
    :return: dataframe
    :rtype: cudf.DataFrame

    Examples
    --------
    >>> import cudf
    >>> import clx.features
    >>> df = cudf.DataFrame(
            {
                "time": [1, 2, 3],
                "user": ["u1", "u2", "u1",],
                "computer": ["c1", "c1", "c3"],
            }
        )
    >>> output = clx.features.frequency(df, "user", "computer")
    >>> output
            c1   c3
    user
    u1     0.5  0.5
    u2     1.0  0.0
    """
    if entity_id not in dataframe.columns or feature_id not in dataframe.columns:
        raise Exception(
            "{0} and {1} must be column names in the input dataframe".format(
                entity_id, feature_id
            )
        )
    df_grouped = dataframe.groupby([entity_id, feature_id]).count().reset_index()
    df_output = df_grouped.pivot(index=entity_id, columns=feature_id).fillna(0)
    sum_col = df_output.sum(axis=1)
    for col in df_output.columns:
        df_output[col] = df_output[col] / sum_col
    return df_output
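A minimal sketch contrasting the two feature builders above on the same toy frame (assumes cudf and that this module is importable as clx.features, per the docstrings):

# binary marks presence (0/1); frequency normalizes counts so each entity row sums to 1.
import cudf
import clx.features

df = cudf.DataFrame(
    {
        "time": [1, 2, 3],
        "user": ["u1", "u2", "u1"],
        "computer": ["c1", "c1", "c3"],
    }
)
print(clx.features.binary(df, "user", "computer"))     # u1 -> 1.0, 1.0
print(clx.features.frequency(df, "user", "computer"))  # u1 -> 0.5, 0.5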
3,510
31.509259
83
py
clx-branch-23.04
clx-branch-23.04/python/clx/_version.py
# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = " (HEAD -> branch-23.04)" git_full = "68c14f460b5d3ab41ade9b2450126db0d2536745" git_date = "2023-05-19 12:07:00 -0400" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "v" cfg.parentdir_prefix = "clx-" cfg.versionfile_source = "clx/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." 
return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None}
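The rendering helpers above are pure functions of the `pieces` dict, so their behavior can be sanity-checked without a git checkout. A small illustration (the pieces values below are made up; assumes the module is importable as clx._version):

from clx import _version

# Hypothetical pieces, as git_pieces_from_vcs would report for a dirty
# checkout two commits past a v23.04 tag.
pieces = {
    "closest-tag": "23.04",
    "distance": 2,
    "short": "68c14f4",
    "long": "68c14f460b5d3ab41ade9b2450126db0d2536745",
    "dirty": True,
    "error": None,
    "date": "2023-05-19T12:07:00-0400",
}
print(_version.render(pieces, "pep440")["version"])        # 23.04+2.g68c14f4.dirty
print(_version.render(pieces, "git-describe")["version"])  # 23.04-2-g68c14f4-dirty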
18,499
34.508637
79
py
clx-branch-23.04
clx-branch-23.04/python/clx/__init__.py
# Copyright (c) 2019, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Versioneer
from ._version import get_versions

__version__ = get_versions()['version']
del get_versions
693
35.526316
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/__init__.py
0
0
0
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/factory/kafka_factory.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from confluent_kafka import Consumer
from confluent_kafka import Producer
from clx.io.factory.abstract_factory import AbstractFactory
from clx.io.reader.kafka_reader import KafkaReader
from clx.io.writer.kafka_writer import KafkaWriter

log = logging.getLogger(__name__)


class KafkaFactory(AbstractFactory):
    def __init__(self, config):
        """
        Constructor method

        :param config: dictionary object of config values for **batch_size**,
            **time_window**, **publisher_kafka_topic**, **output_delimiter**,
            **kafka_brokers**, and **group_id**.
        """
        self._config = config

    def get_reader(self):
        """
        Get instance of KafkaReader
        """
        consumer = self._create_consumer()
        if "time_window" in self.config:
            reader = KafkaReader(
                self.config["batch_size"],
                consumer,
                time_window=self.config["time_window"],
            )
        else:
            reader = KafkaReader(self.config["batch_size"], consumer)
        return reader

    def get_writer(self):
        """
        Get instance of KafkaWriter
        """
        producer = self._create_producer()
        writer = KafkaWriter(
            self.config["publisher_kafka_topic"],
            self.config["batch_size"],
            self.config["output_delimiter"],
            producer,
        )
        return writer

    def _create_consumer(self):
        log.info("creating kafka consumer instance")
        consumer_conf = {
            "bootstrap.servers": self.config["kafka_brokers"],
            "group.id": self.config["group_id"],
            "session.timeout.ms": 10000,
            "default.topic.config": {"auto.offset.reset": "largest"},
        }
        c = Consumer(consumer_conf)
        c.subscribe(
            self.config["consumer_kafka_topics"], on_assign=self.print_assignment
        )
        log.info("created kafka consumer instance")
        return c

    def _create_producer(self):
        log.info("creating kafka producer instance")
        producer_conf = {
            "bootstrap.servers": self.config["kafka_brokers"],
            "session.timeout.ms": 10000,
        }
        producer = Producer(producer_conf)
        log.info("created producer instance")
        return producer

    def print_assignment(self, consumer, partitions):
        print("Assignment:", partitions)
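A sketch of the config dictionary KafkaFactory expects, inferred from the keys referenced above (broker address and topic names are placeholders; a reachable Kafka broker is assumed):

config = {
    "kafka_brokers": "localhost:9092",         # placeholder broker address
    "group_id": "clx-group",                   # consumer group id
    "consumer_kafka_topics": ["input-topic"],  # topics the reader subscribes to
    "publisher_kafka_topic": "output-topic",   # topic the writer publishes to
    "batch_size": 100,
    "output_delimiter": ",",
    "time_window": 30,                         # optional; seconds
}
factory = KafkaFactory(config)
reader = factory.get_reader()  # KafkaReader
writer = factory.get_writer()  # KafkaWriter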
3,008
31.706522
180
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/factory/abstract_factory.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod


class AbstractFactory(ABC):
    @property
    def config(self):
        return self._config

    @config.setter
    def config(self, val):
        self._config = val

    @abstractmethod
    def get_reader(self):
        pass

    @abstractmethod
    def get_writer(self):
        pass
913
25.882353
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/factory/factory.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from clx.io.factory.kafka_factory import KafkaFactory
from clx.io.factory.fs_factory import FileSystemFactory
from clx.io.factory.dask_fs_factory import DaskFileSystemFactory

log = logging.getLogger(__name__)


class Factory:
    __cls_dict = {
        "kafka": "KafkaFactory",
        "fs": "FileSystemFactory",
        "dask_fs": "DaskFileSystemFactory",
    }

    @staticmethod
    def cls_dict():
        return Factory.__cls_dict

    class InstanceGenerator(object):
        def __init__(self, func):
            self.func = func

        def __call__(self, *args, **kwargs):
            class_name, config = self.func(*args, **kwargs)
            try:
                target_cls = globals()[class_name](config)
                return target_cls
            except KeyError as error:
                log.error(error)
                log.exception(error)
                raise

    @InstanceGenerator
    def get_instance(io_comp, config):
        io_comp = io_comp.lower()
        if io_comp and io_comp in Factory.cls_dict():
            return Factory.cls_dict()[io_comp], config
        else:
            raise KeyError(
                "Dictionary doesn't have { %s } corresponding component class."
                % (io_comp)
            )

    @staticmethod
    def get_reader(io_comp, config):
        return Factory.get_instance(io_comp, config).get_reader()

    @staticmethod
    def get_writer(io_comp, config):
        return Factory.get_instance(io_comp, config).get_writer()
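How the dispatcher above is meant to be driven, as the key lookup suggests: "fs", "kafka", or "dask_fs" selects the factory class, and the rest of the config flows through to the underlying reader or writer (the file path below is a placeholder):

# "fs" maps to FileSystemFactory; extra keys are forwarded to the cudf reader.
config = {"type": "fs", "input_format": "csv", "input_path": "/path/to/input.csv"}
reader = Factory.get_reader("fs", config)
df = reader.fetch_data()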
2,104
29.955882
79
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/factory/__init__.py
0
0
0
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/factory/fs_factory.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from clx.io.factory.abstract_factory import AbstractFactory
from clx.io.reader.fs_reader import FileSystemReader
from clx.io.writer.fs_writer import FileSystemWriter


class FileSystemFactory(AbstractFactory):
    def __init__(self, config):
        """
        Constructor method

        :param config: dictionary object of config values for **type**,
            **input_format**, **input_path** (or **output_path**), and
            cudf reader/writer optional keyword args
        """
        self._config = config

    def get_reader(self):
        """
        Get instance of FileSystemReader
        """
        return FileSystemReader(self.config)

    def get_writer(self):
        """
        Get instance of FileSystemWriter
        """
        return FileSystemWriter(self.config)
1,294
34
171
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/factory/dask_fs_factory.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from clx.io.factory.abstract_factory import AbstractFactory
from clx.io.reader.dask_fs_reader import DaskFileSystemReader


class DaskFileSystemFactory(AbstractFactory):
    def __init__(self, config):
        """
        Constructor method

        :param config: dictionary object of config values for **type**,
            **input_format**, **input_path**, and dask reader optional keyword args
        """
        self._config = config

    def get_reader(self):
        """
        Get instance of DaskFileSystemReader
        """
        return DaskFileSystemReader(self.config)

    def get_writer(self):
        raise NotImplementedError
1,224
32.108108
143
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/writer/writer.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod


class Writer(ABC):
    @abstractmethod
    def close(self):
        pass

    @abstractmethod
    def write_data(self):
        pass
760
28.269231
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/writer/__init__.py
0
0
0
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/writer/fs_writer.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import logging
import os
from clx.io.writer.file_writer import FileWriter

log = logging.getLogger(__name__)


class FileSystemWriter(FileWriter):
    """
    Uses cudf to write to file system based on config object.

    :param config: dictionary object of config values for **type**,
        **output_format**, **output_path**, and cudf writer optional keyword args
    """

    def __init__(self, config):
        self._config = config

    def write_data(self, df):
        """
        Write data to file system using cudf based on provided config object
        """
        output_format = self.config["output_format"].lower()
        filepath = self.config["output_path"]
        kwargs = self.config.copy()
        del kwargs["type"]
        del kwargs["output_format"]
        del kwargs["output_path"]

        output_dir = os.path.dirname(filepath)
        if not os.path.isdir(output_dir):
            log.info("output directory { %s } does not exist" % (output_dir))
            log.info("creating output directory { %s }..." % (output_dir))
            os.makedirs(output_dir)
            log.info("created output directory { %s }..." % (output_dir))
        if os.path.exists(filepath):
            raise IOError("output path { %s } already exists" % (filepath))
        log.info("writing data to location { %s }" % (filepath))
        if "csv" == output_format:
            df.to_csv(filepath, **kwargs)
        elif "parquet" == output_format:
            cudf.io.parquet.to_parquet(df, filepath, **kwargs)
        elif "orc" == output_format:
            cudf.io.orc.to_orc(df, filepath, **kwargs)
        elif "json" == output_format:
            cudf.io.json.to_json(df, filepath, **kwargs)
        else:
            raise NotImplementedError(
                "%s is not a supported output_format" % (output_format)
            )

    def close(self):
        """
        Close cudf writer
        """
        log.info("Closed writer")
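A usage sketch for the writer above; everything in the config beyond type/output_format/output_path is forwarded to the cudf writer (the path below is a placeholder, and must not already exist):

import cudf

config = {
    "type": "fs",
    "output_format": "csv",
    "output_path": "/tmp/output/alerts.csv",  # raises IOError if it already exists
    "index": False,                           # forwarded to cudf's to_csv
}
writer = FileSystemWriter(config)
writer.write_data(cudf.DataFrame({"a": [1, 2]}))
writer.close()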
2,492
33.625
162
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/writer/file_writer.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import abstractmethod
from clx.io.writer.writer import Writer


class FileWriter(Writer):
    @property
    def config(self):
        return self._config

    @abstractmethod
    def write_data(self):
        pass
813
28.071429
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/writer/kafka_writer.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

log = logging.getLogger(__name__)


class KafkaWriter:
    """
    Publish to Kafka topic based on config object.

    :param kafka_topic: Kafka topic
    :param batch_size: batch size
    :param delimiter: delimiter
    :param producer: producer
    """

    # Column name of formatted output messages sent to kafka
    output_colname = "delimited_output"

    def __init__(self, kafka_topic, batch_size, delimiter, producer):
        self._kafka_topic = kafka_topic
        self._batch_size = batch_size
        self._delimiter = delimiter
        self._producer = producer

    @property
    def producer(self):
        return self._producer

    @property
    def delimiter(self):
        return self._delimiter

    def write_data(self, df):
        """
        Publish messages to kafka topic

        :param df: dataframe to publish
        """
        out_df = self._generate_delimited_output_col(df)
        for rec in out_df.to_records():
            self.producer.produce(self._kafka_topic, rec[self.output_colname])
            if len(self.producer) > self._batch_size:
                log.debug(
                    "batch reached, calling poll... producer unsent: %s",
                    len(self.producer),
                )
                self.producer.poll(0)

    def _generate_delimited_output_col(self, gdf):
        first_col = gdf.columns[0]
        gdf[first_col] = gdf[first_col].astype("str").fillna("")
        gdf[self.output_colname] = gdf[first_col].astype("str").str.rstrip()
        for col in gdf.columns[1:-1]:
            gdf[col] = gdf[col].astype("str").fillna("")
            gdf[col] = gdf[col].astype("str").str.rstrip()
            gdf[self.output_colname] = gdf[self.output_colname].str.cat(
                gdf[col], sep=self.delimiter
            )
        return gdf

    def close(self):
        """
        Close Kafka writer
        """
        log.info("Closing kafka writer...")
        if self._producer is not None:
            self._producer.flush()
        log.info("Closed kafka writer.")
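The delimited-output step above is plain cudf string concatenation, so it can be sanity-checked without a broker. A quick sketch that calls the internal helper directly (unusual in production code, but convenient here; producer=None is safe only because write_data is never called):

import cudf

gdf = cudf.DataFrame({"time": ["t1"], "user": ["u1"], "msg": ["hello"]})
writer = KafkaWriter("output-topic", batch_size=100, delimiter=",", producer=None)
out = writer._generate_delimited_output_col(gdf)
print(out["delimited_output"])  # "t1,u1,hello"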
2,648
30.915663
78
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/reader/reader.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod


class Reader(ABC):
    @property
    def has_data(self):
        return self._has_data

    @has_data.setter
    def has_data(self, val):
        self._has_data = val

    @property
    def config(self):
        return self._config

    @config.setter
    def config(self, val):
        self._config = val

    @abstractmethod
    def close(self):
        pass

    @abstractmethod
    def fetch_data(self):
        pass
1,048
23.97619
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/reader/dask_fs_reader.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dask_cudf
import logging
from clx.io.reader.file_reader import FileReader

log = logging.getLogger(__name__)


class DaskFileSystemReader(FileReader):
    """
    Uses Dask to read from file system based on config object.

    :param config: dictionary object of config values for **type**,
        **input_format**, **input_path**, and dask reader optional keyword args
    """

    def __init__(self, config):
        self._config = config
        self._has_data = True

    def fetch_data(self):
        """
        Fetch data using dask based on provided config object
        """
        df = None
        input_format = self.config["input_format"].lower()
        filepath = self.config["input_path"]
        kwargs = self.config.copy()
        del kwargs["type"]
        del kwargs["input_format"]
        del kwargs["input_path"]

        if "csv" == input_format:
            df = dask_cudf.read_csv(filepath, **kwargs)
        elif "parquet" == input_format:
            df = dask_cudf.read_parquet(filepath, **kwargs)
        elif "orc" == input_format:
            df = dask_cudf.read_orc(filepath, engine="cudf")
        elif "json" == input_format:
            df = dask_cudf.read_json(filepath, **kwargs)
        else:
            raise NotImplementedError(
                "%s is not a supported input_format" % (input_format)
            )
        self.has_data = False
        return df

    def close(self):
        """
        Close dask reader
        """
        log.info("Closed dask_fs reader")
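Same config shape as the cudf-backed reader, but dispatched through dask_cudf for larger-than-GPU-memory reads; a sketch with a placeholder path (glob patterns are accepted by dask_cudf's readers):

config = {
    "type": "dask_fs",
    "input_format": "parquet",
    "input_path": "/path/to/data/*.parquet",  # placeholder glob
}
reader = DaskFileSystemReader(config)
ddf = reader.fetch_data()  # lazy dask_cudf.DataFrame
df = ddf.compute()         # materialize to a single cudf.DataFrame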
2,077
31.984127
139
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/reader/kafka_reader.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import logging
import time
from confluent_kafka import KafkaError
from clx.io.reader.reader import Reader

log = logging.getLogger(__name__)


class KafkaReader(Reader):
    """
    Reads from Kafka based on config object.

    :param batch_size: batch size
    :param consumer: Kafka consumer
    :param time_window: Max window of time that queued events will wait to be
        pushed to workflow
    """

    def __init__(self, batch_size, consumer, time_window=30):
        self._batch_size = batch_size
        self._consumer = consumer
        self._has_data = True
        self._time_window = time_window

    @property
    def consumer(self):
        return self._consumer

    @property
    def has_data(self):
        return self._has_data

    @property
    def time_window(self):
        return self._time_window

    def fetch_data(self):
        """
        Fetch data from Kafka based on provided config object
        """
        events = []
        rec_cnt = 0
        running = True
        current_time = time.time()
        try:
            while running:
                # First check if batch size or time window has been exceeded
                if (
                    rec_cnt >= self._batch_size
                    or (time.time() - current_time) >= self.time_window
                ):
                    log.debug(
                        "Exceeded record count ("
                        + str(rec_cnt)
                        + ") or time window ("
                        + str(time.time() - current_time)
                        + ")"
                    )
                    running = False
                # Else poll next message in kafka queue
                else:
                    msg = self.consumer.poll(timeout=1.0)
                    if msg is None:
                        log.debug("No message received.")
                        continue
                    elif not msg.error():
                        data = msg.value().decode("utf-8")
                        log.debug("Message received.")
                        events.append(data)
                        rec_cnt += 1
                    elif msg.error().code() != KafkaError._PARTITION_EOF:
                        log.error(msg.error())
                        running = False
                    else:
                        running = False
            df = cudf.DataFrame()
            if len(events) > 0:
                df["Raw"] = events
            log.debug(
                "Kafka reader batch aggregation complete. Dataframe size = "
                + str(df.shape)
            )
            return df
        except Exception:
            log.error("Error fetching data from kafka")
            raise

    def close(self):
        """
        Close Kafka reader
        """
        log.info("Closing kafka reader...")
        if self.consumer is not None:
            self.consumer.close()
        log.info("Closed kafka reader.")
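A consume-loop sketch for the reader above, pairing it with KafkaFactory and the placeholder config shown earlier (a reachable broker is assumed; handle_batch is a hypothetical downstream handler):

reader = KafkaFactory(config).get_reader()
# Note: KafkaReader never flips has_data to False, so bound the loop yourself.
for _ in range(10):
    df = reader.fetch_data()  # returns once batch_size or time_window is hit
    if len(df) > 0:
        handle_batch(df["Raw"])  # hypothetical; messages land in the "Raw" column
reader.close()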
3,382
32.49505
127
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/reader/fs_reader.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import logging
from clx.io.reader.file_reader import FileReader

log = logging.getLogger(__name__)


class FileSystemReader(FileReader):
    """
    Uses cudf to read from file system based on config object.

    :param config: dictionary object of config values for **type**,
        **input_format**, **input_path**, and cudf reader optional keyword args
    """

    def __init__(self, config):
        self._config = config
        self._has_data = True

    def fetch_data(self):
        """
        Fetch data using cudf based on provided config object
        """
        df = None
        input_format = self.config["input_format"].lower()
        filepath = self.config["input_path"]
        kwargs = self.config.copy()
        del kwargs["type"]
        del kwargs["input_format"]
        del kwargs["input_path"]

        if "csv" == input_format:
            df = cudf.read_csv(filepath, **kwargs)
        elif "parquet" == input_format:
            df = cudf.read_parquet(filepath, **kwargs)
        elif "orc" == input_format:
            df = cudf.read_orc(filepath, engine="cudf")
        elif "json" == input_format:
            df = cudf.read_json(filepath, **kwargs)
        else:
            raise NotImplementedError(
                "%s is not a supported input_format" % (input_format)
            )
        self.has_data = False
        return df

    def close(self):
        """
        Close cudf reader
        """
        log.info("Closed fs reader")
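A usage sketch for the cudf-backed reader above (placeholder path; any extra config keys, such as delimiter here, are forwarded to the cudf reader):

config = {
    "type": "fs",
    "input_format": "csv",
    "input_path": "/path/to/input.csv",  # placeholder
    "delimiter": ",",                    # forwarded to cudf.read_csv
}
reader = FileSystemReader(config)
df = reader.fetch_data()  # has_data flips to False after one fetch
reader.close()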
2,064
31.777778
160
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/reader/__init__.py
0
0
0
py
clx-branch-23.04
clx-branch-23.04/python/clx/io/reader/file_reader.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import abstractmethod
from clx.io.reader.reader import Reader


class FileReader(Reader):
    @property
    def has_data(self):
        return self._has_data

    @has_data.setter
    def has_data(self, val):
        self._has_data = val

    @property
    def config(self):
        return self._config

    @config.setter
    def config(self, val):
        self._config = val

    @abstractmethod
    def close(self):
        pass

    @abstractmethod
    def fetch_data(self):
        pass
1,090
24.372093
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/osi/virus_total.py
# Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import json import requests from os.path import abspath, basename # ref https://developers.virustotal.com/reference class VirusTotalClient(object): """ Wrapper class to query VirusTotal database. :param apikey: API key :param proxies: proxies """ def __init__(self, api_key=None, proxies=None): if api_key is None: raise ValueError("Virus Total API key is None.") self.__api_key = api_key self.__proxies = proxies self.__vt_endpoint_dict = self.__create_vt_endpoint_dict() @property def api_key(self): return self.__api_key @property def proxies(self): return self.__proxies @property def vt_endpoint_dict(self): return self.__vt_endpoint_dict def file_scan(self, file): """ This function allows you to send a file for scanning with VirusTotal. Before performing submissions it would be nice to retrieve the latest report on the file. File size limit is 32MB, in order to submit files up to 200MB in size it is mandatory to use `scan_big_file` feature :param file: File to be scanned :type file: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.file_scan('test.sh') {'status_code': 200, 'json_resp': {'scan_id': '0204e88255a0bd7807547e9186621f0478a6bb2c43e795fb5e6934e5cda0e1f6-1605914572', 'sha1': '70c0942965354dbb132c05458866b96709e37f44'...} """ file_size_mb = self.__get_file_size(file) params = {"apikey": self.api_key} files = {"file": (basename(file), open(abspath(file), "rb"))} url = self.vt_endpoint_dict["file_scan"] if file_size_mb > 32: resp = self.scan_big_file(files) else: resp = self.__post(url, params=params, files=files, proxies=self.proxies) return resp def __get_file_size(self, file): statinfo = os.stat(file) return statinfo.st_size / (1024 * 1024) def file_rescan(self, *resource): """ This function rescan given files. :param *resource: The resource argument can be the MD5, SHA-1 or SHA-256 of the file you want to re-scan. :type *resource: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.file_rescan('70c0942965354dbb132c05458866b96709e37f44') {'status_code': 200, 'json_resp': {'scan_id': ...}} """ params = {"apikey": self.api_key, "resource": ",".join(*resource)} resp = self.__post( self.vt_endpoint_dict["file_rescan"], params=params, proxies=self.proxies ) return resp def file_report(self, *resource): """ Retrieve file scan reports :param *resource: The resource argument can be the MD5, SHA-1 or SHA-256 of a file for which you want to retrieve the most recent antivirus report. You may also specify a scan_id returned by the /file/scan endpoint. 
:type *resource: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.file_report(["99017f6eebbac24f351415dd410d522d"]) {'status_code': 200, 'json_resp': {'scans': {'Bkav': {'detected': True, 'version': '1.3.0.9899', 'result': 'W32.AIDetectVM.malware1'...}} """ params = {"apikey": self.api_key, "resource": ",".join(*resource)} resp = self.__get( self.vt_endpoint_dict["file_report"], params=params, proxies=self.proxies ) return resp def url_scan(self, *url): """Retrieve URL scan reports :param *url: A URL for which you want to retrieve the most recent report. You may also specify a scan_id (sha256-timestamp as returned by the URL submission API) to access a specific report. :type *url: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.url_scan(["virustotal.com"]) {'status_code': 200, 'json_resp': {'permalink': 'https://www.virustotal.com/gui/url/...}} """ params = {"apikey": self.api_key, "url": "\n".join(*url)} resp = self.__post( self.vt_endpoint_dict["url_scan"], params=params, proxies=self.proxies ) return resp def url_report(self, *resource): """ Retrieve URL scan reports :param *resource: The resource argument must be the URL to retrieve the most recent report. :type *resource: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.url_report(["virustotal.com"]) {'status_code': 200, 'json_resp': {'scan_id': 'a354494a73382ea0b4bc47f4c9e8d6c578027cd4598196dc88f05a22b5817293-1605914280'...} """ params = {"apikey": self.api_key, "resource": "\n".join(*resource)} resp = self.__post( self.vt_endpoint_dict["url_report"], params=params, proxies=self.proxies ) return resp def ipaddress_report(self, ip): """ Retrieve report using ip address. :param ip: An IP address :type ip: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.ipaddress_report("90.156.201.27") {'status_code': 200, 'json_resp': {'asn': 25532, 'undetected_urls...}} """ params = {"apikey": self.api_key, "ip": ip} resp = self.__get( self.vt_endpoint_dict["ip_report"], params=params, proxies=self.proxies ) return resp def domain_report(self, domain): """ Retrieve report using domain. :param domain: A domain name :type domain: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.domain_report("027.ru") {'status_code': 200, 'json_resp': {'BitDefender category': 'parked', 'undetected_downloaded_samples'...}} """ params = {"apikey": self.api_key, "domain": domain} resp = self.__get( self.vt_endpoint_dict["domain_report"], params=params, proxies=self.proxies ) return resp def put_comment(self, resource, comment): """ Post comment for a file or URL :param resource: Either an md5/sha1/sha256 hash of the file you want to review or the URL itself that you want to comment on. 
:type resource: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.put_comment("75efd85cf6f8a962fe016787a7f57206ea9263086ee496fc62e3fc56734d4b53", "This is a test comment") {'status_code': 200, 'json_resp': {'response_code': 0, 'verbose_msg': 'Duplicate comment'}} """ params = {"apikey": self.api_key, "resource": resource, "comment": comment} resp = self.__post( self.vt_endpoint_dict["put_comment"], params=params, proxies=self.proxies ) return resp def scan_big_file(self, files): """ Scanning files larger than 32MB :param file: File to be scanned :type file: str :return: Response :rtype: dict Examples -------- >>> from clx.osi.virus_total import VirusTotalClient >>> client = VirusTotalClient(api_key='your-api-key') >>> client.scan_big_file('test.sh') {'status_code': 200, 'json_resp': {'scan_id': '0204e88255a0bd7807547e9186621f0478a6bb2c43e795fb5e6934e5cda0e1f6-1605914572', 'sha1': '70c0942965354dbb132c05458866b96709e37f44'...} """ params = {"apikey": self.api_key} upload_url_json = self.__get(self.vt_endpoint_dict["upload_url"], params=params) upload_url = upload_url_json["upload_url"] resp = requests.post(upload_url, files=files) return self.__validate_response(resp) def __post(self, endpoint, params, **kwargs): resp = requests.post(endpoint, params=params, **kwargs) return self.__validate_response(resp) def __get(self, endpoint, params, **kwargs): resp = requests.get(endpoint, params=params, **kwargs) return self.__validate_response(resp) def __create_vt_endpoint_dict(self): vt_endpoint_dict = {} base_url = "https://www.virustotal.com/vtapi/v2" vt_endpoint_dict["file_scan"] = "%s/file/scan" % (base_url) vt_endpoint_dict["file_rescan"] = "%s/file/rescan" % (base_url) vt_endpoint_dict["file_report"] = "%s/file/report" % (base_url) vt_endpoint_dict["url_scan"] = "%s/url/scan" % (base_url) vt_endpoint_dict["url_report"] = "%s/url/report" % (base_url) vt_endpoint_dict["upload_url"] = "%s/file/scan/upload_url" % (base_url) vt_endpoint_dict["ip_report"] = "%s/ip-address/report" % (base_url) vt_endpoint_dict["domain_report"] = "%s/domain/report" % (base_url) vt_endpoint_dict["put_comment"] = "%s/comments/put" % (base_url) return vt_endpoint_dict def __validate_response(self, response): if response.status_code == 200: json_resp = json.loads(response.text) return dict(status_code=response.status_code, json_resp=json_resp) return dict( status_code=response.status_code, error=response.text, resp=response.content )
10,955
37.307692
198
py
clx-branch-23.04
clx-branch-23.04/python/clx/osi/whois.py
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# use `pip install python-whois`
import whois
import logging

log = logging.getLogger(__name__)


class WhoIsLookupClient(object):
    """
    Wrapper class to query WhoIs API.

    :param sep: Delimiter to concat nested list values from the Whois response.
    :param datetime_format: Format to convert WhoIs response datetime object.
    """

    str_arr_keys = ["domain_name", "name_servers", "status", "emails", "dnssec"]
    datetime_arr_keys = ["creation_date", "updated_date", "expiration_date"]

    def __init__(self, sep=",", datetime_format="%m-%d-%Y %H:%M:%S"):
        self.sep = sep
        self.datetime_format = datetime_format

    def whois(self, domains, arr2str=True):
        """
        Function to access parsed WhoIs data for a given domain.

        :param domains: Domains to perform whois lookup.
        :type domains: list
        :param arr2str: Convert WhoIs lookup response object to list of strings.
        :type arr2str: boolean
        :return: WhoIs information with respect to given domains.
        :rtype: list/obj

        Examples
        --------
        >>> from clx.osi.whois import WhoIsLookupClient
        >>> domains = ["nvidia.com"]
        >>> client = WhoIsLookupClient()
        >>> client.whois(domains)
        [{'domain_name': 'NVIDIA.COM', 'registrar': 'Safenames Ltd', 'whois_server': 'whois.safenames.net'...}]
        """
        result = []
        for domain in domains:
            resp = whois.whois(domain)
            if arr2str:
                resp_keys = resp.keys()
                resp = self.__flatten_str_array(resp, resp_keys)
                resp = self.__flatten_datetime_array(resp, resp_keys)
            result.append(resp)
        return result

    def __flatten_str_array(self, resp, resp_keys):
        for key in self.str_arr_keys:
            if key in resp_keys and isinstance(resp[key], list):
                resp[key] = self.sep.join(resp[key])
        return resp

    def __flatten_datetime_array(self, resp, resp_keys):
        for key in self.datetime_arr_keys:
            values = []
            if key in resp_keys:
                if isinstance(resp[key], list):
                    for ts in resp[key]:
                        values.append(ts.strftime(self.datetime_format))
                    resp[key] = self.sep.join(values)
                else:
                    resp[key] = resp[key].strftime(self.datetime_format)
        return resp
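A small illustration of the flattening options above (network access to WHOIS servers and the python-whois package are assumed):

from clx.osi.whois import WhoIsLookupClient

client = WhoIsLookupClient(sep="; ", datetime_format="%Y-%m-%d")
flat = client.whois(["nvidia.com"])                # lists joined with "; ", datetimes formatted
raw = client.whois(["nvidia.com"], arr2str=False)  # keep lists/datetime objects as returned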
3,032
35.542169
111
py
clx-branch-23.04
clx-branch-23.04/python/clx/osi/farsight.py
# Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ref: https://docs.dnsdb.info/dnsdb-api/ import json import requests import urllib import logging log = logging.getLogger(__name__) class FarsightLookupClient(object): """ Wrapper class to query DNSDB record in various ways Example: by IP, DomainName :param server: Farsight server :param apikey: API key :param limit: limit :param http_proxy: HTTP proxy :param https_proxy: HTTPS proxy """ def __init__(self, server, apikey, limit=None, http_proxy=None, https_proxy=None): self.server = server self.headers = {"Accept": "application/json", "X-Api-Key": apikey} self.limit = limit self.proxy_args = self.__get_proxy_args(http_proxy, https_proxy) def query_rrset(self, oname, rrtype=None, bailiwick=None, before=None, after=None): """ Batch version of querying DNSDB by given domain name and time ranges. :param oname: DNS domain name. :type oname: str :param rrtype: The resource record type of the resource record, either using the standard DNS type mnemonic, or an RFC 3597 generic type, i.e. the string TYPE immediately followed by the decimal RRtype number. :type rrtype: str :param bailiwick: The “bailiwick” of an RRset in DNSDB observed via passive DNS replication is the closest enclosing zone delegated to a nameserver which served the RRset. :type bailiwick: str :param before: Output results seen before this time. :type before: UNIX timestamp :param after: Output results seen after this time. :type after: UNIX timestamp :return: Response :rtype: dict Examples -------- >>> from clx.osi.farsight import FarsightLookupClient >>> client = FarsightLookupClient("https://localhost", "your-api-key") >>> client.query_rrset("www.dnsdb.info") {"status_code": 200,...} >>> client.query_rrset("www.dnsdb.info", rrtype="CNAME", bailiwick="dnsdb.info.", before=1374184718, after=1564909243,) {"status_code": 200,...} """ quoted_name = self.__quote(oname) if bailiwick: if not rrtype: rrtype = "ANY" path = "rrset/name/%s/%s/%s" % ( quoted_name, rrtype, self.__quote(bailiwick), ) elif rrtype: path = "rrset/name/%s/%s" % (quoted_name, rrtype) else: path = "rrset/name/%s" % quoted_name return self.__query(path, before, after) def query_rdata_name(self, rdata_name, rrtype=None, before=None, after=None): """ Query matches only a single DNSDB record of given owner name and time ranges. :param rdata_name: DNS domain name. :type rdata_name: str :param rrtype: The resource record type of the resource record, either using the standard DNS type mnemonic, or an RFC 3597 generic type, i.e. the string TYPE immediately followed by the decimal RRtype number. :type rrtype: str :param before: Output results seen before this time. :type before: UNIX timestamp :param after: Output results seen after this time. 
        :type after: UNIX timestamp
        :return: Response
        :rtype: dict

        Examples
        --------
        >>> from clx.osi.farsight import FarsightLookupClient
        >>> client = FarsightLookupClient("https://localhost", "your-api-key", limit=1)
        >>> client.query_rdata_name("www.farsightsecurity.com")
        {"status_code": 200,...}
        >>> client.query_rdata_name("www.farsightsecurity.com", rrtype="PTR", before=1386638408, after=1561176503)
        {"status_code": 200,...}
        """
        quoted_name = self.__quote(rdata_name)
        if rrtype:
            path = "rdata/name/%s/%s" % (quoted_name, rrtype)
        else:
            path = "rdata/name/%s" % quoted_name
        return self.__query(path, before, after)

    def query_rdata_ip(self, rdata_ip, before=None, after=None):
        """
        Query to find DNSDB records matching a specific IP address with given time range.

        :param rdata_ip: The VALUE is one of an IPv4 or IPv6 single address, with a prefix length, or with an address
            range. If a prefix is provided, the delimiter between the network address and prefix length is a single
            comma (“,”) character rather than the usual slash (“/”) character to avoid clashing with the HTTP URI path
            name separator.
        :type rdata_ip: str
        :param before: Output results seen before this time.
        :type before: UNIX timestamp
        :param after: Output results seen after this time.
        :type after: UNIX timestamp
        :return: Response
        :rtype: dict

        Examples
        --------
        >>> from clx.osi.farsight import FarsightLookupClient
        >>> client = FarsightLookupClient("https://localhost", "your-api-key", limit=1)
        >>> client.query_rdata_ip("100.0.0.1")
        {"status_code": 200,...}
        >>> client.query_rdata_ip("100.0.0.1", before=1428433465, after=1538014110)
        {"status_code": 200,...}
        """
        path = "rdata/ip/%s" % rdata_ip.replace("/", ",")
        return self.__query(path, before, after)

    def __get(self, url):
        """submit http get request"""
        response = requests.get(url, headers=self.headers, proxies=self.proxy_args)
        return response

    # queries dnsdb.
    def __query(self, path, before=None, after=None):
        res = []
        url = "%s/lookup/%s" % (self.server, path)
        params = self.__get_params(before, after)
        if params:
            url += "?{0}".format(urllib.parse.urlencode(params))
        response = self.__get(url)
        try:
            response.raise_for_status()
            self.__extract_response(response, res)
        except requests.exceptions.HTTPError as e:
            log.error("Error: " + str(e))
        return res

    # convert response to json format.
    def __extract_response(self, response, res):
        raw_result = response.text
        for rec in raw_result.split("\n"):
            if rec.strip():
                res.append(json.loads(rec))

    # initialize proxy arguments.
    def __get_proxy_args(self, http_proxy, https_proxy):
        proxy_args = {}
        if http_proxy:
            proxy_args["http"] = http_proxy
        if https_proxy:
            proxy_args["https"] = https_proxy
        return proxy_args

    # initialize query parameters
    def __get_params(self, before, after):
        params = {}
        if self.limit:
            params["limit"] = self.limit
        if before and after:
            params["time_first_after"] = after
            params["time_last_before"] = before
        else:
            if before:
                params["time_first_before"] = before
            if after:
                params["time_last_after"] = after
        return params

    def __quote(self, path):
        return urllib.parse.quote(path, safe="")
7,643
38.402062
345
py
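A hedged sketch of the FarsightLookupClient above; the endpoint URL and API key are placeholders, and a valid DNSDB subscription is required for real responses.

from clx.osi.farsight import FarsightLookupClient

# Placeholder server/key; each query returns a list of decoded JSON records,
# or an empty list when the HTTP request fails.
client = FarsightLookupClient("https://api.dnsdb.info", "your-api-key", limit=5)

# RRset lookup constrained by record type and a UNIX-timestamp window.
rrset = client.query_rrset("www.dnsdb.info", rrtype="A", after=1374184718)

# Rdata lookup by IP; "/" prefixes are rewritten to "," by the client.
rdata = client.query_rdata_ip("100.0.0.1")
print(len(rrset), len(rdata))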
clx-branch-23.04
clx-branch-23.04/python/clx/osi/__init__.py
0
0
0
py
clx-branch-23.04
clx-branch-23.04/python/clx/osi/slashnext.py
# Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ref: https://github.com/slashnext/SlashNext-URL-Analysis-and-Enrichment/tree/master/Python%20SDK import os from SlashNextPhishingIR import SlashNextPhishingIR class SlashNextClient(object): def __init__( self, api_key, snx_ir_workspace, base_url="https://oti.slashnext.cloud/api" ): if api_key is None: raise ValueError("SlashNext API key is None") if snx_ir_workspace is not None: if not os.path.exists(snx_ir_workspace): try: print("Creating directory {}".format(snx_ir_workspace)) os.makedirs(snx_ir_workspace) except Exception as error: raise Exception("Error while creating workspace: " + repr(error)) self._snx_phishing_ir = SlashNextPhishingIR(snx_ir_workspace) self._snx_phishing_ir.set_conf(api_key=api_key, base_url=base_url) @property def conn(self): return self._snx_phishing_ir def verify_connection(self): """ Verify SlashNext cloud database connection. Examples -------- >>> from clx.osi.slashnext import SlashNextClient >>> api_key = 'slashnext_cloud_apikey' >>> snx_ir_workspace_dir = 'snx_ir_workspace' >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir) >>> slashnext.verify_connection() Successfully connected to SlashNext cloud. 'success' """ status, details = self.conn.test() if status == "ok": print("Successfully connected to SlashNext cloud.") return "success" else: raise Exception( "Connection to SlashNext cloud failed due to {}.".format(details) ) def _execute(self, command): """ Execute all SlashNext Phishing Incident Response SDK supported actions/commands. :param command: Query to execute on SlashNext cloud database. :type command: str :return Query response as list. :rtype: list """ status, details, responses_list = self.conn.execute(command) if status == "ok": return responses_list else: raise Exception( "Action '{}' execution failed due to {}.".format(command, details) ) def host_reputation(self, host): """ Queries the SlashNext cloud database and retrieves the reputation of a host. :param host: The host to look up in the SlashNext Threat Intelligence database. Can be either a domain name or an IPv4 address. :type host: str :return Query response as list. :rtype: list Examples -------- >>> from clx.osi.slashnext import SlashNextClient >>> api_key = 'slashnext_cloud_apikey' >>> snx_ir_workspace_dir = 'snx_ir_workspace' >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir) >>> response_list = slashnext.host_reputation('google.com') >>> type(response_list[0]) <class 'dict'> """ command = "slashnext-host-reputation host={}".format(host) try: return self._execute(command) except Exception as error: raise Exception("SlashNext Host Reputation: " + repr(error)) def host_report(self, host): """ Queries the SlashNext cloud database and retrieves a detailed report. :param host: The host to look up in the SlashNext Threat Intelligence database. Can be either a domain name or an IPv4 address. :type host: str :return Query response as list. 
:rtype: list Examples -------- >>> from clx.osi.slashnext import SlashNextClient >>> api_key = 'slashnext_cloud_apikey' >>> snx_ir_workspace_dir = 'snx_ir_workspace' >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir) >>> response_list = slashnext.host_report('google.com') >>> type(response_list[0]) <class 'dict'> """ command = "slashnext-host-report host={}".format(host) try: return self._execute(command) except Exception as error: raise Exception("SlashNext Host Report: " + repr(error)) def host_urls(self, host, limit=10): """ Queries the SlashNext cloud database and retrieves a list of all URLs. :param host: The host to look up in the SlashNext Threat Intelligence database, for which to return a list of associated URLs. Can be either a domain name or an IPv4 address. :type host: str :param limit: The maximum number of URL records to fetch. Default is "10". :type limit: int :return Query response as list. :rtype: list Examples -------- >>> from clx.osi.slashnext import SlashNextClient >>> api_key = 'slashnext_cloud_apikey' >>> snx_ir_workspace_dir = 'snx_ir_workspace' >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir) >>> response_list = slashnext.host_urls('google.com', limit=1) >>> type(response_list[0]) <class 'dict'> """ command = "slashnext-host-urls host={} limit={}".format(host, limit) try: return self._execute(command) except Exception as error: raise Exception("SlashNext Host urls: " + repr(error)) def url_scan(self, url, extended_info=True): """ Perform a real-time URL reputation scan with SlashNext cloud-based SEER threat detection engine. :param url: The URL that needs to be scanned. :type url: str :param extended_info: Whether to download forensics data, such as screenshot, HTML, and rendered text. :type extended_info: boolean :return Query response as list. :rtype: list Examples -------- >>> from clx.osi.slashnext import SlashNextClient >>> api_key = 'slashnext_cloud_apikey' >>> snx_ir_workspace_dir = 'snx_ir_workspace' >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir) >>> response_list = slashnext.url_scan('http://ajeetenterprises.in/js/kbrad/drive/index.php', extended_info=False) >>> type(response_list[0]) <class 'dict'> """ command = "slashnext-url-scan url={} extended_info={}".format( url, str(extended_info).lower() ) try: return self._execute(command) except Exception as error: raise Exception("SlashNext URL Scan: " + repr(error)) def url_scan_sync(self, url, extended_info=True, timeout=60): """ Perform a real-time URL scan with SlashNext cloud-based SEER threat detection engine in a blocking mode. :param url: The URL that needs to be scanned. :type url: str :param extended_info: Whether to download forensics data, such as screenshot, HTML, and rendered text. :type extended_info: boolean :param timeout: A timeout value in seconds. If no timeout value is specified, a default timeout value is 60 seconds. :type timeout: int :return Query response as list. 
:rtype: list Examples -------- >>> from clx.osi.slashnext import SlashNextClient >>> api_key = 'slashnext_cloud_apikey' >>> snx_ir_workspace_dir = 'snx_ir_workspace' >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir) >>> response_list = slashnext.url_scan_sync('http://ajeetenterprises.in/js/kbrad/drive/index.php', extended_info=False, timeout=10) >>> type(response_list[0]) <class 'dict'> """ command = "slashnext-url-scan-sync url={} extended_info={} timeout={}".format( url, str(extended_info).lower(), timeout ) try: return self._execute(command) except Exception as error: raise Exception("SlashNext URL Scan Sync: " + repr(error)) def scan_report(self, scanid, extended_info=True): """ Retrieve URL scan results against a previous scan request. :param scanid: Scan ID of the scan for which to get the report. Can be retrieved from the "slashnext-url-scan" action or "slashnext-url-scan-sync" action. :type scanid: str :param extended_info: Whether to download forensics data, such as screenshot, HTML, and rendered text. :type extended_info: boolean :return Query response as list. :rtype: list Examples -------- >>> from clx.osi.slashnext import SlashNextClient >>> api_key = 'slashnext_cloud_apikey' >>> snx_ir_workspace_dir = 'snx_ir_workspace' >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir) >>> response_list = slashnext.scan_report('2-ba57-755a7458c8a3', extended_info=False) >>> type(response_list[0]) <class 'dict'> """ command = "slashnext-scan-report scanid={} extended_info={}".format( scanid, str(extended_info).lower() ) try: return self._execute(command) except Exception as error: raise Exception("SlashNext Scan Report: " + repr(error)) def download_screenshot(self, scanid, resolution="high"): """ Downloads a screenshot of a web page against a previous URL scan request. :param scanid: Scan ID of the scan for which to get the report. Can be retrieved from the "slashnext-url-scan" action or "slashnext-url-scan-sync" action. :type scanid: str :param resolution: Resolution of the web page screenshot. Can be "high" or "medium". Default is "high". :type resolution: str :return Query response as list. :rtype: list Examples -------- >>> from clx.osi.slashnext import SlashNextClient >>> api_key = 'slashnext_cloud_apikey' >>> snx_ir_workspace_dir = 'snx_ir_workspace' >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir) >>> response_list = slashnext.download_screenshot('2-ba57-755a7458c8a3') >>> type(response_list[0]) <class 'dict'> """ command = "slashnext-download-screenshot scanid={} resolution={}".format( scanid, resolution.lower() ) try: return self._execute(command) except Exception as error: raise Exception("SlashNext Download Screenshot: " + repr(error)) def download_html(self, scanid): """ Downloads a web page HTML against a previous URL scan request. :param scanid: Scan ID of the scan for which to get the report. Can be retrieved from the "slashnext-url-scan" action or "slashnext-url-scan-sync" action. :type scanid: str :return Query response as list. 
        :rtype: list

        Examples
        --------
        >>> from clx.osi.slashnext import SlashNextClient
        >>> api_key = 'slashnext_cloud_apikey'
        >>> snx_ir_workspace_dir = 'snx_ir_workspace'
        >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir)
        >>> response_list = slashnext.download_html('2-ba57-755a7458c8a3')
        >>> type(response_list[0])
        <class 'dict'>
        """
        command = "slashnext-download-html scanid={}".format(scanid)
        try:
            return self._execute(command)
        except Exception as error:
            raise Exception("SlashNext Download HTML: " + repr(error))

    def download_text(self, scanid):
        """
        Downloads the text of a web page against a previous URL scan request.

        :param scanid: Scan ID of the scan for which to get the report. Can be retrieved from
            the "slashnext-url-scan" action or "slashnext-url-scan-sync" action.
        :type scanid: str
        :return Query response as list.
        :rtype: list

        Examples
        --------
        >>> from clx.osi.slashnext import SlashNextClient
        >>> api_key = 'slashnext_cloud_apikey'
        >>> snx_ir_workspace_dir = 'snx_ir_workspace'
        >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir)
        >>> response_list = slashnext.download_text('2-ba57-755a7458c8a3')
        >>> type(response_list[0])
        <class 'dict'>
        """
        command = "slashnext-download-text scanid={}".format(scanid)
        try:
            return self._execute(command)
        except Exception as error:
            raise Exception("SlashNext Download Text: " + repr(error))

    def api_quota(self):
        """
        Find information about your API quota, such as current usage and remaining quota.

        :return Query response as list.
        :rtype: list

        Examples
        --------
        >>> from clx.osi.slashnext import SlashNextClient
        >>> api_key = 'slashnext_cloud_apikey'
        >>> snx_ir_workspace_dir = 'snx_ir_workspace'
        >>> slashnext = SlashNextClient(api_key, snx_ir_workspace_dir)
        >>> response_list = slashnext.api_quota()
        >>> type(response_list[0])
        <class 'dict'>
        """
        command = "slashnext-api-quota"
        try:
            return self._execute(command)
        except Exception as error:
            raise Exception("SlashNext API Quota: " + repr(error))
13,962
39.239193
182
py
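A hedged end-to-end sketch for SlashNextClient; the key and workspace path are placeholders, and every call issues a live request to the SlashNext cloud.

from clx.osi.slashnext import SlashNextClient

slashnext = SlashNextClient("slashnext_cloud_apikey", "snx_ir_workspace")
slashnext.verify_connection()  # raises if the cloud is unreachable

# Cheap reputation check first; pull the heavier full report only when needed.
reputation = slashnext.host_reputation("google.com")
if reputation:
    report = slashnext.host_report("google.com")
    print(type(report[0]))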
clx-branch-23.04
clx-branch-23.04/python/clx/dns/dns_extractor.py
# Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import cudf import logging log = logging.getLogger(__name__) class DnsVarsProvider: __instance = None @staticmethod def get_instance(): if DnsVarsProvider.__instance is None: DnsVarsProvider() return DnsVarsProvider.__instance def __init__(self): if DnsVarsProvider.__instance is not None: raise Exception("This is a singleton class") else: DnsVarsProvider.__instance = self DnsVarsProvider.__instance.__suffix_df = self.__load_suffix_df() DnsVarsProvider.__instance.__allowed_output_cols = { "hostname", "subdomain", "domain", "suffix", } @property def suffix_df(self): return self.__suffix_df @property def allowed_output_cols(self): return self.__allowed_output_cols def __load_suffix_df(self): suffix_list_path = "%s/resources/suffix_list.txt" % os.path.dirname( os.path.realpath(__file__) ) log.info("Read suffix data at location %s." % (suffix_list_path)) # Read suffix list csv file suffix_df = cudf.io.csv.read_csv( suffix_list_path, names=["suffix"], header=None, dtype=["str"] ) log.info("Read suffix data is finished") suffix_df = suffix_df[suffix_df["suffix"].str.contains("^[^//]+$")] return suffix_df def extract_hostnames(url_series): """This function extracts hostnames from the given urls. :param url_series: Urls that are to be handled. :type url_series: cudf.Series :return: Hostnames extracted from the urls. :rtype: cudf.Series Examples -------- >>> from cudf import DataFrame >>> from clx.dns import dns_extractor as dns >>> input_df = DataFrame( ... { ... "url": [ ... "http://www.google.com", ... "gmail.com", ... "github.com", ... "https://pandas.pydata.org", ... ] ... } ... ) >>> dns.extract_hostnames(input_df["url"]) 0 www.google.com 1 gmail.com 2 github.com 3 pandas.pydata.org Name: 0, dtype: object """ hostnames = url_series.str.extract("([\\w]+[\\.].*[^/]|[\\-\\w]+[\\.].*[^/])")[ 0 ].str.extract("([\\w\\.\\-]+)")[0] return hostnames def generate_tld_cols(hostname_split_df, hostnames, col_len): """ This function generates tld columns. :param hostname_split_df: Hostname splits. :type hostname_split_df: cudf.DataFrame :param hostnames: Hostnames. :type hostnames: cudf.DataFrame :param col_len: Hostname splits dataframe columns length. :return: Tld columns with all combination. 
    :rtype: cudf.DataFrame

    Examples
    --------
    >>> import cudf
    >>> from clx.dns import dns_extractor as dns
    >>> hostnames = cudf.Series(["www.google.com", "pandas.pydata.org"])
    >>> hostname_splits = dns.get_hostname_split_df(hostnames)
    >>> print(hostname_splits)
         2       1       0
    0  com  google     www
    1  org  pydata  pandas
    >>> col_len = len(hostname_splits.columns) - 1
    >>> dns.generate_tld_cols(hostname_splits, hostnames, col_len)
         2       1       0 tld2        tld1               tld0
    0  com  google     www  com  google.com     www.google.com
    1  org  pydata  pandas  org  pydata.org  pandas.pydata.org
    """
    hostname_split_df = hostname_split_df.fillna("")
    hostname_split_df["tld" + str(col_len)] = hostname_split_df[col_len]
    # Add all other elements of hostname_split_df
    for j in range(col_len - 1, 0, -1):
        hostname_split_df["tld" + str(j)] = (
            hostname_split_df[j]
            .str.cat(hostname_split_df["tld" + str(j + 1)], sep=".")
            .str.rstrip(".")
        )
    # Assign hostname to tld0, to handle received input is just domain name.
    hostname_split_df["tld0"] = hostnames
    return hostname_split_df


def _handle_unknown_suffix(unknown_suffix_df, col_dict):
    if col_dict["hostname"]:
        unknown_suffix_df = unknown_suffix_df[["idx", "tld0"]]
        unknown_suffix_df = unknown_suffix_df.rename(columns={"tld0": "hostname"})
    else:
        unknown_suffix_df = unknown_suffix_df[["idx"]]
    if col_dict["subdomain"]:
        unknown_suffix_df["subdomain"] = ""
    if col_dict["domain"]:
        unknown_suffix_df["domain"] = ""
    if col_dict["suffix"]:
        unknown_suffix_df["suffix"] = ""
    return unknown_suffix_df


def _extract_tld(input_df, suffix_df, col_len, col_dict):
    """
    Examples
    --------
    input:
           4    3    2     1       0  tld4    tld3        tld2             tld1                    tld0  idx
    0     ac  com  cnn  news  forums    ac  com.ac  cnn.com.ac  news.cnn.com.ac  forums.news.cnn.com.ac    0
    1         ac   cnn  news  forums          ac       cnn.ac       news.cnn.ac      forums.news.cnn.ac    1
    2             com   cnn       b                      com        cnn.com                   b.cnn.com    2
    output:
                     hostname domain  suffix    subdomain  idx
    0  forums.news.cnn.com.ac    cnn  com.ac  forums.news    0
    2      forums.news.cnn.ac    cnn      ac  forums.news    1
    1               b.cnn.com    cnn     com            b    2
    """
    tmp_dfs = []
    # Left join on single column dataframe does not provide expected results hence adding dummy column.
    suffix_df["dummy"] = ""
    # Iterating over each tld column starting from tld0 until it finds a match.
    for i in range(col_len + 1):
        # Add index to sort the parsed information with respect to input records order.
        cols_keep = ["idx"]
        tld_col = "tld" + str(i)
        suffix_df = suffix_df.rename(columns={suffix_df.columns[0]: tld_col})
        # Left join input_df with suffix_df on tld column for each iteration.
        merged_df = input_df.merge(suffix_df, on=tld_col, how="left")
        if i > 0:
            col_pos = i - 1
            # Retrieve records which satisfies join clause.
            joined_recs_df = merged_df[~merged_df["dummy"].isna()]
            if not joined_recs_df.empty:
                if col_dict["hostname"]:
                    joined_recs_df = joined_recs_df.rename(columns={"tld0": "hostname"})
                    cols_keep.append("hostname")
                if col_dict["subdomain"]:
                    cols_keep.append("subdomain")
                    joined_recs_df["subdomain"] = ""
                    if col_pos > 0:
                        for idx in range(0, col_pos):
                            joined_recs_df["subdomain"] = joined_recs_df[
                                "subdomain"
                            ].str.cat(joined_recs_df[idx], sep=".")
                        joined_recs_df["subdomain"] = (
                            joined_recs_df["subdomain"]
                            .str.replace(".^", "")
                            .str.lstrip(".")
                        )
                if col_dict["domain"]:
                    joined_recs_df = joined_recs_df.rename(columns={col_pos: "domain"})
                    cols_keep.append("domain")
                if col_dict["suffix"]:
                    joined_recs_df = joined_recs_df.rename(columns={tld_col: "suffix"})
                    cols_keep.append("suffix")
                joined_recs_df = joined_recs_df[cols_keep]
                # Concat current iteration result to previous iteration result.
                tmp_dfs.append(joined_recs_df)
                # delete not required variable.
                del joined_recs_df
                # Assigning unprocessed records to input_df for next stage of processing.
                if i < col_len:
                    input_df = merged_df[merged_df["dummy"].isna()]
                    # Drop unwanted columns.
                    input_df = input_df.drop(["dummy", tld_col], axis=1)
                # Handles scenario when some records with last tld column matches to suffix list but not all.
                else:
                    merged_df = merged_df[merged_df["dummy"].isna()]
                    unknown_suffix_df = _handle_unknown_suffix(merged_df, col_dict)
                    tmp_dfs.append(unknown_suffix_df)
            # Handles scenario when all records with last tld column doesn't match to suffix list.
            elif i == col_len and not merged_df.empty:
                unknown_suffix_df = _handle_unknown_suffix(merged_df, col_dict)
                tmp_dfs.append(unknown_suffix_df)
            else:
                continue
    # Concat all temporary output dataframes
    output_df = cudf.concat(tmp_dfs)
    return output_df


def _create_col_dict(req_cols, allowed_output_cols):
    """Creates dictionary to apply check condition while extracting tld."""
    col_dict = {col: True for col in req_cols}
    if req_cols != allowed_output_cols:
        for col in allowed_output_cols ^ req_cols:
            col_dict[col] = False
    return col_dict


def _verify_req_cols(req_cols, allowed_output_cols):
    """Verify user requested columns against allowed output columns."""
    if req_cols is not None:
        if not req_cols.issubset(allowed_output_cols):
            raise ValueError(
                "Given req_cols must be subset of %s" % (allowed_output_cols)
            )
    else:
        req_cols = allowed_output_cols
    return req_cols


def parse_url(url_series, req_cols=None):
    """This function extracts subdomain, domain and suffix for a given url.

    :param url_series: Urls that are to be handled.
    :type url_series: cudf.Series
    :param req_cols: Columns requested to extract such as (domain, subdomain, suffix and hostname).
    :type req_cols: set(strings)
    :return: Extracted information of requested columns.
    :rtype: cudf.DataFrame

    Examples
    --------
    >>> from cudf import DataFrame
    >>> from clx.dns import dns_extractor as dns
    >>>
    >>> input_df = DataFrame(
    ...     {
    ...         "url": [
    ...             "http://www.google.com",
    ...             "gmail.com",
    ...             "github.com",
    ...             "https://pandas.pydata.org",
    ...         ]
    ...     }
    ... )
    >>> dns.parse_url(input_df["url"])
                hostname  domain suffix subdomain
    0     www.google.com  google    com       www
    1          gmail.com   gmail    com
    2         github.com  github    com
    3  pandas.pydata.org  pydata    org    pandas
    >>> dns.parse_url(input_df["url"], req_cols={'domain', 'suffix'})
       domain suffix
    0  google    com
    1   gmail    com
    2  github    com
    3  pydata    org
    """
    # Singleton object.
    sv = DnsVarsProvider.get_instance()
    req_cols = _verify_req_cols(req_cols, sv.allowed_output_cols)
    col_dict = _create_col_dict(req_cols, sv.allowed_output_cols)
    hostnames = extract_hostnames(url_series)
    url_index = url_series.index
    del url_series
    log.info("Extracting hostnames is successfully completed.")
    hostname_split_ser = hostnames.str.findall("([^.]+)")
    hostname_split_df = hostname_split_ser.to_frame()
    hostname_split_df = cudf.DataFrame(hostname_split_df[0].to_arrow().to_pylist())
    col_len = len(hostname_split_df.columns) - 1
    log.info("Generating tld columns...")
    hostname_split_df = generate_tld_cols(hostname_split_df, hostnames, col_len)
    log.info("Successfully generated tld columns.")
    # remove hostnames since they are available in hostname_split_df
    del hostnames
    # Assign input index to idx column.
    hostname_split_df["idx"] = url_index
    log.info("Extracting tld...")
    output_df = _extract_tld(hostname_split_df, sv.suffix_df, col_len, col_dict)
    # Sort index based on given input index order.
output_df = output_df.sort_values("idx", ascending=True) # Drop temp columns. output_df = output_df.drop("idx", axis=1) # Reset the index. output_df = output_df.reset_index(drop=True) if not output_df.empty: log.info("Extracting tld is successfully completed.") return output_df
13,134
38.326347
148
py
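A hedged sketch of the parse_url entry point above; it assumes a GPU environment with cudf available, since all parsing runs on cudf Series and DataFrames.

import cudf
from clx.dns import dns_extractor as dns

urls = cudf.Series(["http://www.google.com", "https://pandas.pydata.org"])

# Full parse, then a narrower request that only materializes two columns.
full_df = dns.parse_url(urls)
slim_df = dns.parse_url(urls, req_cols={"domain", "suffix"})
print(slim_df)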
clx-branch-23.04
clx-branch-23.04/python/clx/dns/__init__.py
0
0
0
py
clx-branch-23.04
clx-branch-23.04/python/clx/eda/eda.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

import cuxfilter
from cuxfilter.layouts import feature_and_double_base

from clx.eda.summary_stats import SummaryStatistics


class EDA:
    """An EDA (Exploratory Data Analysis) Object. EDA is used to explore different features of a given dataframe.

    :param dataframe: Dataframe to be used for analysis
    :type dataframe: cudf.DataFrame

    Examples
    --------
    >>> from clx.eda import EDA
    >>> import cudf
    >>> import pandas as pd
    >>> df = cudf.DataFrame()
    >>> df['a'] = [1,2,3,4]
    >>> df['b'] = ['a','b','c','c']
    >>> df['c'] = [True, False, True, True]
    >>> df['d'] = cudf.Series(pd.date_range("2000-01-01", periods=3,freq="m"))
    >>> eda = EDA(df)
    >>> eda
    {
        "SummaryStatistics": {
            "a": {
                "dtype": "int64",
                "summary": {
                    "unique": "4",
                    "total": "4"
                }
            },
            "b": {
                "dtype": "object",
                "summary": {
                    "unique": "3",
                    "total": "4"
                }
            },
            "c": {
                "dtype": "bool",
                "summary": {
                    "true_percent": "0.75"
                }
            },
            "d": {
                "dtype": "datetime64[ns]",
                "summary": {
                    "timespan": "60 days, 2880 hours, 0 minutes, 0 seconds"
                }
            }
        }
    }
    """

    eda_modules = {"SummaryStatistics": SummaryStatistics}

    def __init__(self, dataframe):
        self.__dataframe = dataframe
        self.__analysis, self.__module_ref = self.__generate_analysis(dataframe)

    @property
    def analysis(self):
        """Analysis results as a `dict`"""
        return self.__analysis

    @property
    def dataframe(self):
        """Dataframe used for analysis"""
        return self.__dataframe

    def __repr__(self):
        return json.dumps(self.analysis, indent=2)

    def __generate_analysis(self, dataframe):
        """For each of the modules, generate the analysis"""
        module_ref = {}
        analysis_results = {}
        for key, eda_module in self.eda_modules.items():
            eda_module_obj = eda_module(dataframe)
            module_ref[key] = eda_module_obj
            analysis_results[key] = eda_module_obj.analysis
        return analysis_results, module_ref

    def save_analysis(self, dirpath):
        """Save analysis output to directory path.

        :param dirpath: Directory path to save analysis output.
        :type dirpath: str
        """
        for key, analysis in self.__module_ref.items():
            if os.path.isdir(dirpath):
                output_file = dirpath + "/" + key
                analysis.save_analysis(output_file)

    def cuxfilter_dashboard(self):
        """Create cuxfilter dashboard for Exploratory Data Analysis.

        :return: cuxfilter dashboard populated with data and charts.
        :rtype: cuxfilter.DashBoard
        """
        # Collect charts from every analysis module, not just the last one iterated.
        charts = []
        for module in self.__module_ref.values():
            charts.extend(module.charts)
        cux_df = cuxfilter.DataFrame.from_dataframe(self.__dataframe)
        return cux_df.dashboard(
            charts,
            layout=feature_and_double_base,
            theme=cuxfilter.themes.light,
            title="Exploratory Data Analysis",
        )
4,160
30.285714
113
py
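A hedged sketch of the EDA wrapper above, mirroring its docstring; assumes cudf and cuxfilter are installed on a GPU host, and the output paths are illustrative.

import cudf
import pandas as pd
from clx.eda import EDA

df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "c"]})
df["d"] = cudf.Series(pd.date_range("2000-01-01", periods=4, freq="m"))

eda = EDA(df)
print(eda.analysis["SummaryStatistics"]["a"]["summary"])
eda.save_analysis("/tmp")              # writes /tmp/SummaryStatistics.json
dashboard = eda.cuxfilter_dashboard()  # returns a cuxfilter DashBoard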
clx-branch-23.04
clx-branch-23.04/python/clx/eda/analysis.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from abc import ABC, abstractmethod


class Analysis(ABC):
    def __init__(self, dataframe):
        self._analysis = self._generate_analysis(dataframe)
        self._charts = self._generate_charts(dataframe)

    @property
    def analysis(self):
        return self._analysis

    @property
    def charts(self):
        return self._charts

    @abstractmethod
    def _generate_analysis(self, dataframe):
        """Abstract function intended to create a dictionary summarizing analysis results of the dataframe"""
        pass

    @abstractmethod
    def _generate_charts(self, dataframe):
        """Abstract function intended to create a list of cuxfilter chart objects for the dataframe"""
        pass

    def to_json(self):
        """Get json version of analysis results"""
        return json.dumps(self.analysis, indent=2)

    def save_analysis(self, output_filepath):
        """Save analysis to a json file. TODO: Expand to other output types"""
        formatted_output = self.to_json()
        with open(output_filepath + ".json", "w") as file:
            file.write(formatted_output)
1,681
31.346154
109
py
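A hedged sketch of extending the Analysis base class above by implementing both abstract methods; `NullCounts` is a hypothetical module name, not part of clx.

from clx.eda.analysis import Analysis


class NullCounts(Analysis):
    """Hypothetical analysis module: counts missing values per column."""

    def _generate_analysis(self, dataframe):
        # Dictionary summary, consistent with the base class contract.
        return {col: str(dataframe[col].isna().sum()) for col in dataframe.columns}

    def _generate_charts(self, dataframe):
        return []  # no cuxfilter charts for this analysis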
clx-branch-23.04
clx-branch-23.04/python/clx/eda/__init__.py
from clx.eda.eda import EDA # noqa: F401
42
20.5
41
py
clx-branch-23.04
clx-branch-23.04/python/clx/eda/summary_stats.py
# Copyright (c) 2020, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cuxfilter from clx.eda.analysis import Analysis class SummaryStatistics(Analysis): def __init__(self, dataframe): super().__init__(dataframe) def __summary_obj(self, series): summary = {} uniq_count = len(series.unique()) total = series.notna().sum() summary["unique"] = str(uniq_count) summary["total"] = str(total) return summary def __summary_bool(self, series): summary = {} true_per = (series == True).sum() # noqa: E712 summary["true_percent"] = str(true_per / len(series)) return summary def __summary_num(self, series): summary = {} uniq_count = len(series.unique()) total = series.notna().sum() summary["unique"] = str(uniq_count) summary["total"] = str(total) return summary def __summary_time(self, series): summary = {} duration = series.max() - series.min() days = duration.astype("timedelta64[D]").astype(int) seconds = duration.astype("timedelta64[s]").astype(int) hours = days * 24 + seconds // 3600 minutes = (seconds % 3600) // 60 seconds = seconds % 60 msg = "{0} days, {1} hours, {2} minutes, {3} seconds".format( days, hours, minutes, seconds ) summary["timespan"] = msg return summary def _generate_analysis(self, dataframe): # This function will receive a dataframe and returns a dictionary of summary statistics summary_dict = {} for col in dataframe.columns: summary_dict[col] = {} summary_dict[col]["dtype"] = str(dataframe[col].dtype) if dataframe[col].dtype == "object": summary_dict[col]["summary"] = self.__summary_obj(dataframe[col]) elif dataframe[col].dtype == "bool": summary_dict[col]["summary"] = self.__summary_bool(dataframe[col]) elif dataframe[col].dtype in ["int64", "float64", "int8"]: summary_dict[col]["summary"] = self.__summary_num(dataframe[col]) elif dataframe[col].dtype == "datetime64[ns]": summary_dict[col]["summary"] = self.__summary_time(dataframe[col]) else: msg = "\t column type (" + str(dataframe[col].dtype) + ") not supported" summary_dict[col]["error"] = msg return summary_dict def _generate_charts(self, dataframe): """Get barcharts for the summary analysis""" charts = [] for col in dataframe.columns: if dataframe[col].dtype == "object": bars = len(dataframe[col].unique()) if bars < 30: if bars > 1: charts.append(cuxfilter.charts.bar(col)) return charts
3,423
37.47191
95
py
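A hedged sketch using SummaryStatistics directly, without the EDA wrapper; input is assumed to be a cudf DataFrame, and the output path is illustrative.

import cudf
from clx.eda.summary_stats import SummaryStatistics

df = cudf.DataFrame({"user": ["u1", "u2", "u2"], "active": [True, False, True]})
stats = SummaryStatistics(df)
print(stats.to_json())               # per-column dtype plus summary dict
stats.save_analysis("/tmp/summary")  # writes /tmp/summary.json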
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_kafka_reader.py
# Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from confluent_kafka import Consumer from confluent_kafka import Message, KafkaError from mockito import when, mock, verify from clx.io.reader.kafka_reader import KafkaReader batch_size = 100 message = mock(Message) kafka_error = mock(KafkaError) when(kafka_error).code().thenReturn("test") when(message).value().thenReturn("test message".encode("utf-8")) @pytest.mark.parametrize("batch_size", [batch_size]) def test_read_data(batch_size): consumer = mock(Consumer) reader = KafkaReader(batch_size, consumer) # Return msg = None 1 time, then return a valid message moving forward when(reader.consumer).poll(timeout=1.0).thenReturn(None).thenReturn(message) # Always return no message error when(message).error().thenReturn(None) df = reader.fetch_data() assert df.shape == (100, 1) assert df.columns == ["Raw"] assert df["Raw"][0] == "test message" # Call to poll returned 100(Valid messages) + 1(None message) = 101 verify(reader.consumer, times=101).poll(...) @pytest.mark.parametrize("batch_size", [batch_size]) def test_read_data_message_error(batch_size): consumer = mock(Consumer) reader = KafkaReader(batch_size, consumer) # Return valid message data when(reader.consumer).poll(timeout=1.0).thenReturn(message) # Return no message error 1 time, then an error moving forward when(message).error().thenReturn(None).thenReturn(kafka_error) df = reader.fetch_data() # Validate consumer polls # 1 (Valid message) + 1 (Error Message) = 2 Consumer polls verify(reader.consumer, times=2).poll(...) # Validate dataframe output assert df.shape == (1, 1) assert df.columns == ["Raw"] assert df["Raw"].to_arrow().to_pylist() == ["test message"] @pytest.mark.parametrize("batch_size", [5]) def test_read_data_no_messages(batch_size): consumer = mock(Consumer) reader = KafkaReader(batch_size, consumer, time_window=5) # Return no messages when(reader.consumer).poll(timeout=1.0).thenReturn(None) df = reader.fetch_data() # Validate dataframe output assert df.empty
2,706
35.093333
80
py
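A hedged sketch of KafkaReader against a real Consumer rather than the mocks above; broker, group, and topic are placeholders, and the constructor arguments follow the signature implied by these tests.

from confluent_kafka import Consumer
from clx.io.reader.kafka_reader import KafkaReader

consumer = Consumer({
    "bootstrap.servers": "localhost:9092",
    "group.id": "clx-demo",
    "auto.offset.reset": "earliest",
})
consumer.subscribe(["raw-logs"])

# Batch of up to 100 messages, giving up after a 30 second time window.
reader = KafkaReader(100, consumer, time_window=30)
df = reader.fetch_data()  # dataframe with a single "Raw" column
print(df.shape)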
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_anomaly_detection.py
import cudf import clx.analytics.anomaly_detection import clx.features def test_anomaly_detection(): df = cudf.DataFrame( { "time": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], "user": [ "u1", "u5", "u4", "u2", "u3", "u1", "u1", "u1", "u1", "u1", "u1", "u1", "u1", "u1", ], "computer": [ "c1", "c1", "c5", "c1", "c1", "c3", "c1", "c1", "c2", "c3", "c1", "c1", "c4", "c5", ], } ) fdf = clx.features.frequency(df, "user", "computer") # Create feature data actual = clx.analytics.anomaly_detection.dbscan(fdf, min_samples=2, eps=0.5) expected = cudf.Series([-1, -1], dtype="int32", index=None) expected.index = cudf.Series(["u1", "u4"]) assert actual.equals(expected)
1,225
23.52
80
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_dga_dataset.py
# Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cudf from clx.analytics.dga_dataset import DGADataset test_domains_len = 2 test_batchsize = 1 test_input_df = cudf.DataFrame( {"domain": ["studytour.com.tw", "cnn.com"], "type": [1, 1]} ) expected_output_df = cudf.DataFrame( { 0: [115, 99], 1: [116, 110], 2: [117, 110], 3: [100, 46], 4: [121, 99], 5: [116, 111], 6: [111, 109], 7: [117, 0], 8: [114, 0], 9: [46, 0], 10: [99, 0], 11: [111, 0], 12: [109, 0], 13: [46, 0], 14: [116, 0], 15: [119, 0], "len": [16, 7], }, dtype="int32" ) expected_output_df["type"] = [1, 1] expected_output_df["domain"] = ["studytour.com.tw", "cnn.com"] def test_detector_dataset(): dataset = DGADataset(test_input_df, 100) assert dataset.length == 2 assert dataset.data.equals(expected_output_df)
1,491
26.62963
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_dga_detector.py
# Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cudf from clx.utils.data.dataloader import DataLoader from clx.analytics.dga_detector import DGADetector from clx.analytics.dga_dataset import DGADataset from clx.analytics.model.rnn_classifier import RNNClassifier import torch from os import path from faker import Faker import random dd = DGADetector() dd.init_model() def test_train_model(): if torch.cuda.is_available(): fake = Faker() Faker.seed(0) domain_col = [fake.dga() for _ in range(200)] label_col = [random.randint(0, 1) for _ in range(200)] train_gdf = cudf.DataFrame(list(zip(domain_col, label_col)), columns=["domain", "label"]) # train model dd.train_model(train_gdf["domain"], train_gdf["label"], batch_size=2) gpu_count = torch.cuda.device_count() if gpu_count > 1: assert isinstance(dd.model.module, RNNClassifier) else: assert isinstance(dd.model, RNNClassifier) def test_evaluate_model(): if torch.cuda.is_available(): test_df = cudf.DataFrame({"domain": ["cnn.com", "bakercityherald.com"], "type": [1, 0]}) truncate = 100 dataset = DGADataset(test_df, truncate) dataloader = DataLoader(dataset, batchsize=2) # evaluate model accuracy = dd.evaluate_model(dataloader) assert isinstance(accuracy, (int, float)) def test_predict(): if torch.cuda.is_available(): test_domains = cudf.Series(["nvidia.com", "dfsdfsdf"]) # predict preds = dd.predict(test_domains) assert len(preds) == 2 assert preds.dtype == int assert isinstance(preds, cudf.core.series.Series) def test2_predict(): if torch.cuda.is_available(): test_domains = cudf.Series(["nvidia.com", "dfsdfsdf"]) # predict preds = dd.predict(test_domains, probability=True) assert len(preds) == 2 assert preds.dtype == float assert isinstance(preds, cudf.core.series.Series) def test_save_model(tmpdir): if torch.cuda.is_available(): # save model dd.save_model(str(tmpdir.join("clx_dga.mdl"))) assert path.exists(str(tmpdir.join("clx_dga.mdl"))) def test_load_model(tmpdir): if torch.cuda.is_available(): # save model dd.save_model(str(tmpdir.join("clx_dga.mdl"))) assert path.exists(str(tmpdir.join("clx_dga.mdl"))) # load model dd2 = DGADetector() dd2.init_model() dd2.load_model(str(tmpdir.join("clx_dga.mdl"))) gpu_count = torch.cuda.device_count() if gpu_count > 1: assert isinstance(dd2.model.module, RNNClassifier) else: assert isinstance(dd2.model, RNNClassifier) def test_save_checkpoint(tmpdir): if torch.cuda.is_available(): # save model dd.save_checkpoint(str(tmpdir.join("clx_dga.mdl"))) assert path.exists(str(tmpdir.join("clx_dga.mdl"))) def test_load_checkpoint(tmpdir): if torch.cuda.is_available(): # save model dd.save_model(str(tmpdir.join("clx_dga.mdl"))) assert path.exists(str(tmpdir.join("clx_dga.mdl"))) # load model dd2 = DGADetector() dd2.init_model() dd2.load_model(str(tmpdir.join("clx_dga.mdl"))) gpu_count = torch.cuda.device_count() if gpu_count > 1: assert isinstance(dd2.model.module, RNNClassifier) else: assert isinstance(dd2.model, RNNClassifier)
4,063
32.586777
97
py
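A hedged end-to-end sketch distilled from the tests above; it requires a CUDA-capable GPU, and the tiny synthetic training set is illustrative only.

import cudf
from clx.analytics.dga_detector import DGADetector

dd = DGADetector()
dd.init_model()

# Toy benign/DGA-like pairs; real training needs a labeled domain corpus.
train_gdf = cudf.DataFrame({
    "domain": ["cnn.com", "xkwq3rhd.biz"] * 50,
    "label": [0, 1] * 50,
})
dd.train_model(train_gdf["domain"], train_gdf["label"], batch_size=4)

preds = dd.predict(cudf.Series(["nvidia.com", "dfsdfsdf"]))                    # class labels
probs = dd.predict(cudf.Series(["nvidia.com", "dfsdfsdf"]), probability=True)  # scores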
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_virus_total.py
# Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import requests import cudf from mockito import when, mock from clx.osi.virus_total import VirusTotalClient api_key = "dummy-api-key" client = VirusTotalClient(api_key=api_key) test_input_df = cudf.DataFrame( { "firstname": ["Emma", "Ava", "Sophia"], "lastname": ["Olivia", "Isabella", "Charlotte"], "gender": ["F", "F", "F"], } ) ipaddress_report_resp = mock( { "status_code": 200, "raise_for_status": lambda: None, "text": '{ "response_code": 1, "verbose_msg": "IP address found in dataset", "asn": "25532", "country": "RU", "resolutions": [{ "last_resolved": "2013-04-08 00:00:00", "hostname": "90.156.201.27" }, { "last_resolved": "2013-04-08 00:00:00", "hostname": "auto.rema-tiptop.ru" }], "detected_urls": [{ "url": "http://027.ru/", "positives": 2, "total": 37, "scan_date": "2013-04-07 07:18:09" }], "detected_downloaded_samples": [{ "date": "2018-03-29 18:38:05", "positives": 2, "total": 59, "sha256": "d9cacb75a3fd126762f348d00fb6e3809ede2c13b2ad251831e130bcb7ae7a84" }, { "date": "2018-03-29 08:52:38", "positives": 2, "total": 59, "sha256": "416751ebbd5d6c37bb20233a39ade80db584057f3d5c4bbf976ce9c332836707" }], "undetected_downloaded_samples": [{ "date": "2018-03-28 06:36:55", "positives": 0, "total": 0, "sha256": "4a91398fd21f2d0b09fc7478d016d4a8fc9fe6f1c01e10b8e7c725542260cd9f" }], "undetected_urls": [ "http://zadiplomom.ru/", "3aafd5a54bb034882b8f5544bb647b6841bcb6ce938c40fb92be4cb84f2f0983", 0, 67, "2018-02-19 18:04:15" ] }', }, spec=requests.Response, ) put_comment_resp = mock( { "status_code": 200, "raise_for_status": lambda: None, "text": '{"response_code": 1, "verbose_msg": "Your comment was successfully posted"}', }, spec=requests.Response, ) domain_report_resp = mock( { "status_code": 200, "raise_for_status": lambda: None, "text": '{ "undetected_referrer_samples": [{ "date": "2018-03-04 16:38:06", "positives": 0, "total": 66, "sha256": "ce08cf22949b6b6fcd4e61854ce810a4f9ee04529340dd077fa354d759dc7a95" }, { "positives": 0, "total": 53, "sha256": "b8f5db667431d02291eeec61cf9f0c3d7af00798d0c2d676fde0efb0cedb7741" }], "whois_timestamp": 1520586501, "detected_downloaded_samples": [{ "date": "2013-06-20 18:51:30", "positives": 2, "total": 46, "sha256": "cd8553d9b24574467f381d13c7e0e1eb1e58d677b9484bd05b9c690377813e54" }], "detected_referrer_samples": [], "undetected_downloaded_samples": [{ "date": "2018-01-14 22:34:24", "positives": 0, "total": 70, "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" }], "resolutions": [{ "last_resolved": "2018-03-09 00:00:00", "ip_address": "185.53.177.31" }, { "last_resolved": "2013-06-20 00:00:00", "ip_address": "90.156.201.97" }], "subdomains": [ "test.027.ru", "www.027.ru" ], "categories": [ "parked", "uncategorized" ], "domain_siblings": [], "undetected_urls": [], "response_code": 1, "verbose_msg": "Domain found in dataset", "detected_urls": [{ "url": "http://027.ru/", "positives": 2, "total": 67, "scan_date": "2018-04-01 15:51:22" }, { 
"url": "http://027.ru/adobe/flash_install_v10x1.php", "positives": 5, "total": 67, "scan_date": "2018-03-26 09:22:43" }, { "url": "http://027.ru/track.php", "positives": 4, "total": 66, "scan_date": "2018-01-14 22:39:41" }, { "url": "http://027.ru/track.php?domain=027.ru&caf=1&toggle=answercheck", "positives": 2, "total": 66, "scan_date": "2018-01-09 22:19:43" }, { "url": "https://027.ru/", "positives": 1, "total": 66, "scan_date": "2016-02-08 13:25:40" }] }', }, spec=requests.Response, ) url_scan_resp = mock( { "status_code": 200, "raise_for_status": lambda: None, "text": '{ "response_code": 1, "verbose_msg": "Scan request successfully queued, come back later for the report", "scan_id": "1db0ad7dbcec0676710ea0eaacd35d5e471d3e11944d53bcbd31f0cbd11bce31-1320752364", "scan_date": "2011-11-08 11:39:24", "url": "http://www.virustotal.com/", "permalink": "http://www.virustotal.com/url/1db0ad7dbcec0676710ea0eaacd35d5e471d3e11944d53bcbd31f0cbd11bce31/analysis/1320752364/" }', }, spec=requests.Response, ) url_report_resp = mock( { "status_code": 200, "raise_for_status": lambda: None, "text": '{ "response_code": 1, "verbose_msg": "Scan finished, scan information embedded in this object", "scan_id": "1db0ad7dbcec0676710ea0eaacd35d5e471d3e11944d53bcbd31f0cbd11bce31-1390467782", "permalink": "https://www.virustotal.com/url/__urlsha256__/analysis/1390467782/", "url": "http://www.virustotal.com/", "scan_date": "2014-01-23 09:03:02", "filescan_id": null, "positives": 0, "total": 51, "scans": { "CLEAN MX": { "detected": false, "result": "clean site" }, "MalwarePatrol": { "detected": false, "result": "clean site" } } }', }, spec=requests.Response, ) file_report_resp = mock( { "status_code": 200, "raise_for_status": lambda: None, "text": '{ "response_code": 1, "verbose_msg": "Scan finished, scan information embedded in this object", "resource": "99017f6eebbac24f351415dd410d522d", "scan_id": "52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1273894724", "md5": "99017f6eebbac24f351415dd410d522d", "sha1": "4d1740485713a2ab3a4f5822a01f645fe8387f92", "sha256": "52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c", "scan_date": "2010-05-15 03:38:44", "permalink": "https://www.virustotal.com/file/52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c/analysis/1273894724/", "positives": 40, "total": 40, "scans": { "nProtect": { "detected": true, "version": "2010-05-14.01", "result": "Trojan.Generic.3611249", "update": "20100514" }, "CAT-QuickHeal": { "detected": true, "version": "10.00", "result": "Trojan.VB.acgy", "update": "20100514" }, "McAfee": { "detected": true, "version": "5.400.0.1158", "result": "Generic.dx!rkx", "update": "20100515" }, "TheHacker": { "detected": true, "version": "6.5.2.0.280", "result": "Trojan/VB.gen", "update": "20100514" }, "VirusBuster": { "detected": true, "version": "5.0.27.0", "result": "Trojan.VB.JFDE", "update": "20100514" } } }', }, spec=requests.Response, ) file_scan_resp = mock( { "status_code": 200, "raise_for_status": lambda: None, "text": '{ "permalink": "https://www.virustotal.com/file/d140c...244ef892e5/analysis/1359112395/", "resource": "d140c244ef892e59c7f68bd0c6f74bb711032563e2a12fa9dda5b760daecd556", "response_code": 1, "scan_id": "d140c244ef892e59c7f68bd0c6f74bb711032563e2a12fa9dda5b760daecd556-1359112395", "verbose_msg": "Scan request successfully queued, come back later for the report", "sha256": "d140c244ef892e59c7f68bd0c6f74bb711032563e2a12fa9dda5b760daecd556" }', }, spec=requests.Response, ) 
@pytest.mark.parametrize("client", [client]) @pytest.mark.parametrize("ipaddress_report_resp", [ipaddress_report_resp]) def test_ipaddress_report(client, ipaddress_report_resp): when(requests).get(...).thenReturn(ipaddress_report_resp) result = client.ipaddress_report("90.156.201.27") json_resp = result["json_resp"] assert result["status_code"] == 200 assert json_resp["response_code"] == 1 assert json_resp["country"] == "RU" assert json_resp["asn"] == "25532" assert json_resp["resolutions"][0]["last_resolved"] == "2013-04-08 00:00:00" @pytest.mark.parametrize("client", [client]) @pytest.mark.parametrize("put_comment_resp", [put_comment_resp]) def test_put_comment(client, put_comment_resp): when(requests).post(...).thenReturn(put_comment_resp) result = client.put_comment( "75efd85cf6f8a962fe016787a7f57206ea9263086ee496fc62e3fc56734d4b53", "This is a test comment", ) json_resp = result["json_resp"] assert result["status_code"] == 200 assert json_resp["response_code"] == 1 assert json_resp["verbose_msg"] == "Your comment was successfully posted" @pytest.mark.parametrize("client", [client]) @pytest.mark.parametrize("domain_report_resp", [domain_report_resp]) def test_domain_report(client, domain_report_resp): when(requests).get(...).thenReturn(domain_report_resp) result = client.domain_report("027.ru") json_resp = result["json_resp"] assert result["status_code"] == 200 assert json_resp["detected_urls"][0]["url"] == "http://027.ru/" assert json_resp["undetected_referrer_samples"][0]["positives"] == 0 assert json_resp["undetected_referrer_samples"][0]["total"] == 66 @pytest.mark.parametrize("client", [client]) @pytest.mark.parametrize("url_scan_resp", [url_scan_resp]) def test_url_scan(client, url_scan_resp): when(requests).post(...).thenReturn(url_scan_resp) result = client.url_scan(["virustotal.com"]) json_resp = result["json_resp"] assert result["status_code"] == 200 assert json_resp["response_code"] == 1 assert json_resp["url"] == "http://www.virustotal.com/" assert ( json_resp["verbose_msg"] == "Scan request successfully queued, come back later for the report" ) @pytest.mark.parametrize("client", [client]) @pytest.mark.parametrize("url_report_resp", [url_report_resp]) def test_url_report(client, url_report_resp): when(requests).post(...).thenReturn(url_report_resp) result = client.url_report(["virustotal.com"]) json_resp = result["json_resp"] assert result["status_code"] == 200 assert json_resp["response_code"] == 1 assert json_resp["url"] == "http://www.virustotal.com/" assert not json_resp["scans"]["CLEAN MX"]["detected"] assert json_resp["scans"]["CLEAN MX"]["result"] == "clean site" @pytest.mark.parametrize("client", [client]) @pytest.mark.parametrize("file_report_resp", [file_report_resp]) def test_file_report(client, file_report_resp): when(requests).get(...).thenReturn(file_report_resp) result = client.file_report(["99017f6eebbac24f351415dd410d522d"]) json_resp = result["json_resp"] assert result["status_code"] == 200 assert json_resp["resource"] == "99017f6eebbac24f351415dd410d522d" assert json_resp["sha1"] == "4d1740485713a2ab3a4f5822a01f645fe8387f92" assert json_resp["scan_date"] == "2010-05-15 03:38:44" assert json_resp["scans"]["nProtect"]["detected"] @pytest.mark.parametrize("client", [client]) @pytest.mark.parametrize("file_scan_resp", [file_scan_resp]) @pytest.mark.parametrize("test_input_df", [test_input_df]) def test_file_scan(tmpdir, client, file_scan_resp, test_input_df): fname = str(tmpdir.mkdir("tmp_test_virus_total").join("person.csv")) test_input_df.to_csv(fname, 
index=False) when(requests).post(...).thenReturn(file_scan_resp) result = client.file_scan(fname) json_resp = result["json_resp"] assert result["status_code"] == 200 assert json_resp["response_code"] == 1 @pytest.mark.parametrize("client", [client]) @pytest.mark.parametrize("file_rescan_resp", [file_scan_resp]) @pytest.mark.parametrize("test_input_df", [test_input_df]) def test_file_rescan(tmpdir, client, file_rescan_resp, test_input_df): fname = str(tmpdir.mkdir("tmp_test_virus_total").join("person.csv")) test_input_df.to_csv(fname, index=False) when(requests).post(...).thenReturn(file_rescan_resp) result = client.file_rescan(fname) json_resp = result["json_resp"] assert result["status_code"] == 200 assert json_resp["response_code"] == 1
12,538
61.383085
1,890
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_event_parser.py
# Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cudf from clx.parsers.event_parser import EventParser class TestEventParserImpl(EventParser): def parse(self, dataframe, raw_column): return None class TestEventParser(object): def setup(self): # Create Test Event Parser Implementation event_name = "eventName" columns = ["eventTypeId", "username"] self.event_regex = { "eventTypeId": r"eventTypeId: ([0-9$]+)", "username": r"username: ([a-z\.\-0-9$]+)", } self.event_parser = TestEventParserImpl(columns, event_name) def test_parse_raw_event(self): test_dataframe = cudf.DataFrame( { "Raw": [ "eventTypeId: 1 \\nusername: foo", "eventTypeId: 1 \\nusername: bar", ] } ) parsed_dataframe = self.event_parser.parse_raw_event( test_dataframe, "Raw", self.event_regex ) expected_parsed_dataframe = cudf.DataFrame( {"eventTypeId": ["1", "1"], "username": ["foo", "bar"]} ) assert parsed_dataframe.equals(expected_parsed_dataframe)
1,746
32.596154
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_ip.py
# Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cudf import clx.ip def test_ip_to_int(): input = cudf.Series(["5.79.97.178", "94.130.74.45"]) expected = cudf.Series([89088434, 1585596973]) actual = clx.ip.ip_to_int(input) assert actual.equals(expected) def test_int_to_ip(): input = cudf.Series([89088434, 1585596973]) expected = cudf.Series(["5.79.97.178", "94.130.74.45"]) actual = clx.ip.int_to_ip(input) assert actual.equals(expected) def test_is_ip(): input = cudf.Series( ["5.79.97.178", "1.2.3.4", "5", "5.79", "5.79.97", "5.79.97.178.100"] ) expected = cudf.Series([True, True, False, False, False, False]) actual = clx.ip.is_ip(input) assert actual.equals(expected) def test_is_reserved(): input = cudf.Series(["240.0.0.0", "255.255.255.255", "5.79.97.178"]) expected = cudf.Series([True, True, False]) actual = clx.ip.is_reserved(input) assert actual.equals(expected) def test_is_loopback(): input = cudf.Series(["127.0.0.1", "5.79.97.178"]) expected = cudf.Series([True, False]) actual = clx.ip.is_loopback(input) assert actual.equals(expected) def test_is_link_local(): input = cudf.Series(["169.254.0.0", "5.79.97.178"]) expected = cudf.Series([True, False]) actual = clx.ip.is_link_local(input) assert actual.equals(expected) def test_is_unspecified(): input = cudf.Series(["0.0.0.0", "5.79.97.178"]) expected = cudf.Series([True, False]) actual = clx.ip.is_unspecified(input) assert actual.equals(expected) def test_is_multicast(): input = cudf.Series(["224.0.0.0", "239.255.255.255", "5.79.97.178"]) expected = cudf.Series([True, True, False]) actual = clx.ip.is_multicast(input) assert actual.equals(expected) def test_is_private(): input = cudf.Series(["0.0.0.0", "5.79.97.178"]) expected = cudf.Series([True, False]) actual = clx.ip.is_private(input) assert actual.equals(expected) def test_is_global(): input = cudf.Series(["0.0.0.0", "5.79.97.178"]) expected = cudf.Series([False, True]) actual = clx.ip.is_global(input) assert actual.equals(expected) def test_netmask(): input = cudf.Series(["5.79.97.178", "94.130.74.45"]) expected = cudf.Series(["255.255.128.0", "255.255.128.0"]) actual = clx.ip.netmask(input, 17) assert actual.equals(expected) def test_hostmask(): input = cudf.Series(["5.79.97.178", "94.130.74.45"]) expected = cudf.Series(["0.0.127.255", "0.0.127.255"]) actual = clx.ip.hostmask(input, 17) assert actual.equals(expected) def test_mask(): input_ips = cudf.Series(["5.79.97.178", "94.130.74.45"]) input_masks = cudf.Series(["255.255.128.0", "255.255.128.0"]) expected = cudf.Series(["5.79.0.0", "94.130.0.0"]) actual = clx.ip.mask(input_ips, input_masks) assert actual.equals(expected)
3,428
29.891892
77
py
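The ip_to_int/int_to_ip pair exercised above treats a dotted-quad IPv4 address as a base-256 integer (e.g. 5.79.97.178 -> 5*256^3 + 79*256^2 + 97*256 + 178 = 89088434). A minimal pure-Python sketch of that conversion, using hypothetical helper names rather than the clx.ip implementation (which operates on cuDF Series):

def ip_to_int(addr):
    # "5.79.97.178" -> (5 << 24) | (79 << 16) | (97 << 8) | 178 == 89088434
    a, b, c, d = (int(octet) for octet in addr.split("."))
    return (a << 24) | (b << 16) | (c << 8) | d

def int_to_ip(n):
    # 1585596973 -> "94.130.74.45"
    return ".".join(str((n >> shift) & 0xFF) for shift in (24, 16, 8, 0))

assert ip_to_int("5.79.97.178") == 89088434
assert int_to_ip(1585596973) == "94.130.74.45"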
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_eda.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

import cudf
import cuxfilter
import pandas as pd
import pytest

from clx.eda import EDA


@pytest.fixture
def test_dataframe():
    df = cudf.DataFrame()
    df["a"] = [1, 2, 3, 4]
    df["b"] = ["a", "b", "c", "c"]
    df["c"] = [True, False, True, True]
    df["d"] = cudf.Series(pd.date_range("2000-01-01", periods=3, freq="m"))
    return df


def test_eda_summary_stats(test_dataframe):
    """Test EDA Summary statistics"""
    expected_output = {
        "SummaryStatistics": {
            "a": {"dtype": "int64", "summary": {"unique": "4", "total": "4"}},
            "b": {"dtype": "object", "summary": {"unique": "3", "total": "4"}},
            "c": {"dtype": "bool", "summary": {"true_percent": "0.75"}},
            "d": {
                "dtype": "datetime64[ns]",
                "summary": {"timespan": "60 days, 2880 hours, 0 minutes, 0 seconds"},
            },
        }
    }
    eda = EDA(test_dataframe)
    actual_output = eda.analysis
    assert expected_output == actual_output


def test_eda_save_analysis(tmpdir, test_dataframe):
    """Test saving the analysis to a json file"""
    fdir = str(tmpdir.mkdir("tmp_test_eda"))
    fname = fdir + "/SummaryStatistics.json"
    eda = EDA(test_dataframe)
    eda.save_analysis(fdir)
    expected_output = {
        "a": {"dtype": "int64", "summary": {"unique": "4", "total": "4"}},
        "b": {"dtype": "object", "summary": {"unique": "3", "total": "4"}},
        "c": {"dtype": "bool", "summary": {"true_percent": "0.75"}},
        "d": {
            "dtype": "datetime64[ns]",
            "summary": {"timespan": "60 days, 2880 hours, 0 minutes, 0 seconds"},
        },
    }
    with open(fname) as f:
        actual_output = json.load(f)
    assert expected_output == actual_output


def test_cuxfilter_dashboard(test_dataframe):
    """Test generating the dashboard"""
    eda = EDA(test_dataframe)
    dash = eda.cuxfilter_dashboard()
    assert isinstance(dash, cuxfilter.dashboard.DashBoard)
    assert len(dash.charts) == 2
    assert dash.title == "Exploratory Data Analysis"
2,662
32.2875
85
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_asset_classification.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
import cudf
import clx
from clx.analytics.asset_classification import AssetClassification
import torch
from os import path
import random
import pandas as pd

column1 = [random.randint(1, 24) for _ in range(9000)]
column2 = [random.randint(1, 4) for _ in range(9000)]
column3 = [random.randint(1, 9) for _ in range(9000)]
column4 = [random.randint(1, 26) for _ in range(9000)]
column5 = [random.randint(1, 3) for _ in range(9000)]
column6 = [random.randint(1, 9) for _ in range(9000)]
column7 = [random.randint(1, 37) for _ in range(9000)]
column8 = [random.randint(1, 8) for _ in range(9000)]
column9 = [random.randint(1, 4) for _ in range(9000)]
column10 = [random.randint(1, 11) for _ in range(9000)]
label = [random.randint(0, 6) for _ in range(9000)]

train_pd = pd.DataFrame(list(zip(column1, column2, column3, column4, column5, column6, column7, column8, column9, column10, label)), columns=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "label"])
train_gdf = cudf.from_pandas(train_pd)

batch_size = 6
epochs = 15


@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_mixed_cat_cont(tmpdir, train_gdf):
    train_gdf = train_gdf.copy()
    cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8"]
    cont_cols = ["9", "10"]
    train_gdf[cont_cols] = normalize_conts(train_gdf[cont_cols])
    ac = AssetClassification()
    ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
    if torch.cuda.is_available():
        assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)


@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_all_cat(tmpdir, train_gdf):
    train_gdf = train_gdf.copy()
    cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
    cont_cols = []
    ac = AssetClassification()
    ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
    if torch.cuda.is_available():
        assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)


@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_all_cont(tmpdir, train_gdf):
    train_gdf = train_gdf.copy()
    cont_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
    cat_cols = []
    train_gdf[cont_cols] = normalize_conts(train_gdf[cont_cols])
    ac = AssetClassification()
    ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
    if torch.cuda.is_available():
        assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)


@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_predict(tmpdir, train_gdf):
    if torch.cuda.is_available():
        cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
        cont_cols = []
        ac = AssetClassification()
        ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
        # predict
        test_gdf = train_gdf.head()
        test_gdf = test_gdf.drop("label", axis=1)
        preds = ac.predict(test_gdf, cat_cols, cont_cols)
        assert isinstance(preds, cudf.core.series.Series)
        assert len(preds) == len(test_gdf)
        assert preds.dtype == int


def test_save_model(tmpdir):
    if torch.cuda.is_available():
        cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
        cont_cols = []
        ac = AssetClassification()
        ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
        # save model
        ac.save_model(str(tmpdir.join("clx_ac.mdl")))
        assert path.exists(str(tmpdir.join("clx_ac.mdl")))


def test_load_model(tmpdir):
    if torch.cuda.is_available():
        cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
        cont_cols = []
        ac = AssetClassification()
        ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
        # save model
        ac.save_model(str(tmpdir.join("clx_ac.mdl")))
        assert path.exists(str(tmpdir.join("clx_ac.mdl")))
        # load model
        ac2 = AssetClassification()
        ac2.load_model(str(tmpdir.join("clx_ac.mdl")))
        assert isinstance(ac2._model, clx.analytics.model.tabular_model.TabularModel)


def normalize_conts(gdf):
    means, stds = (gdf.mean(0), gdf.std(ddof=0))
    gdf = (gdf - means) / stds
    return gdf
4,887
38.104
202
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_factory.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
import cudf
from clx.io.factory.factory import Factory
from clx.io.reader.kafka_reader import KafkaReader
from clx.io.writer.kafka_writer import KafkaWriter
from clx.io.reader.fs_reader import FileSystemReader
from clx.io.writer.fs_writer import FileSystemWriter

kafka_config = {
    "kafka_brokers": "localhost:9092",
    "group_id": "cyber-dp",
    "batch_size": 100,
    "consumer_kafka_topics": ["consumer_topic_t1", "consumer_topic_t2"],
    "publisher_kafka_topic": "publisher_topic_t1",
    "output_delimiter": ",",
}

fs_reader_config = {
    "type": "fs",
    "input_path": "test_input",
    "names": ["_col1", "_col2", "_col3"],
    "delimiter": ",",
    "usecols": ["_col1", "_col2", "_col3"],
    "dtype": ["str", "str", "str"],
    "input_format": "text",
}

fs_writer_config = {
    "type": "fs",
    "output_path": "test_output",
    "output_format": "text",
}

expected_df = cudf.DataFrame(
    {
        "firstname": ["Emma", "Ava", "Sophia"],
        "lastname": ["Olivia", "Isabella", "Charlotte"],
        "gender": ["F", "F", "F"],
    }
)


@pytest.mark.parametrize("kafka_config", [kafka_config])
def test_get_io_reader_kafka(kafka_config):
    reader = Factory.get_reader("kafka", kafka_config)
    expected_cls = KafkaReader
    assert isinstance(reader, expected_cls)


@pytest.mark.parametrize("kafka_config", [kafka_config])
def test_get_io_writer_kafka(kafka_config):
    writer = Factory.get_writer("kafka", kafka_config)
    expected_cls = KafkaWriter
    assert isinstance(writer, expected_cls)


@pytest.mark.parametrize("fs_reader_config", [fs_reader_config])
def test_get_io_reader_fs(fs_reader_config):
    reader = Factory.get_reader("fs", fs_reader_config)
    expected_cls = FileSystemReader
    assert isinstance(reader, expected_cls)


@pytest.mark.parametrize("fs_writer_config", [fs_writer_config])
def test_get_io_writer_fs(fs_writer_config):
    writer = Factory.get_writer("fs", fs_writer_config)
    expected_cls = FileSystemWriter
    assert isinstance(writer, expected_cls)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_get_reader_csv(tmpdir, expected_df):
    fname = tmpdir.mkdir("tmp_test_factory").join("person.csv")
    expected_df.to_csv(fname, index=False)
    config = {
        "type": "fs",
        "input_path": fname,
        "names": ["firstname", "lastname", "gender"],
        "delimiter": ",",
        "usecols": ["firstname", "lastname", "gender"],
        "dtype": ["str", "str", "str"],
        "header": 0,
        "input_format": "csv",
    }
    reader_from_factory = Factory.get_reader("fs", config)
    fetched_df = reader_from_factory.fetch_data()
    assert fetched_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_get_reader_parquet(tmpdir, expected_df):
    fname = tmpdir.mkdir("tmp_test_factory").join("person.parquet")
    cudf.io.parquet.to_parquet(expected_df, fname)
    config = {
        "type": "fs",
        "input_path": fname,
        "columns": ["firstname", "lastname", "gender"],
        "input_format": "parquet",
    }
    reader_from_factory = Factory.get_reader("fs", config)
    fetched_df = reader_from_factory.fetch_data()
    assert fetched_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_get_reader_orc(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_reader").join("person.orc"))
    cudf.io.orc.to_orc(expected_df, fname)
    config = {
        "type": "fs",
        "input_path": fname,
        "usecols": ["firstname", "lastname", "gender"],
        "input_format": "orc",
    }
    reader_from_factory = Factory.get_reader("fs", config)
    fetched_df = reader_from_factory.fetch_data()
    assert fetched_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_get_reader_json(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_reader").join("person.json"))
    cudf.io.json.to_json(expected_df, fname, orient="records")
    config = {
        "type": "fs",
        "input_path": fname,
        "orient": "records",
        "input_format": "json",
    }
    reader_from_factory = Factory.get_reader("fs", config)
    fetched_df = reader_from_factory.fetch_data()
    assert fetched_df.equals(expected_df)
4,883
31.131579
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_loda.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cupy
from clx.analytics.loda import Loda
from os import path


def test_fit():
    ld = Loda(n_random_cuts=10, n_bins=None)
    x = cupy.random.randint(0, 100, size=(200, 10))
    ld.fit(x)
    assert ld._histograms is not None
    assert isinstance(ld._histograms, cupy.ndarray)
    assert cupy.all(ld._histograms > 0)


def test_score():
    ld = Loda(n_random_cuts=10, n_bins=None)
    x = cupy.random.randint(0, 100, size=(200, 10))
    ld.fit(x)
    scores = ld.score(x)
    assert scores is not None
    assert isinstance(scores, cupy.ndarray)
    assert cupy.all(scores > 0)


def test_explain():
    ld = Loda(n_random_cuts=10, n_bins=None)
    x = cupy.random.randint(0, 100, size=(200, 10))
    ld.fit(x)
    explanation = ld.explain(x[0])
    assert explanation is not None
    assert isinstance(explanation, cupy.ndarray)


def test_save_model(tmpdir):
    ld = Loda(n_random_cuts=10, n_bins=None)
    x = cupy.random.randint(0, 100, size=(200, 10))
    ld.fit(x)
    ipath = path.join(tmpdir, "clx_loda")
    opath = path.join(tmpdir, "clx_loda.npz")
    ld.save_model(ipath)
    assert path.exists(opath)


def test_load_model(tmpdir):
    ld = Loda(n_random_cuts=10, n_bins=None)
    x = cupy.random.randint(0, 100, size=(200, 10))
    ld.fit(x)
    ipath = path.join(tmpdir, "clx_loda")
    opath = path.join(tmpdir, "clx_loda.npz")
    ld.save_model(ipath)
    assert path.exists(opath)
    # load model
    ld = Loda.load_model(opath)
    assert isinstance(ld, Loda)
2,147
26.538462
74
py
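The Loda tests above only check shapes and signs of the fitted state, but the underlying idea (Pevny's LODA) is scoring each sample by its average negative log density across histograms fitted on random 1-D projections. A rough NumPy sketch of that idea, offered as an illustrative re-implementation rather than the clx.analytics.loda code (the function name and defaults here are assumptions):

import numpy as np

def loda_score(x, n_random_cuts=10, n_bins=32, seed=0):
    # Fit a histogram density over each random projection and score every
    # row by its mean negative log density; higher = more anomalous.
    rng = np.random.default_rng(seed)
    n, d = x.shape
    scores = np.zeros(n)
    for _ in range(n_random_cuts):
        w = rng.standard_normal(d)
        z = x @ w
        hist, edges = np.histogram(z, bins=n_bins, density=True)
        idx = np.clip(np.digitize(z, edges) - 1, 0, n_bins - 1)
        scores += -np.log(hist[idx] + 1e-12)
    return scores / n_random_cuts

scores = loda_score(np.random.randint(0, 100, size=(200, 10)).astype(float))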
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_fs_reader.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import pytest
from clx.io.reader.fs_reader import FileSystemReader

expected_df = cudf.DataFrame(
    {
        "firstname": ["Emma", "Ava", "Sophia"],
        "lastname": ["Olivia", "Isabella", "Charlotte"],
        "gender": ["F", "F", "F"],
    }
)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_fetch_data_csv(tmpdir, expected_df):
    fname = tmpdir.mkdir("tmp_test_fs_reader").join("person.csv")
    expected_df.to_csv(fname, index=False)
    config = {
        "type": "fs",
        "input_path": fname,
        "names": ["firstname", "lastname", "gender"],
        "delimiter": ",",
        "usecols": ["firstname", "lastname", "gender"],
        "dtype": ["str", "str", "str"],
        "header": 0,
        "input_format": "csv"
    }
    reader = FileSystemReader(config)
    fetched_df = reader.fetch_data()
    assert fetched_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_fetch_data_parquet(tmpdir, expected_df):
    fname = tmpdir.mkdir("tmp_test_fs_reader").join("person.parquet")
    cudf.io.parquet.to_parquet(expected_df, fname)
    config = {
        "type": "fs",
        "input_path": fname,
        "input_format": "parquet"
    }
    reader = FileSystemReader(config)
    fetched_df = reader.fetch_data()
    assert fetched_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_fetch_data_orc(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_reader").join("person.orc"))
    cudf.io.orc.to_orc(expected_df, fname)
    config = {
        "type": "fs",
        "input_path": fname,
        "input_format": "orc"
    }
    reader = FileSystemReader(config)
    fetched_df = reader.fetch_data()
    assert fetched_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_fetch_data_json(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_reader").join("person.json"))
    cudf.io.json.to_json(expected_df, fname, orient="records")
    config = {
        "type": "fs",
        "input_path": fname,
        "orient": "records",
        "input_format": "json"
    }
    reader = FileSystemReader(config)
    fetched_df = reader.fetch_data()
    assert fetched_df.equals(expected_df)
2,865
28.546392
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_slashnext.py
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# ref: https://github.com/slashnext/SlashNext-URL-Analysis-and-Enrichment/tree/master/Python%20SDK

import pytest
from mockito import when

from clx.osi.slashnext import SlashNextClient

api_key = "dummy-api-key"
ok_status = "ok"
ok_details = "Success"

host_reputation_resp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'threatData': {'verdict': 'Benign', 'threatStatus': 'N/A', 'threatName': 'N/A', 'threatType': 'N/A', 'firstSeen': '12-10-2018 13:04:17 UTC', 'lastSeen': '01-14-2021 15:29:36 UTC'}}]"
host_report_resp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'threatData': {'verdict': 'Benign', 'threatStatus': 'N/A', 'threatName': 'N/A', 'threatType': 'N/A', 'firstSeen': '12-10-2018 13:04:17 UTC', 'lastSeen': '01-14-2021 15:29:36 UTC'}}, {'errorNo': 0, 'errorMsg': 'Success', 'urlDataList': [{'url': 'https://www.google.com/', 'scanId': '988dd47c-0-4ecc-86fc-b7bae139bcca', 'threatData': {'verdict': 'Benign', 'threatStatus': 'N/A', 'threatName': 'N/A', 'threatType': 'N/A', 'firstSeen': '08-27-2019 10:32:19 UTC', 'lastSeen': '08-27-2019 12:34:52 UTC'}}], 'normalizeData': {'normalizeStatus': 0, 'normalizeMessage': ''}}, {'errorNo': 0, 'errorMsg': 'Success', 'scData': {'scBase64': '/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAMCw+...', 'htmlName': 'Webpage-html', 'htmlContenType': 'html'}}, {'errorNo': 0, 'errorMsg': 'Success', 'textData': {'textBase64': 'V2UndmUgZGV0ZWN0ZWQgeW=', 'textName': 'Webpage-text'}}]"
api_quota_resp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'quotaDetails': {'licensedQuota': '1825000', 'remainingQuota': 1824967, 'expiryDate': '2021-12-11', 'isExpired': False, 'pointsConsumptionRate': {'hostReputation': 1, 'hostUrls': 1, 'urlReputation': 1, 'uRLScan': 3, 'uRLScanSync': 3, 'downloadScreenshot': 0, 'downloadText': 0, 'downloadHTML': 0, 'customerApiQuota': 0, 'urlScanWithScanId': 0, 'urlScanSyncWithScanId': 0}, 'consumedAPIDetail': {'hostReputation': 13, 'hostUrls': 8, 'urlReputation': 0, 'uRLScan': 2, 'uRLScanSync': 2, 'downloadScreenshot': 9, 'downloadText': 8, 'downloadHTML': 8, 'customerApiQuota': 37, 'scanReportWithScanId': 1, 'scanSyncReportWithScanId': 0}, 'consumedPointsDetail': {'hostReputation': 13, 'hostUrls': 8, 'urlReputation': 0, 'uRLScan': 6, 'uRLScanSync': 6, 'downloadScreenshot': 0, 'downloadText': 0, 'downloadHTML': 0, 'customerApiQuota': 0, 'scanReportWithScanId': 0, 'scanSyncReportWithScanId': 0}, 'note': 'Your annual API quota will be reset to zero, once either the limit is reached or upon quota expiration date indicated above.'}}]"
host_urls_resp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'urlDataList': [{'url': 'https://blueheaventravel.com/vendor/filp/whoops/up/index.php?email=Jackdavis@eureliosollutions.com', 'scanId': 'ace21670-7e20-49f2-ba57-755a7458c8a3', 'threatData': {'verdict': 'Benign', 'threatStatus': 'N/A', 'threatName': 'N/A', 'threatType': 'N/A', 'firstSeen': '01-13-2021 21:04:01 UTC', 'lastSeen': '01-13-2021 21:04:11 UTC'}, 'finalUrl': 'https://blueheaventravel.com/vendor/filp/whoops/up/?email=Jackdavis@eureliosollutions.com'}], 'normalizeData': {'normalizeStatus': 1, 'normalizeMessage': 'Note: Email address specified in the Scanned URL was replaced with a dummy email to protect user privacy.'}}]"
url_scan_resp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'urlData': {'url': 'http://ajeetenterprises.in/js/kbrad/drive/index.php', 'scanId': 'e468db1d-6bc0-47af-ab7d-76e4a38b2489', 'threatData': {'verdict': 'Malicious', 'threatStatus': 'Active', 'threatName': 'Fake Login Page', 'threatType': 'Phishing & Social Engineering', 'firstSeen': '12-27-2019 07:45:55 UTC', 'lastSeen': '12-27-2019 07:47:51 UTC'}}, 'normalizeData': {'normalizeStatus': 0, 'normalizeMessage': ''}}]"
url_scan_sync_resp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'urlData': {'url': 'http://ajeetenterprises.in/js/kbrad/drive/index.php', 'scanId': 'e468db1d-6bc0-47af-ab7d-76e4a38b2489', 'threatData': {'verdict': 'Malicious', 'threatStatus': 'Active', 'threatName': 'Fake Login Page', 'threatType': 'Phishing & Social Engineering', 'firstSeen': '12-27-2019 07:45:55 UTC', 'lastSeen': '12-27-2019 07:47:51 UTC'}}, 'normalizeData': {'normalizeStatus': 0, 'normalizeMessage': ''}}]"
scan_report_resp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'urlData': {'url': 'https://blueheaventravel.com/vendor/filp/whoops/up/index.php?email=Jackdavis@eureliosollutions.com', 'scanId': 'ace21670-7e20-49f2-ba57-755a7458c8a3', 'threatData': {'verdict': 'Benign', 'threatStatus': 'N/A', 'threatName': 'N/A', 'threatType': 'N/A', 'firstSeen': '01-13-2021 21:04:01 UTC', 'lastSeen': '01-13-2021 21:04:11 UTC'}, 'finalUrl': 'https://blueheaventravel.com/vendor/filp/whoops/up/?email=Jackdavis@eureliosollutions.com'}, 'normalizeData': {'normalizeStatus': 1, 'normalizeMessage': 'Note: Email address specified in the Scanned URL was replaced with a dummy email to protect user privacy.'}}]"
download_screenshot_rsp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'scData': {'scBase64': '/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB//Z', 'scName': 'Webpage-screenshot', 'scContentType': 'jpeg'}}]"
download_html_rsp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'scData': {'scBase64': '/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB//Z', 'htmlName': 'Webpage-html', 'htmlContenType': 'html'}}]"
download_text_rsp_str = "[{'errorNo': 0, 'errorMsg': 'Success', 'textData': {'textBase64': 'RW1haWwgU2V0dGluZ3MKSmFja2RhdmlzQGV1cmVsaW9zb2xsdXRpb25zLmNvbQpBY2NvdW50IFZlcmlmaWNhdGlvbgpDb3VudGRvd24gdG8geW91ciBlbWFpbCBzaHV0ZG93bjoKMDE6MTU6MDMKVG8gcHJldmVudCB5b3VyIEVtYWlsIGZyb20gYmVpbmcgc2h1dGRvd24sIFZlcmlmeSB5b3VyIGFjY291bnQgZGV0YWlscyBiZWxvdzoKSmFja2RhdmlzQGV1cmVsaW9zb2xsdXRpb25zLmNvbQoqKiogQWNjb3VudCAvIFNldHRpbmdzIC8gU2VjdXJpdHkgU2V0dGluZ3MgLyBBY2NvdW50IFZlcmlmaWNhdGlvbiA+Pg==', 'textName': 'Webpage-text'}}]"


def test_verify_connection(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).test().thenReturn(("ok", "Success"))
    assert slashnext.verify_connection() == "success"


def test2_verify_connection(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).test().thenReturn(("error", "Failed"))
    expected_exception = Exception(
        "Connection to SlashNext cloud failed due to Failed."
    )
    with pytest.raises(Exception) as actual_exception:
        slashnext.verify_connection()
        assert actual_exception == expected_exception


def test_host_reputation(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(host_reputation_resp_str))
    )
    host = "google.com"
    resp_list = slashnext.host_reputation(host)
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "threatData" in resp_list[0]


def test_host_report(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(host_report_resp_str))
    )
    host = "google.com"
    resp_list = slashnext.host_report(host)
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "threatData" in resp_list[0]
    assert resp_list[1]["errorNo"] == 0
    assert resp_list[1]["errorMsg"] == "Success"
    assert "urlDataList" in resp_list[1]
    assert "normalizeData" in resp_list[1]


def test_host_urls(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(host_urls_resp_str))
    )
    host = "blueheaventravel.com"
    resp_list = slashnext.host_urls(host, limit=1)
    assert len(resp_list) == 1
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "urlDataList" in resp_list[0]


def test_url_scan(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(url_scan_resp_str))
    )
    url = "http://ajeetenterprises.in/js/kbrad/drive/index.php"
    resp_list = slashnext.url_scan(url, extended_info=False)
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "urlData" in resp_list[0]


def test_url_scan_sync(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(url_scan_sync_resp_str))
    )
    url = "http://ajeetenterprises.in/js/kbrad/drive/index.php"
    resp_list = slashnext.url_scan_sync(url, extended_info=False, timeout=10)
    assert len(resp_list) == 1
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "urlData" in resp_list[0]


def test_scan_report(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(scan_report_resp_str))
    )
    scanid = "ace21670-7e20-49f2-ba57-755a7458c8a3"
    resp_list = slashnext.scan_report(scanid, extended_info=False)
    assert len(resp_list) == 1
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "urlData" in resp_list[0]


def test_download_screenshot(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(download_screenshot_rsp_str))
    )
    scanid = "ace21670-7e20-49f2-ba57-755a7458c8a3"
    resp_list = slashnext.download_screenshot(scanid, resolution="medium")
    assert len(resp_list) == 1
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "scData" in resp_list[0]
    assert resp_list[0]["scData"]["scName"] == "Webpage-screenshot"
    assert resp_list[0]["scData"]["scContentType"] == "jpeg"


def test_download_html(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(download_html_rsp_str))
    )
    scanid = "ace21670-7e20-49f2-ba57-755a7458c8a3"
    resp_list = slashnext.download_html(scanid)
    assert len(resp_list) == 1
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "scData" in resp_list[0]
    assert resp_list[0]["scData"]["htmlName"] == "Webpage-html"
    assert resp_list[0]["scData"]["htmlContenType"] == "html"


def test_download_text(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(download_text_rsp_str))
    )
    scanid = "ace21670-7e20-49f2-ba57-755a7458c8a3"
    resp_list = slashnext.download_text(scanid)
    assert len(resp_list) == 1
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "textData" in resp_list[0]
    assert resp_list[0]["textData"]["textName"] == "Webpage-text"


def test_api_quota(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(
        (ok_status, ok_details, eval(api_quota_resp_str))
    )
    resp_list = slashnext.api_quota()
    assert resp_list[0]["errorNo"] == 0
    assert resp_list[0]["errorMsg"] == "Success"
    assert "quotaDetails" in resp_list[0]


def test2_host_reputation(tmpdir):
    slashnext = SlashNextClient(api_key, tmpdir)
    when(slashnext.conn).execute(...).thenReturn(("error", "error", []))
    host = "google.com"
    expected_exception = Exception(
        "Action 'slashnext-host-reputation host=google.com execution failed due to error."
    )
    with pytest.raises(Exception) as actual_exception:
        slashnext.host_reputation(host)
        assert actual_exception == expected_exception
12,709
63.517766
1,088
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_features.py
import cudf
import pytest

import clx.features

df = cudf.DataFrame(
    {
        "time": [1, 2, 3, 4, 5, 6, 7],
        "user": ["u1", "u2", "u3", "u1", "u1", "u2", "u1"],
        "computer": ["c1", "c2", "c3", "c1", "c2", "c3", "c1"],
    }
)


def test_binary_features():
    actual = clx.features.binary(df, "user", "computer")
    expected = cudf.DataFrame(
        {"user": ["u1", "u2", "u3"], "c1": [1, 0, 0], "c2": [1, 1, 0], "c3": [0, 1, 1]}
    )
    expected = expected.set_index("user")
    expected["c1"] = expected["c1"].astype("int32")
    expected["c2"] = expected["c2"].astype("int32")
    expected["c3"] = expected["c3"].astype("int32")
    expected.columns = cudf.MultiIndex(
        names=[None, "computer"],
        codes=[[0, 0, 0], [0, 1, 2]],
        levels=[["time"], ["c1", "c2", "c3"]],
    )
    assert expected.equals(actual)


def test_binary_exception():
    with pytest.raises(Exception):
        clx.features.binary(df, "user", "a")


def test_frequency_features():
    actual = clx.features.frequency(df, "user", "computer")
    expected = cudf.DataFrame(
        {
            "user": ["u1", "u2", "u3"],
            "c1": [0.75, 0.00, 0.00],
            "c2": [0.25, 0.50, 0.0],
            "c3": [0.0, 0.5, 1.0],
        }
    )
    expected = expected.set_index("user")
    expected.columns = cudf.MultiIndex(
        names=[None, "computer"],
        codes=[[0, 0, 0], [0, 1, 2]],
        levels=[["time"], ["c1", "c2", "c3"]],
    )
    assert expected.equals(actual)


def test_frequency_exception():
    with pytest.raises(Exception):
        clx.features.frequency(df, "a", "computer")
1,630
26.644068
87
py
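The expected frames in the feature tests above are pivot tables keyed on user and computer. A hedged pandas equivalent of the binary and frequency encodings (illustrative only; clx.features operates on cuDF and attaches the MultiIndex shown in the tests):

import pandas as pd

pdf = pd.DataFrame({
    "user": ["u1", "u2", "u3", "u1", "u1", "u2", "u1"],
    "computer": ["c1", "c2", "c3", "c1", "c2", "c3", "c1"],
})
# Binary: did this user ever touch this computer?
binary = pd.crosstab(pdf["user"], pdf["computer"]).clip(upper=1)
# Frequency: share of each user's events per computer (rows sum to 1).
frequency = pd.crosstab(pdf["user"], pdf["computer"], normalize="index")
assert frequency.loc["u1", "c1"] == 0.75  # matches the expected frame above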
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_dns_extractor.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
from cudf import DataFrame
from clx.dns import dns_extractor as dns

input_df = DataFrame(
    {
        "url": [
            "http://www.google.com",
            "gmail.com",
            "github.com",
            "https://pandas.pydata.org",
            "http://www.worldbank.org.kg/",
            "waiterrant.blogspot.com",
            "http://forums.news.cnn.com.ac/",
            "http://forums.news.cnn.ac/",
            "ftp://b.cnn.com/",
            "a.news.uk",
            "a.news.co.uk",
            "https://a.news.co.uk",
            "107-193-100-2.lightspeed.cicril.sbcglobal.net",
            "a23-44-13-2.deploy.static.akamaitechnologies.com",
        ]
    }
)


def test_dns_vars_provider():
    sv = dns.DnsVarsProvider.get_instance()
    sv2 = dns.DnsVarsProvider.get_instance()
    assert sv is sv2


def test2_dns_vars_provider():
    expected_error = Exception("This is a singleton class")
    with pytest.raises(Exception) as actual_error:
        dns.DnsVarsProvider()
        assert actual_error == expected_error


@pytest.mark.parametrize("input_df", [input_df])
def test_parse_url(input_df):
    expected_output_df = DataFrame(
        {
            "domain": [
                "google",
                "gmail",
                "github",
                "pydata",
                "worldbank",
                "waiterrant",
                "cnn",
                "cnn",
                "cnn",
                "news",
                "news",
                "news",
                "sbcglobal",
                "akamaitechnologies",
            ],
            "suffix": [
                "com",
                "com",
                "com",
                "org",
                "org.kg",
                "blogspot.com",
                "com.ac",
                "ac",
                "com",
                "uk",
                "co.uk",
                "co.uk",
                "net",
                "com",
            ],
        }
    )
    output_df = dns.parse_url(input_df["url"], req_cols={"domain", "suffix"})
    assert expected_output_df.equals(output_df)


@pytest.mark.parametrize("input_df", [input_df])
def test2_parse_url(input_df):
    expected_output_df = DataFrame(
        {
            "hostname": [
                "www.google.com",
                "gmail.com",
                "github.com",
                "pandas.pydata.org",
                "www.worldbank.org.kg",
                "waiterrant.blogspot.com",
                "forums.news.cnn.com.ac",
                "forums.news.cnn.ac",
                "b.cnn.com",
                "a.news.uk",
                "a.news.co.uk",
                "a.news.co.uk",
                "107-193-100-2.lightspeed.cicril.sbcglobal.net",
                "a23-44-13-2.deploy.static.akamaitechnologies.com",
            ],
            "subdomain": [
                "www",
                "",
                "",
                "pandas",
                "www",
                "",
                "forums.news",
                "forums.news",
                "b",
                "a",
                "a",
                "a",
                "107-193-100-2.lightspeed.cicril",
                "a23-44-13-2.deploy.static",
            ],
            "domain": [
                "google",
                "gmail",
                "github",
                "pydata",
                "worldbank",
                "waiterrant",
                "cnn",
                "cnn",
                "cnn",
                "news",
                "news",
                "news",
                "sbcglobal",
                "akamaitechnologies",
            ],
            "suffix": [
                "com",
                "com",
                "com",
                "org",
                "org.kg",
                "blogspot.com",
                "com.ac",
                "ac",
                "com",
                "uk",
                "co.uk",
                "co.uk",
                "net",
                "com",
            ],
        }
    )
    output_df = dns.parse_url(input_df["url"])
    assert expected_output_df.equals(output_df)


@pytest.mark.parametrize("input_df", [input_df])
def test_extract_hostname(input_df):
    expected_output_df = DataFrame(
        {
            "hostname": [
                "www.google.com",
                "gmail.com",
                "github.com",
                "pandas.pydata.org",
                "www.worldbank.org.kg",
                "waiterrant.blogspot.com",
                "forums.news.cnn.com.ac",
                "forums.news.cnn.ac",
                "b.cnn.com",
                "a.news.uk",
                "a.news.co.uk",
                "a.news.co.uk",
                "107-193-100-2.lightspeed.cicril.sbcglobal.net",
                "a23-44-13-2.deploy.static.akamaitechnologies.com",
            ]
        }
    )
    output = dns.extract_hostnames(input_df["url"])
    assert output.equals(expected_output_df["hostname"])


def test_generate_tld_cols():
    hostnames_df = DataFrame(
        {"hostname": ["forums.news.cnn.com.ac", "forums.news.cnn.ac", "b.cnn.com"]}
    )
    input_df = DataFrame(
        {
            4: ["ac", "", ""],
            3: ["com", "ac", ""],
            2: ["cnn", "cnn", "com"],
            1: ["news", "news", "cnn"],
            0: ["forums", "forums", "b"],
        }
    )
    expected_output_df = DataFrame(
        {
            4: ["ac", "", ""],
            3: ["com", "ac", ""],
            2: ["cnn", "cnn", "com"],
            1: ["news", "news", "cnn"],
            0: ["forums", "forums", "b"],
            "tld4": ["ac", "", ""],
            "tld3": ["com.ac", "ac", ""],
            "tld2": ["cnn.com.ac", "cnn.ac", "com"],
            "tld1": ["news.cnn.com.ac", "news.cnn.ac", "cnn.com"],
            "tld0": ["forums.news.cnn.com.ac", "forums.news.cnn.ac", "b.cnn.com"],
        }
    )
    col_len = len(input_df.columns) - 1
    actual_output_df = dns.generate_tld_cols(
        input_df, hostnames_df["hostname"], col_len
    )
    assert expected_output_df.equals(actual_output_df)


@pytest.mark.parametrize("input_df", [input_df])
def test_parse_url_invalid_req_cols(input_df):
    expected_error = ValueError(
        "Given req_cols must be subset of %s"
        % ('["hostname", "subdomain", "domain", "suffix"]')
    )
    with pytest.raises(ValueError) as actual_error:
        dns.parse_url(input_df["url"], req_cols={"test"})
        assert actual_error == expected_error
7,095
28.322314
83
py
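test_generate_tld_cols above expects each tldN column to be hostname label N joined onto everything to its right, built cumulatively from the suffix outward. A small sketch of that cumulative join for a single split hostname (a hypothetical helper, not the clx.dns implementation, which does this column-wise on cuDF frames):

def cumulative_tlds(labels):
    # labels are indexed 0 (leftmost) .. N (suffix), e.g.
    # ["forums", "news", "cnn", "com", "ac"] for forums.news.cnn.com.ac
    tlds, acc = {}, ""
    for k in range(len(labels) - 1, -1, -1):
        acc = labels[k] if not acc else labels[k] + "." + acc
        tlds["tld%d" % k] = acc
    return tlds

tlds = cumulative_tlds(["forums", "news", "cnn", "com", "ac"])
assert tlds["tld2"] == "cnn.com.ac" and tlds["tld0"] == "forums.news.cnn.com.ac"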
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_dask_fs_reader.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import pytest
from clx.io.reader.dask_fs_reader import DaskFileSystemReader

expected_df = cudf.DataFrame(
    {
        "firstname": ["Emma", "Ava", "Sophia"],
        "lastname": ["Olivia", "Isabella", "Charlotte"],
        "gender": ["F", "F", "F"],
    }
)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_fetch_data_csv(tmpdir, expected_df):
    fname = tmpdir.mkdir("tmp_test_fs_reader").join("person.csv")
    expected_df.to_csv(fname, index=False)
    config = {
        "type": "dask_fs",
        "input_path": fname,
        "names": ["firstname", "lastname", "gender"],
        "delimiter": ",",
        "usecols": ["firstname", "lastname", "gender"],
        "dtype": ["str", "str", "str"],
        "header": 0,
        "input_format": "csv",
    }
    reader = DaskFileSystemReader(config)
    fetched_df = reader.fetch_data().compute()
    assert fetched_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_fetch_data_parquet(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_reader").join("person.parquet"))
    cudf.io.parquet.to_parquet(expected_df, fname)
    config = {
        "type": "dask_fs",
        "input_path": fname,
        "columns": ["firstname", "lastname", "gender"],
        "input_format": "parquet",
        "gather_statistics": False,
        "split_row_groups": False
    }
    reader = DaskFileSystemReader(config)
    fetched_df = reader.fetch_data().compute()
    assert fetched_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_fetch_data_orc(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_reader").join("person.orc"))
    cudf.io.orc.to_orc(expected_df, fname)
    config = {
        "type": "dask_fs",
        "input_path": fname,
        "input_format": "orc"
    }
    reader = DaskFileSystemReader(config)
    fetched_df = reader.fetch_data().compute()
    assert fetched_df.equals(expected_df)
2,572
30.765432
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_binary_sequence_classifier.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
from os import path

import cudf
import torch
import transformers
from cuml.model_selection import train_test_split
from faker import Faker

from clx.analytics.binary_sequence_classifier import BinarySequenceClassifier

sc = BinarySequenceClassifier()
if torch.cuda.is_available():
    sc.init_model("bert-base-uncased")


def test_train_model():
    if torch.cuda.is_available():
        fake = Faker()
        email_col = [fake.text() for _ in range(200)]
        label_col = [random.randint(0, 1) for _ in range(200)]
        emails_gdf = cudf.DataFrame(list(zip(email_col, label_col)), columns=["email", "label"])
        X_train, X_test, y_train, y_test = train_test_split(
            emails_gdf, "label", train_size=0.8, random_state=10
        )
        sc.train_model(
            X_train["email"],
            y_train,
            learning_rate=3e-5,
            max_seq_len=128,
            batch_size=6,
            epochs=1,
        )
        assert isinstance(
            sc._model.module,
            transformers.models.bert.modeling_bert.BertForSequenceClassification,
        )


def test_evaluate_model():
    if torch.cuda.is_available():
        X_test = cudf.Series(["email 1", "email 2"])
        y_test = cudf.Series([0, 0])
        accuracy = sc.evaluate_model(
            X_test, y_test, max_seq_len=128, batch_size=32
        )
        assert accuracy >= 0.0 and accuracy <= 1.0


def test_predict():
    if torch.cuda.is_available():
        X_test = cudf.Series(["email 1", "email 2"])
        preds = sc.predict(X_test, max_seq_len=128)
        assert preds[0].isin([False, True]).equals(cudf.Series([True, True]))


def test_save_model(tmpdir):
    if torch.cuda.is_available():
        sc.save_model(tmpdir)
        assert path.exists(str(tmpdir.join("config.json")))
        assert path.exists(str(tmpdir.join("pytorch_model.bin")))


def test_save_checkpoint(tmpdir):
    if torch.cuda.is_available():
        fname = str(tmpdir.mkdir("tmp_test_sequence_classifier").join("sc_checkpoint.tar"))
        sc.save_checkpoint(fname)
        assert path.exists(fname)


def test_load_checkpoint(tmpdir):
    if torch.cuda.is_available():
        fname = str(tmpdir.mkdir("tmp_test_sequence_classifier").join("sc_checkpoint.tar"))
        sc.save_checkpoint(fname)
        assert path.exists(fname)
        sc.load_checkpoint(fname)
        assert isinstance(
            sc._model.module,
            transformers.models.bert.modeling_bert.BertForSequenceClassification,
        )
3,113
32.12766
96
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_netflow_workflow.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
from clx.workflow.netflow_workflow import NetflowWorkflow


def test_netflow_workflow():
    """Tests the netflow dataframe enrichment"""
    netflow_workflow = NetflowWorkflow("netflow-workflow")
    input_df = cudf.DataFrame(
        {
            "ts time": ["12345678900.12345"],
            "uid string": ["123ABC"],
            "id.orig_h": ["123.456.789"],
            "id.orig_p": ["1000"],
            "id.resp_h": ["987.654.321"],
            "id.resp_p": ["80"],
            "proto": ["tcp"],
            "service": ["-"],
            "duration": ["2.015"],
            "orig_bytes": ["0"],
            "resp_bytes": ["0"],
            "conn_state": ["SH"],
            "local_orig": ["-"],
            "local_resp": ["-"],
            "missed_bytes": ["0"],
            "history": ["F"],
            "orig_pkts count": ["2"],
            "orig_ip_bytes": ["80"],
            "resp_pkts": ["0"],
            "resp_ip_bytes": ["0"],
            "tunnel_parents": ["-"],
        }
    )
    actual_df = netflow_workflow.workflow(input_df)
    expected_df = cudf.DataFrame(
        {
            "ts time": ["12345678900.12345"],
            "uid string": ["123ABC"],
            "id.orig_h": ["123.456.789"],
            "id.orig_p": ["1000"],
            "id.resp_h": ["987.654.321"],
            "id.resp_p": ["80"],
            "proto": ["tcp"],
            "service": ["-"],
            "duration": ["2.015"],
            "orig_bytes": ["0"],
            "resp_bytes": ["0"],
            "conn_state": ["SH"],
            "local_orig": ["-"],
            "local_resp": ["-"],
            "missed_bytes": ["0"],
            "history": ["F"],
            "orig_pkts count": ["2"],
            "orig_ip_bytes": ["80"],
            "resp_pkts": ["0"],
            "resp_ip_bytes": ["0"],
            "tunnel_parents": ["-"],
            "netflow_enriched": ["netflow_enriched"],
        }
    )
    assert actual_df.equals(expected_df)
2,547
32.526316
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_windows_event_parser.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import pytest
from clx.parsers.windows_event_parser import WindowsEventParser

TEST_DATA = [
    '{"_indextime":"1554145632","linecount":"63","sourcetype":"WinEventLog:Security","_cd":"309:1061724899","_raw":"04/01/2019 07:07:21 PM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4624\\nEventType=0\\nType=Information\\nComputerName=test109.test.com\\nTaskCategory=Logon\\nOpCode=Info\\nRecordNumber=13730612955\\nKeywords=Audit Success\\nMessage=An account was successfully logged on.\\r\\n\\r\\nSubject:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\t-\\r\\n\\tAccount Domain:\\t\\t-\\r\\n\\tLogon ID:\\t\\t0x0\\r\\n\\r\\nLogon Type:\\t\\t\\t3\\r\\n\\r\\nImpersonation Level:\\t\\tImpersonation\\r\\n\\r\\nNew Logon:\\r\\n\\tSecurity ID:\\t\\ttest.com\\test106$\\r\\n\\tAccount Name:\\t\\ttest106$\\r\\n\\tAccount Domain:\\t\\ttest.com\\r\\n\\tLogon ID:\\t\\t0x9DE8990DE\\r\\n\\tLogon GUID:\\t\\t{E53069F0-662E-0C65-F889-AA8D8770D56A}\\r\\n\\r\\nProcess Information:\\r\\n\\tProcess ID:\\t\\t0x0\\r\\n\\tProcess Name:\\t\\t-\\r\\n\\r\\nNetwork Information:\\r\\n\\tWorkstation Name:\\t\\r\\n\\tSource Network Address:\\t100.00.100.1\\r\\n\\tSource Port:\\t\\t39028\\r\\n\\r\\nDetailed Authentication Information:\\r\\n\\tLogon Process:\\t\\tKerberos\\r\\n\\tAuthentication Package:\\tKerberos\\r\\n\\tTransited Services:\\t-\\r\\n\\tPackage Name (NTLM only):\\t-\\r\\n\\tKey Length:\\t\\t0\\r\\n\\r\\nThis event is generated when a logon session is created. It is generated on the computer that was accessed.\\r\\n\\r\\nThe subject fields indicate the account on the local system which requested the logon. This is most commonly a service such as the Server service, or a local process such as Winlogon.exe or Services.exe.\\r\\n\\r\\nThe logon type field indicates the kind of logon that occurred. The most common types are 2 (interactive) and 3 (network).\\r\\n\\r\\nThe New Logon fields indicate the account for whom the new logon was created, i.e. the account that was logged on.\\r\\n\\r\\nThe network fields indicate where a remote logon request originated. Workstation name is not always available and may be left blank in some cases.\\r\\n\\r\\nThe impersonation level field indicates the extent to which a process in the logon session can impersonate.\\r\\n\\r\\nThe authentication information fields provide detailed information about this specific logon request.\\r\\n\\t- Logon GUID is a unique identifier that can be used to correlate this event with a KDC event.\\r\\n\\t- Transited services indicate which intermediate services have participated in this logon request.\\r\\n\\t- Package name indicates which sub-protocol was used among the NTLM protocols.\\r\\n\\t- Key length indicates the length of the generated session key. This will be 0 if no session key was requested.","_pre_msg":"04/01/2019 07:07:21 PM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4624\\nEventType=0\\nType=Information\\nComputerName=test109.test.com\\nTaskCategory=Logon\\nOpCode=Info\\nRecordNumber=13730612955\\nKeywords=Audit Success","splunk_server":"sc.lab.test.com","source":"WinEventLog:Security","host":"test109","_serial":"5613","_bkt":"wineventlog~309~8C261931-2C10-4450-B82C-39A63512E150","_sourcetype":"WinEventLog:Security","EventCode":"4624","index":"wineventlog","_si":["sc.lab.test.com","wineventlog"],"_time":"1554145641","Message":"An account was successfully logged on.\\r\\n\\r\\nSubject:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\t-\\r\\n\\tAccount Domain:\\t\\t-\\r\\n\\tLogon ID:\\t\\t0x0\\r\\n\\r\\nLogon Type:\\t\\t\\t3\\r\\n\\r\\nImpersonation Level:\\t\\tImpersonation\\r\\n\\r\\nNew Logon:\\r\\n\\tSecurity ID:\\t\\ttest.com\\test106$\\r\\n\\tAccount Name:\\t\\ttest106$\\r\\n\\tAccount Domain:\\t\\ttest.com\\r\\n\\tLogon ID:\\t\\t0x9DE8990DE\\r\\n\\tLogon GUID:\\t\\t{E53069F0-662E-0C65-F889-AA8D8770D56A}\\r\\n\\r\\nProcess Information:\\r\\n\\tProcess ID:\\t\\t0x0\\r\\n\\tProcess Name:\\t\\t-\\r\\n\\r\\nNetwork Information:\\r\\n\\tWorkstation Name:\\t\\r\\n\\tSource Network Address:\\t100.00.100.1\\r\\n\\tSource Port:\\t\\t39028\\r\\n\\r\\nDetailed Authentication Information:\\r\\n\\tLogon Process:\\t\\tKerberos\\r\\n\\tAuthentication Package:\\tKerberos\\r\\n\\tTransited Services:\\t-\\r\\n\\tPackage Name (NTLM only):\\t-\\r\\n\\tKey Length:\\t\\t0\\r\\n\\r\\nThis event is generated when a logon session is created. It is generated on the computer that was accessed.\\r\\n\\r\\nThe subject fields indicate the account on the local system which requested the logon. This is most commonly a service such as the Server service, or a local process such as Winlogon.exe or Services.exe.\\r\\n\\r\\nThe logon type field indicates the kind of logon that occurred. The most common types are 2 (interactive) and 3 (network).\\r\\n\\r\\nThe New Logon fields indicate the account for whom the new logon was created, i.e. the account that was logged on.\\r\\n\\r\\nThe network fields indicate where a remote logon request originated. Workstation name is not always available and may be left blank in some cases.\\r\\n\\r\\nThe impersonation level field indicates the extent to which a process in the logon session can impersonate.\\r\\n\\r\\nThe authentication information fields provide detailed information about this specific logon request.\\r\\n\\t- Logon GUID is a unique identifier that can be used to correlate this event with a KDC event.\\r\\n\\t- Transited services indicate which intermediate services have participated in this logon request.\\r\\n\\t- Package name indicates which sub-protocol was used among the NTLM protocols.\\r\\n\\t- Key length indicates the length of the generated session key. This will be 0 if no session key was requested.","id":"c54d7f17-8eb8-4d78-a8f7-4b681256e2b3"}',
    '{"_raw":"04/03/2019 05:57:33 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4625\\nEventType=0\\nType=Information\\nComputerName=abc.test.com\\nTaskCategory=Logon\\nOpCode=Info\\nRecordNumber=849982687\\nKeywords=Audit Failure\\nMessage=An account failed to log on.\\r\\n\\r\\nSubject:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\t-\\r\\n\\tAccount Domain:\\t\\t-\\r\\n\\tLogon ID:\\t\\t0x0\\r\\n\\r\\nLogon Type:\\t\\t\\t3\\r\\n\\r\\nAccount For Which Logon Failed:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\thxyz\\r\\n\\tAccount Domain:\\t\\thxyz-PC1\\r\\n\\r\\nFailure Information:\\r\\n\\tFailure Reason:\\t\\tUnknown user name or bad password.\\r\\n\\tStatus:\\t\\t\\t0xc000006d\\r\\n\\tSub Status:\\t\\t0xc0000064\\r\\n\\r\\nProcess Information:\\r\\n\\tCaller Process ID:\\t0x0\\r\\n\\tCaller Process Name:\\t-\\r\\n\\r\\nNetwork Information:\\r\\n\\tWorkstation Name:\\thxyz-PC1\\r\\n\\tSource Network Address:\\t10.10.100.20\\r\\n\\tSource Port:\\t\\t53662\\r\\n\\r\\nDetailed Authentication Information:\\r\\n\\tLogon Process:\\t\\tNtLmSsp \\r\\n\\tAuthentication Package:\\tNTLM\\r\\n\\tTransited Services:\\t-\\r\\n\\tPackage Name (NTLM only):\\t-\\r\\n\\tKey Length:\\t\\t0\\r\\n\\r\\nThis event is generated when a logon request fails. It is generated on the computer where access was attempted.\\r\\n\\r\\nThe Subject fields indicate the account on the local system which requested the logon. This is most commonly a service such as the Server service, or a local process such as Winlogon.exe or Services.exe.\\r\\n\\r\\nThe Logon Type field indicates the kind of logon that was requested. The most common types are 2 (interactive) and 3 (network).\\r\\n\\r\\nThe Process Information fields indicate which account and process on the system requested the logon.\\r\\n\\r\\nThe Network Information fields indicate where a remote logon request originated. Workstation name is not always available and may be left blank in some cases.\\r\\n\\r\\nThe authentication information fields provide detailed information about this specific logon request.\\r\\n\\t- Transited services indicate which intermediate services have participated in this logon request.\\r\\n\\t- Package name indicates which sub-protocol was used among the NTLM protocols.\\r\\n\\t- Key length indicates the length of the generated session key. This will be 0 if no session key was requested.","sourcetype":"WinEventLog:Security","source":"WinEventLog:Security","_si":["sc.lab.test.com","wineventlog"],"_sourcetype":"WinEventLog:Security","Message":"An account failed to log on.\\r\\n\\r\\nSubject:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\t-\\r\\n\\tAccount Domain:\\t\\t-\\r\\n\\tLogon ID:\\t\\t0x0\\r\\n\\r\\nLogon Type:\\t\\t\\t3\\r\\n\\r\\nAccount For Which Logon Failed:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\thxyz\\r\\n\\tAccount Domain:\\t\\thxyz-PC1\\r\\n\\r\\nFailure Information:\\r\\n\\tFailure Reason:\\t\\tUnknown user name or bad password.\\r\\n\\tStatus:\\t\\t\\t0xc000006d\\r\\n\\tSub Status:\\t\\t0xc0000064\\r\\n\\r\\nProcess Information:\\r\\n\\tCaller Process ID:\\t0x0\\r\\n\\tCaller Process Name:\\t-\\r\\n\\r\\nNetwork Information:\\r\\n\\tWorkstation Name:\\thxyz-PC1\\r\\n\\tSource Network Address:\\t10.10.100.20\\r\\n\\tSource Port:\\t\\t53662\\r\\n\\r\\nDetailed Authentication Information:\\r\\n\\tLogon Process:\\t\\tNtLmSsp \\r\\n\\tAuthentication Package:\\tNTLM\\r\\n\\tTransited Services:\\t-\\r\\n\\tPackage Name (NTLM only):\\t-\\r\\n\\tKey Length:\\t\\t0\\r\\n\\r\\nThis event is generated when a logon request fails. It is generated on the computer where access was attempted.\\r\\n\\r\\nThe Subject fields indicate the account on the local system which requested the logon. This is most commonly a service such as the Server service, or a local process such as Winlogon.exe or Services.exe.\\r\\n\\r\\nThe Logon Type field indicates the kind of logon that was requested. The most common types are 2 (interactive) and 3 (network).\\r\\n\\r\\nThe Process Information fields indicate which account and process on the system requested the logon.\\r\\n\\r\\nThe Network Information fields indicate where a remote logon request originated. Workstation name is not always available and may be left blank in some cases.\\r\\n\\r\\nThe authentication information fields provide detailed information about this specific logon request.\\r\\n\\t- Transited services indicate which intermediate services have participated in this logon request.\\r\\n\\t- Package name indicates which sub-protocol was used among the NTLM protocols.\\r\\n\\t- Key length indicates the length of the generated session key. This will be 0 if no session key was requested.","_bkt":"wineventlog~313~8C261931-2C10-4450-B82C-39A63512E150","EventCode":"4625","_indextime":"1554242244","index":"wineventlog","_time":"1554242253","_pre_msg":"04/03/2019 05:57:33 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4625\\nEventType=0\\nType=Information\\nComputerName=abc.test.com\\nTaskCategory=Logon\\nOpCode=Info\\nRecordNumber=849982687\\nKeywords=Audit Failure","_cd":"313:1467779602","_serial":"16723","splunk_server":"sc.lab.test.com","host":"zjdhcp01","linecount":"61","id":"cf4876f3-716c-415c-994e-84acda054c9c"}',
    '{"_pre_msg":"04/03/2019 11:58:59 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=5156\\nEventType=0\\nType=Information\\nComputerName=user234.test.com\\nTaskCategory=Filtering Platform Connection\\nOpCode=Info\\nRecordNumber=241754521\\nKeywords=Audit Success","host":"user234","_raw":"04/03/2019 11:58:59 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=5156\\nEventType=0\\nType=Information\\nComputerName=user234.test.com\\nTaskCategory=Filtering Platform Connection\\nOpCode=Info\\nRecordNumber=241754521\\nKeywords=Audit Success\\nMessage=The Windows Filtering Platform has permitted a connection.\\r\\n\\r\\nApplication Information:\\r\\n\\tProcess ID:\\t\\t4\\r\\n\\tApplication Name:\\tSystem\\r\\n\\r\\nNetwork Information:\\r\\n\\tDirection:\\t\\tInbound\\r\\n\\tSource Address:\\t\\t100.20.100.20\\r\\n\\tSource Port:\\t\\t138\\r\\n\\tDestination Address:\\t100.20.100.30\\r\\n\\tDestination Port:\\t\\t138\\r\\n\\tProtocol:\\t\\t17\\r\\n\\r\\nFilter Information:\\r\\n\\tFilter Run-Time ID:\\t0\\r\\n\\tLayer Name:\\t\\tReceive/Accept\\r\\n\\tLayer Run-Time ID:\\t44","_si":["sc.lab.test.com","wineventlog"],"source":"WinEventLog:Security","sourcetype":"WinEventLog:Security","splunk_server":"sc.lab.test.com","_bkt":"wineventlog~316~8C261931-2C10-4450-B82C-39A63512E150","_sourcetype":"WinEventLog:Security","_indextime":"1554317930","EventCode":"5156","Message":"The Windows Filtering Platform has permitted a connection.\\r\\n\\r\\nApplication Information:\\r\\n\\tProcess ID:\\t\\t4\\r\\n\\tApplication Name:\\tSystem\\r\\n\\r\\nNetwork Information:\\r\\n\\tDirection:\\t\\tInbound\\r\\n\\tSource Address:\\t\\t100.20.100.20\\r\\n\\tSource Port:\\t\\t138\\r\\n\\tDestination Address:\\t100.20.100.30\\r\\n\\tDestination Port:\\t\\t138\\r\\n\\tProtocol:\\t\\t17\\r\\n\\r\\nFilter Information:\\r\\n\\tFilter Run-Time ID:\\t0\\r\\n\\tLayer Name:\\t\\tReceive/Accept\\r\\n\\tLayer Run-Time ID:\\t44","linecount":"29","_serial":"136","_cd":"316:913400766","index":"wineventlog","_time":"1554317939","id":"c3f48bba-90a1-4999-84a6-4da9d964d31d"}',
    '{"_pre_msg":"04/03/2019 11:58:59 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=5157\\nEventType=0\\nType=Information\\nComputerName=abc106.test.com\\nTaskCategory=Filtering Platform Connection\\nOpCode=Info\\nRecordNumber=2099763859\\nKeywords=Audit Failure","host":"abc106","_raw":"04/03/2019 11:58:59 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=5157\\nEventType=0\\nType=Information\\nComputerName=abc106.test.com\\nTaskCategory=Filtering Platform Connection\\nOpCode=Info\\nRecordNumber=2099763859\\nKeywords=Audit Failure\\nMessage=The Windows Filtering Platform has blocked a connection.\\r\\n\\r\\nApplication Information:\\r\\n\\tProcess ID:\\t\\t1048\\r\\n\\tApplication Name:\\t\\device\\harddiskvolume1\\windows\\system32\\svchost.exe\\r\\n\\r\\nNetwork Information:\\r\\n\\tDirection:\\t\\tInbound\\r\\n\\tSource Address:\\t\\t100.20.100.30\\r\\n\\tSource Port:\\t\\t137\\r\\n\\tDestination Address:\\t100.20.100.20\\r\\n\\tDestination Port:\\t\\t137\\r\\n\\tProtocol:\\t\\t0\\r\\n\\r\\nFilter Information:\\r\\n\\tFilter Run-Time ID:\\t65595\\r\\n\\tLayer Name:\\t\\tReceive/Accept\\r\\n\\tLayer Run-Time ID:\\t44","_si":["sc.lab.test.com","wineventlog"],"source":"WinEventLog:Security","sourcetype":"WinEventLog:Security","splunk_server":"sc.lab.test.com","_bkt":"wineventlog~316~8C261931-2C10-4450-B82C-39A63512E150","_sourcetype":"WinEventLog:Security","_indextime":"1554317931","EventCode":"5157","Message":"The Windows Filtering Platform has blocked a connection.\\r\\n\\r\\nApplication Information:\\r\\n\\tProcess ID:\\t\\t1048\\r\\n\\tApplication Name:\\t\\device\\harddiskvolume1\\windows\\system32\\svchost.exe\\r\\n\\r\\nNetwork Information:\\r\\n\\tDirection:\\t\\tInbound\\r\\n\\tSource Address:\\t\\t100.20.100.30\\r\\n\\tSource Port:\\t\\t137\\r\\n\\tDestination Address:\\t100.20.100.20\\r\\n\\tDestination Port:\\t\\t137\\r\\n\\tProtocol:\\t\\t0\\r\\n\\r\\nFilter Information:\\r\\n\\tFilter Run-Time ID:\\t65595\\r\\n\\tLayer Name:\\t\\tReceive/Accept\\r\\n\\tLayer Run-Time ID:\\t44","linecount":"29","_serial":"57","_cd":"316:913426654","index":"wineventlog","_time":"1554317939","id":"565beda9-346a-46a3-9f1f-25eab8d3414d"}',
    '{"_raw":"04/03/2019 05:57:33 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4798\\nEventType=0\\nType=Information\\nComputerName=abc.test.com\\nTaskCategory=Logon\\nOpCode=Info\\nRecordNumber=849982687\\nKeywords=Audit Failure\\nMessage=A user\'s local group membership was enumerated.\\r\\n\\r\\nSubject:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\t-\\r\\n\\tAccount Domain:\\t\\t-\\r\\n\\tLogon ID:\\t\\t0x0\\r\\n\\r\\nUser:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\thxyz\\r\\n\\tAccount Domain:\\t\\thxyz-PC1\\r\\n\\r\\nProcess Information:\\r\\n\\tProcess ID:\\t0x0\\r\\n\\tProcess Name:\\t-","sourcetype":"WinEventLog:Security","source":"WinEventLog:Security","_si":["sc.lab.test.com","wineventlog"],"_sourcetype":"WinEventLog:Security","Message":"A user\'s local group membership was enumerated.\\r\\n\\r\\nSubject:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\t-\\r\\n\\tAccount Domain:\\t\\t-\\r\\n\\tLogon ID:\\t\\t0x0\\r\\n\\r\\nUser:\\r\\n\\tSecurity ID:\\t\\tNULL SID\\r\\n\\tAccount Name:\\t\\thxyz\\r\\n\\tAccount Domain:\\t\\thxyz-PC1\\r\\n\\r\\nProcess Information:\\r\\n\\tProcess ID:\\t0x0\\r\\n\\tProcess Name:\\t-","_bkt":"wineventlog~313~8C261931-2C10-4450-B82C-39A63512E150","EventCode":"4798","_indextime":"1554242244","index":"wineventlog","_time":"1554242253","_pre_msg":"04/03/2019 05:57:33 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4798\\nEventType=0\\nType=Information\\nComputerName=abc.test.com\\nTaskCategory=Logon\\nOpCode=Info\\nRecordNumber=849982687\\nKeywords=Audit Failure","_cd":"313:1467779602","_serial":"16723","splunk_server":"sc.lab.test.com","host":"zjdhcp01","linecount":"61","id":"cf4876f3-716c-415c-994e-84acda054c9c"}',
    '{"EventCode":"4769","_raw":"09/27/2018 04:45:36 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4769\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=Kerberos Service Ticket Operations\\nOpCode=Info\\nRecordNumber=8876865135\\nKeywords=Audit Success\\nMessage=A Kerberos service ticket was requested.\\n\\nAccount Information:\\n\\tAccount Name:\\t\\tuser@localhost.com\\n\\tAccount Domain:\\t\\tlocalhost.com\\n\\tLogon GUID:\\t\\t{1F1D4C09-E154-4898-4EB8-E3A03E130D11}\\n\\nService Information:\\n\\tService Name:\\t\\ttest.localhost.com\\n\\tService ID:\\t\\tNONE_MAPPED\\n\\nNetwork Information:\\n\\tClient Address:\\t\\t::ffff:100.10.100.20\\n\\tClient Port:\\t\\t26061\\n\\nAdditional Information:\\n\\tTicket Options:\\t\\t0x40810000\\n\\tTicket Encryption Type:\\t0x17\\n\\tFailure Code:\\t\\t0x0\\n\\tTransited Services:\\t-\\n\\nThis event is generated every time access is requested to a resource such as a computer or a Windows service. The service name indicates the resource to which access was requested.\\n\\nThis event can be correlated with Windows logon events by comparing the Logon GUID fields in each event. The logon event occurs on the machine that was accessed, which is often a different machine than the domain controller which issued the service ticket.\\n\\nTicket options, encryption types, and failure codes are defined in RFC 4120.","id":"cf4876f3-716c-415c-994e-84acda054c9c"}',
    '{"EventCode":"4770","_raw":"09/27/2018 05:15:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4770\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=Kerberos Service Ticket Operations\\nOpCode=Info\\nRecordNumber=186980567\\nKeywords=Audit Success\\nMessage=A Kerberos service ticket was renewed.\\n\\nAccount Information:\\n\\tAccount Name:\\t\\tTEST@LOCALHOST.COM\\n\\tAccount Domain:\\t\\tLOCALHOST.COM\\n\\nService Information:\\n\\tService Name:\\t\\tuser\\n\\tService ID:\\t\\tLOCALHOST.COM\\user\\n\\nNetwork Information:\\n\\tClient Address:\\t\\t::ffff:10.30.100.130\\n\\tClient Port:\\t\\t62133\\n\\nAdditional Information:\\n\\tTicket Options:\\t\\t0x50800002\\n\\tTicket Encryption Type:\\t0x12\\n\\nTicket options and encryption types are defined in RFC 4120.","id":"052b3a64-f1bd-4884-8e48-30b553bc495a"}',
    '{"id":"cf4876f3-716c-415c-994e-84acda054c9c","_sourcetype": "WinEventLog:Security", "linecount": "39", "index": "wineventlog", "_raw": "12/06/2018 06:52:05 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4771\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=Kerberos Authentication Service\\nOpCode=Info\\nRecordNumber=4901782953\\nKeywords=Audit Failure\\nMessage=Kerberos pre-authentication failed.\\r\\n\\r\\nAccount Information:\\r\\n\\tSecurity ID:\\t\\tLOCALHOST.COM\\lab\\r\\n\\tAccount Name:\\t\\tlab\\r\\n\\r\\nService Information:\\r\\n\\tService Name:\\t\\tuser/LOCALHOST.COM\\r\\n\\r\\nNetwork Information:\\r\\n\\tClient Address:\\t\\t100.20.1.70\\r\\n\\tClient Port:\\t\\t60284\\r\\n\\r\\nAdditional Information:\\r\\n\\tTicket Options:\\t\\t0x40800000\\r\\n\\tFailure Code:\\t\\t0x18\\r\\n\\tPre-Authentication Type:\\t2\\r\\n\\r\\nCertificate Information:\\r\\n\\tCertificate Issuer Name:\\t\\t\\r\\n\\tCertificate Serial Number: \\t\\r\\n\\tCertificate Thumbprint:\\t\\t\\r\\n\\r\\nCertificate information is only provided if a certificate was used for pre-authentication.\\r\\n\\r\\nPre-authentication types, ticket options and failure codes are defined in RFC 4120.\\r\\n\\r\\nIf the ticket was malformed or damaged during transit and could not be decrypted, then many fields in this event might not be present.", "EventCode": "4771", 
"host": "BGDC101", "_indextime": "1544059330", "Message": "Kerberos pre-authentication failed.\\r\\n\\r\\nAccount Information:\\r\\n\\tSecurity ID:\\t\\tLOCALHOST.COM\\lab\\r\\n\\tAccount Name:\\t\\tlab\\r\\n\\r\\nService Information:\\r\\n\\tService Name:\\t\\tuser/LOCALHOST.COM\\r\\n\\r\\nNetwork Information:\\r\\n\\tClient Address:\\t\\t100.20.1.70\\r\\n\\tClient Port:\\t\\t60284\\r\\n\\r\\nAdditional Information:\\r\\n\\tTicket Options:\\t\\t0x40800000\\r\\n\\tFailure Code:\\t\\t0x18\\r\\n\\tPre-Authentication Type:\\t2\\r\\n\\r\\nCertificate Information:\\r\\n\\tCertificate Issuer Name:\\t\\t\\r\\n\\tCertificate Serial Number: \\t\\r\\n\\tCertificate Thumbprint:\\t\\t\\r\\n\\r\\nCertificate information is only provided if a certificate was used for pre-authentication.\\r\\n\\r\\nPre-authentication types, ticket options and failure codes are defined in RFC 4120.\\r\\n\\r\\nIf the ticket was malformed or damaged during transit and could not be decrypted, then many fields in this event might not be present.", "splunk_server": "localhost", "source": "WinEventLog:Security", "_cd": "215:335179321", "_serial": "0", "_bkt": "wineventlog~215~2CDBBBA3-F529-4047-AF8A-F1380407313B", "_time": "1544059325", "_si": ["localhost", "wineventlog"], "sourcetype": "WinEventLog:Security", "_pre_msg": "12/06/2018 06:52:05 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4771\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=Kerberos Authentication Service\\nOpCode=Info\\nRecordNumber=4901782953\\nKeywords=Audit Failure"}', '{"EventCode":"4781","_raw":"09/27/2018 05:15:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4781\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=Kerberos Service Ticket Operations\\nOpCode=Info\\nRecordNumber=186980567\\nKeywords=Audit Success\\nMessage=The name of an account was changed.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tACME\Administrator\\n\\tAccount Name:\\t\\tTEST@LOCALHOST.COM\\n\\tAccount Domain:\\t\\tLOCALHOST.COM\\n\\tLogon ID:\\t\\t0x1f40f\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\tACME\emp-nbonaparte\\n\\tAccount Domain:\\t\\tACME\\n\\tOld Account Name:\\t\\tnbonaparte\\n\\tNew Account Name:\\t\\temp-nbonaparte\\n\\nAdditional Information:\\n\\tPrivileges:\\t\\t-","id":"052b3a64-f1bd-4884-8e48-30b553bc495a"}', '{"EventCode":"4782","_raw":"09/27/2018 05:15:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4782\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=Kerberos Service Ticket Operations\\nOpCode=Info\\nRecordNumber=186980567\\nKeywords=Audit Success\\n\\nSubject:\\n\\tSecurity ID:\\t\\tACME\Administrator\\n\\tAccount Name:\\t\\tTEST@LOCALHOST.COM\\n\\tAccount Domain:\\t\\tLOCALHOST.COM\\n\\tLogon ID:\\t\\t0x1f40f\\n\\nTarget Account:\\n\\tAccount Domain:\\t\\tACME\\n\\tAccount Name:\\t\\tnbonaparte","id":"052b3a64-f1bd-4884-8e48-30b553bc495a"}', '{"EventCode":"4634","_raw":"09/27/2018 05:15:34 AM\\\nLogName=Security\\\nSourceName=Microsoft Windows security auditing.\\\nEventCode=4634\\\nEventType=0\\\nType=Information\\\nComputerName=test.localhost.com\\\nTaskCategory=Kerberos Service Ticket Operations\\\nOpCode=Info\\\nRecordNumber=186980567\\\nKeywords=Audit Success\\\nMessage=An account was logged off.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tANONYMOUS LOGON\\n\\tAccount Name:\\t\\tAppService\\n\\tAccount Domain:\\t\\tDomain001\\n\\tLogon 
ID:\\t\\t0x27b9013\\n\\nLogon Type: 3\\n\\nThis event is generated when a logon session is destroyed. It may be positively correlated with a logon event using the Logon ID value. Logon IDs are only unique between reboots on the same computer.","id":"052b3a64-f1bd-4884-8e48-30b553bc495a"}', '{"EventCode":"4647","_raw":"09/27/2018 05:15:34 AM\\\nLogName=Security\\\nSourceName=Microsoft Windows security auditing.\\\nEventCode=4647\\\nEventType=0\\\nType=Information\\\nComputerName=test.localhost.com\\\nTaskCategory=Kerberos Service Ticket Operations\\\nOpCode=Info\\\nRecordNumber=186980567\\\nKeywords=Audit Success\\\nMessage=User initiated logoff.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tANONYMOUS LOGON\\n\\tAccount Name:\\t\\tAppService\\n\\tAccount Domain:\\t\\tDomain001\\n\\tLogon ID:\\t\\t0x27b9013\\n\\nThis event is generated when a logoff is initiated but the token reference count is not zero and the logon session cannot be destroyed. No further user-initiated activity can occur. This event can be interpreted as a logoff event.","id":"052b3a64-f1bd-4884-8e48-30b553bc495a"}', '{"EventCode":"4648","_raw":"09/27/2018 05:15:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4648\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=Kerberos Service Ticket Operations\\nOpCode=Info\\nRecordNumber=186980567\\nKeywords=Audit Success\\nMessage==A logon was attempted using explicit credentials.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tWIN-R9H529RIO4Y\Administrator\\n\\tAccount Name:\\t\\tAdministrator\\n\\tAccount Domain:\\t\\tWIN-R9H529RIO4Y\\n\\tLogon ID:\\t\\t0x1ba0e\\n\\tLogon GUID:\\t\\t {00000000-0000-0000-0000-000000000000}\\n\\nAccount Whose Credentials Were Used:\\n\\tAccount Name:\\t\\trsmith@mtg.com\\n\\tAccount Domain:\\t\\tWIN-R9H529RIO4Y\\n\\tLogon GUID:\\t\\t{00000000-0000-0000-0000-000000000000}\\n\\nTarget Server:\\n\\tTarget Server Name:\\t\\tsp01.IceMAIL.com\\n\\tAdditional Information:\\t\\tsp01.IceMAIL.com\\n\\nProcess Information:\\n\\tProcess ID:\\t\\t0x77c\\n\\tProcess Name:\\t\\tC:\\t\\t\Program Files\Internet Explorer\iexplore.exe\\n\\nNetwork Information:\\n\\tNetwork Address:-\\n\\tPort:-","id":\\t\\t"052b3a64-f1bd-4884-8e48-30b553bc495a"}', '{"EventCode":"4672","_raw":"09/27/2018 10:52:50 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4672\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=Special Logon\\nOpCode=Info\\nRecordNumber=3706115579\\nKeywords=Audit Success\\nMessage=Special privileges assigned to new logon.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tDEV\\tuser\\n\\tAccount Name:\\t\\tuser\\n\\tAccount Domain:\\t\\tDEV\\n\\tLogon ID:\\t\\t0x800A513D\\n\\nPrivileges:\\t\\tSeSecurityPrivilege\\n\\t\\t\\tSeBackupPrivilege\\n\\t\\t\\tSeRestorePrivilege\\n\\t\\t\\tSeTakeOwnershipPrivilege\\n\\t\\t\\tSeDebugPrivilege\\n\\t\\t\\tSeSystemEnvironmentPrivilege\\n\\t\\t\\tSeLoadDriverPrivilege\\n\\t\\t\\tSeImpersonatePrivilege","id":"052b3a64-f1bd-4884-8e48-30b553bc495a"}', '{"Account_Domain": "test.com", "Account_Name": "fvjbvfjbvf$", "ComputerName": "fvjbvfjbvf.test.com", "Logon_ID": "0x3e7", "Message": "A privileged service was called.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tNT AUTHORITY\\SYSTEM\\n\\tAccount Name:\\t\\tfvjbvfjbvf$\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x3e7\\n\\nService:\\n\\tServer:\\tNT Local Security Authority / Authentication Service\\n\\tService Name:\\tLsaRegisterLogonProcess()\\n\\nProcess:\\n\\tProcess 
ID:\\t0x234\\n\\tProcess Name:\\tC:\\Windows\\System32\\lsass.exe\\n\\nService Request Information:\\n\\tPrivileges:\\t\\tSeTcbPrivilege", "Security_ID": "NT AUTHORITY\\SYSTEM", "_bkt": "wineventlog~15~3D7EB920-B824-4467-A0DA-EFE0925C0D7D", "_cd": "15:36073965", "_indextime": "1527787976", "_pre_msg": "04/30/2018 05:13:59 PM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4673\\nEventType=0\\nType=Information\\nComputerName=fvjbvfjbvf.test.com\\nTaskCategory=Sensitive Privilege Use\\nOpCode=Info\\nRecordNumber=6623591495\\nKeywords=Audit Success", "_raw":"04/30/2018 05:13:59 PM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4673\\nEventType=0\\nType=Information\\nComputerName=fvjbvfjbvf.test.com\\nTaskCategory=Sensitive Privilege Use\\nOpCode=Info\\nRecordNumber=6623591495\\nKeywords=Audit Success\\nMessage=A privileged service was called.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tNT AUTHORITY\\SYSTEM\\n\\tAccount Name:\\t\\tfvjbvfjbvf$\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x3e7\\n\\nService:\\n\\tServer:\\tNT Local Security Authority / Authentication Service\\n\\tService Name:\\tLsaRegisterLogonProcess()\\n\\nProcess:\\n\\tProcess ID:\\t0x234\\n\\tProcess Name:\\tC:\\Windows\\System32\\lsass.exe\\n\\nService Request Information:\\n\\tPrivileges:\\t\\tSeTcbPrivilege", "_serial": "153", "_si": ["idx9.nvda-sec.splunkcloud.com", "wineventlog"], "_sourcetype": "WinEventLog:Security", "_time": "2018-05-01T00:13:59.000+00:00", "dest_nt_host": "fvjbvfjbvf.test.com", "host": "hqdvppmwb07", "index": "wineventlog", "linecount": "29", "source": "WinEventLog:Security", "sourcetype": "WinEventLog:Security", "splunk_server": "sc.lab.test.com, "vendor_privilege": "SeTcbPrivilege","id":"sdgfhsdfhj-3245-dsf"}', '{"preview":false,"result":{"EventCode":"4722","_raw":"09/27/2018 09:56:10 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4722\\nEventType=0\\nType=Information\\nComputerName=localhost.test.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=37352998061\\nKeywords=Audit Success\\nMessage=A user account was enabled.\\n\\nSubject:\\n\\tSecurity ID:\\t\\ttest.com\\dhgfckkcg\\n\\tAccount Name:\\t\\tdhgfckkcg\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x2D55E5EF7\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\ttest.com\\hgcghjj\\n\\tAccount Name:\\t\\thgcghjj\\n\\tAccount Domain:\\t\\ttest.com","id":"tdr5d-fjfgg-687bv-klhk"}', '{"preview":false,"result":{"EventCode":"4720","_raw":"09/27/2018 09:56:10 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4720\\nEventType=0\\nType=Information\\nComputerName=localhost.test.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=37352998061\\nKeywords=Audit Success\\nMessage=A user account was created.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tACME-FR\administrator\\n\\tAccount Name:\\t\\tadministrator\\n\\tAccount Domain:\\t\\tACME-FR\\n\\tLogon ID:\\t\\t0x20f9d\\n\\nNew Account:\\n\\tSecurity ID:\\t\\tACME-FR\John.LockeAccount\\n\\tName:\\t\\tJohn.Locke\\n\\tAccount Domain:\\t\\tACME-FR\\n\\nAttributes:\\n\\tSAM Account Name:\\t\\tJohn.Locke\\n\\tDisplay Name:\\t\\tJohn Locke\\n\\tUser Principal Name:\\t\\tJohn.Locke@acme-fr.local\\n\\tHome Directory:\\t\\t-\\n\\tHome Drive:\\t\\t-\\n\\tScript Path:\\t\\t-\\n\\tProfile Path:\\t\\t-\\n\\tUser Workstations:\\t\\t-\\n\\tPassword Last Set:\\t\\t<never>\\n\\tAccount Expires:\\t\\t<never>\\n\\tPrimary Group 
ID:\\t\\t513\\n\\tAllowed To Delegate To:\\t\\t-\\n\\tOld UAC Value:\\t\\t0x0\\n\\tNew UAC Value:\\t\\t0x15\\n\\tUser Account Control:\\t\\t\\nAccount Disabled\\n\'Password Not Required\' - Enabled\\n\'Normal Account\' - Enabled\\n\\tUser Parameters:\\t\\t-\\n\\tSID History:\\t\\t-\\n\\tLogon Hours:\\t\\t<value not set>\\n\\nAdditional Information:\\n\\tPrivileges\\t\\t-","id":"tdr5d-fjfgg-687bv-klhk"}', '{"preview":false,"result":{"EventCode":"4723","_raw":"09/27/2018 10:24:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4723\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=9342213186\\nKeywords=Audit Failure\\nMessage=An attempt was made to change an account\'s password.\\n\\nSubject:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x258440926\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\nAdditional Information:\\n\\tPrivileges\\t\\t-","id":"tdr5d-fjfgg-687bv-klhk"}', '{"preview":false,"result":{"EventCode":"4724","_raw":"09/27/2018 10:24:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4724\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=9342213186\\nKeywords=Audit Failure\\nMessage=An attempt was made to reset an account\'s password.\\n\\nSubject:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x258440926\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com","id":"tdr5d-fjfgg-687bv-klhk"}', '{"preview":false,"result":{"EventCode":"4725","_raw":"09/27/2018 10:24:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4725\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=9342213186\\nKeywords=Audit Failure\\nMessage=A user account was disabled..\\n\\nSubject:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x258440926\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com","id":"tdr5d-fjfgg-687bv-klhk"}', '{"preview":false,"result":{"EventCode":"4726","_raw":"09/27/2018 10:24:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4726\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=9342213186\\nKeywords=Audit Failure\\nMessage=A user account was deleted.\\n\\nSubject:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x258440926\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\nAdditional Information:\\n\\tPrivileges\\t\\t-","id":"tdr5d-fjfgg-687bv-klhk"}', '{"EventCode":"4732","_raw":"09/19/2018 06:18:24 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4732\\nEventType=0\\nType=Information\\nComputerName=testuser.localhost.com\\nTaskCategory=Security Group 
Management\\nOpCode=Info\\nRecordNumber=7984447290\\nKeywords=Audit Success\\nMessage=A member was added to a security-enabled local group.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tNT AUTHORITY\\SYSTEM\\n\\tAccount Name:\\t\\ttestuser$\\n\\tAccount Domain:\\t\\tlocalhost.COM\\n\\tLogon ID:\\t\\t0x3e7\\n\\nMember:\\n\\tSecurity ID:\\t\\tlocalhost.COM\\NV-LocalAdmins\\n\\tAccount Name:\\t\\t-\\n\\nGroup:\\n\\tSecurity ID:\\t\\ttestuser\\Offer Remote Assistance Helpers\\n\\tGroup Name:\\t\\tOffer Remote Assistance Helpers\\n\\tGroup Domain:\\t\\ttestuser\\n\\nAdditional Information:\\n\\tPrivileges:\\t\\t-","id":"tdr5d-fjfgg-687bv-klhk"}', '{"Account_Domain": ["PROD", "esfdhf06"], "Account_Name": ["esfdhf06$", "MATBLE"], "ComputerName": "esfdhf06.prod.test.com", "Logon_ID": "0x3E7", "Message": "A user account was changed.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tNT AUTHORITY\\SYSTEM\\n\\tAccount Name:\\t\\tesfdhf06$\\n\\tAccount Domain:\\t\\tPROD\\n\\tLogon ID:\\t\\t0x3E7\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\tesfdhf06\\ATBLE\\n\\tAccount Name:\\t\\tATBLE\\n\\tAccount Domain:\\t\\tesfdhf06\\n\\nChanged Attributes:\\n\\tSAM Account Name:\\tATBLE\\n\\tDisplay Name:\\t\\tMIKE ATBLE\\n\\tUser Principal Name:\\t-\\n\\tHome Directory:\\t\\t<value not set>\\n\\tHome Drive:\\t\\t<value not set>\\n\\tScript Path:\\t\\t<value not set>\\n\\tProfile Path:\\t\\t<value not set>\\n\\tUser Workstations:\\t<value not set>\\n\\tPassword Last Set:\\t5/1/2018 5:41:37 AM\\n\\tAccount Expires:\\t\\t<never>\\n\\tPrimary Group ID:\\t513\\n\\tAllowedToDelegateTo:\\t-\\n\\tOld UAC Value:\\t\\t0x210\\n\\tNew UAC Value:\\t\\t0x210\\n\\tUser Account Control:\\t-\\n\\tUser Parameters:\\t-\\n\\tSID History:\\t\\t-\\n\\tLogon Hours:\\t\\tAll\\n\\nAdditional Information:\\n\\tPrivileges:\\t\\t-", "Security_ID": ["NT AUTHORITY\\SYSTEM", "esfdhf06\\ATBLE"], "_bkt": "wineventlog~0~3D7EB920-B824-4467-A0DA-EFE0925C0D7D", "_cd": "0:1057390650", "_indextime": "1526126427", "_pre_msg": "05/01/2018 05:41:37 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4738\\nEventType=0\\nType=Information\\nComputerName=esfdhf06.prod.test.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=92255448\\nKeywords=Audit Success", "_raw":"05/01/2018 05:41:37 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4738\\nEventType=0\\nType=Information\\nComputerName=esfdhf06.prod.test.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=92255448\\nKeywords=Audit Success\\nMessage=A user account was changed.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tNT AUTHORITY\\SYSTEM\\n\\tAccount Name:\\t\\tesfdhf06$\\n\\tAccount Domain:\\t\\tPROD\\n\\tLogon ID:\\t\\t0x3E7\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\tesfdhf06\\ATBLE\\n\\tAccount Name:\\t\\tATBLE\\n\\tAccount Domain:\\t\\tesfdhf06\\n\\nChanged Attributes:\\n\\tSAM Account Name:\\tATBLE\\n\\tDisplay Name:\\t\\tMIKE ATBLE\\n\\tUser Principal Name:\\t-\\n\\tHome Directory:\\t\\t<value not set>\\n\\tHome Drive:\\t\\t<value not set>\\n\\tScript Path:\\t\\t<value not set>\\n\\tProfile Path:\\t\\t<value not set>\\n\\tUser Workstations:\\t<value not set>\\n\\tPassword Last Set:\\t5/1/2018 5:41:37 AM\\n\\tAccount Expires:\\t\\t<never>\\n\\tPrimary Group ID:\\t513\\n\\tAllowedToDelegateTo:\\t-\\n\\tOld UAC Value:\\t\\t0x210\\n\\tNew UAC Value:\\t\\t0x210\\n\\tUser Account Control:\\t-\\n\\tUser Parameters:\\t-\\n\\tSID History:\\t\\t-\\n\\tLogon Hours:\\t\\tAll\\n\\nAdditional Information:\\n\\tPrivileges:\\t\\t-", 
"_serial": "551", "_si": ["test.splunkcloud.com", "wineventlog"], "_sourcetype": "WinEventLog:Security", "_time": "2018-05-01T00:11:37.000+00:00", "dest_nt_host": "esfdhf06.prod.test.com", "host": "esfdhf06", "index": "wineventlog", "linecount": "46", "source": "WinEventLog:Security", "sourcetype": "WinEventLog:Security", "splunk_server": "test.splunkcloud.com", "vendor_privilege": "-","id":"tdr5d-fjfgg-687bv-klhk"}', '{"preview":false,"result":{"EventCode":"4740","_raw":"09/28/2018 01:53:37 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4740\\nEventType=0\\nType=Information\\nComputerName=sdgbjsd02.test.localhost.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=20832836\\nKeywords=Audit Success\\nMessage=A user account was locked out.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tNT AUTHORITY\\SYSTEM\\n\\tAccount Name:\\t\\tsdgbjsd02$\\n\\tAccount Domain:\\t\\tNVDMZ\\n\\tLogon ID:\\t\\t0x3E7\\n\\nAccount That Was Locked Out:\\n\\tSecurity ID:\\t\\tsdgbjsd02\\Guest\\n\\tAccount Name:\\t\\tGuest\\n\\nAdditional Information:\\n\\tCaller Computer Name:\\tsdgbjsd01","id":"tdr5d-fjfgg-687bv-klhk"}', '{"preview":false,"result":{"EventCode":"4743","_raw":"09/27/2018 10:24:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4743\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=9342213186\\nKeywords=Audit Failure\\nMessage=A computer account was deleted.\\n\\nSubject:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x258440926\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\nAdditional Information:\\n\\tPrivileges\\t\\t-","id":"tdr5d-fjfgg-687bv-klhk"}', '{"EventCode":"4756","_raw":"09/19/2018 06:18:24 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4756\\nEventType=0\\nType=Information\\nComputerName=testuser.localhost.com\\nTaskCategory=Security Group Management\\nOpCode=Info\\nRecordNumber=7984447290\\nKeywords=Audit Success\\nMessage=A member was added to a security-enabled universal group.\\n\\nSubject:\\n\\tSecurity ID:\\t\\tNT AUTHORITY\\SYSTEM\\n\\tAccount Name:\\t\\ttestuser$\\n\\tAccount Domain:\\t\\tlocalhost.COM\\n\\tLogon ID:\\t\\t0x3e7\\n\\nMember:\\n\\tSecurity ID:\\t\\tlocalhost.COM\\NV-LocalAdmins\\n\\tAccount Name:\\t\\t-\\n\\nGroup:\\n\\tSecurity ID:\\t\\ttestuser\\Offer Remote Assistance Helpers\\n\\tGroup Name:\\t\\tOffer Remote Assistance Helpers\\n\\tGroup Domain:\\t\\ttestuser\\n\\nAdditional Information:\\n\\tPrivileges:\\t\\t-\\n\\tExpiration time:\\t\\t","id":"tdr5d-fjfgg-687bv-klhk"}', '{"preview":false,"result":{"EventCode":"4725","_raw":"09/27/2018 10:24:34 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4725\\nEventType=0\\nType=Information\\nComputerName=test.localhost.com\\nTaskCategory=User Account Management\\nOpCode=Info\\nRecordNumber=9342213186\\nKeywords=Audit Failure\\nMessage=A user account was unlocked.\\n\\nSubject:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com\\n\\tLogon ID:\\t\\t0x258440926\\n\\nTarget Account:\\n\\tSecurity ID:\\t\\ttest.com\\auser\\n\\tAccount Name:\\t\\tauser\\n\\tAccount Domain:\\t\\ttest.com","id":"tdr5d-fjfgg-687bv-klhk"}', 
'{"preview":false,"result":{"EventCode":"4768","_raw":"09/27/2018 09:08:02 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=4768\\nEventType=0\\nType=Information\\nComputerName=test02.localhost.com\\nTaskCategory=Kerberos Authentication Service\\nOpCode=Info\\nRecordNumber=1376039507\\nKeywords=Audit Success\\nMessage=A Kerberos authentication ticket (TGT) was requested.\\n\\nAccount Information:\\n\\tAccount Name:\\t\\tHealthMailbox06ca30c\\n\\tSupplied Realm Name:\\tlocalhost.com\\n\\tUser ID:\\t\\t\\tlocalhost.com\\HealthMailbox06ca30c\\n\\nService Information:\\n\\tService Name:\\t\\tasdfgrvk\\n\\tService ID:\\t\\tlocalhost.com\\asdfgrvk\\n\\nNetwork Information:\\n\\tClient Address:\\t\\t::ffff:10.20.90.30\\n\\tClient Port:\\t\\t6349\\n\\nAdditional Information:\\n\\tTicket Options:\\t\\t0x40810010\\n\\tResult Code:\\t\\t0x0\\n\\tTicket Encryption Type:\\t0x12\\n\\tPre-Authentication Type:\\t2\\n\\nCertificate Information:\\n\\tCertificate Issuer Name:\\t\\t\\n\\tCertificate Serial Number:\\t\\n\\tCertificate Thumbprint:\\t\\t\\n\\nCertificate information is only provided if a certificate was used for pre-authentication.\\n\\nPre-authentication types, ticket options, encryption types and result codes are defined in RFC 4120.","id":"asd-eter-34235-fgd-346"}',
]


# One validator per Windows event code: each asserts the fields that
# WindowsEventParser is expected to extract from the matching TEST_DATA record.
def validate_4624(parsed_rec):
    assert parsed_rec["time"] == "04/01/2019 07:07:21 pm"
    assert parsed_rec["id"] == "c54d7f17-8eb8-4d78-a8f7-4b681256e2b3"
    assert parsed_rec["eventcode"] == "4624"
    assert (
        parsed_rec["detailed_authentication_information_authentication_package"]
        == "kerberos"
    )
    assert (
        parsed_rec["new_logon_logon_guid"] == "{e53069f0-662e-0c65-f889-aa8d8770d56a}"
    )
    assert parsed_rec["failure_information_failure_reason"] == ""
    assert parsed_rec["failure_information_status"] == ""
    assert parsed_rec["computername"] == ""
    assert parsed_rec["new_logon_logon_id"] == "0x9de8990de"
    assert parsed_rec["subject_security_id"] == "null sid"
    assert (
        parsed_rec["detailed_authentication_information_package_name_ntlm_only"] == "-"
    )
    assert parsed_rec["logon_type"] == "3"
    assert parsed_rec["account_for_which_logon_failed_security_id"] == ""
    assert parsed_rec["detailed_authentication_information_key_length"] == "0"
    assert parsed_rec["subject_logon_id"] == "0x0"
    assert parsed_rec["process_information_caller_process_name"] == ""
    assert parsed_rec["process_information_caller_process_id"] == ""
    assert parsed_rec["subject_account_name"] == "-"
    assert parsed_rec["process_information_process_name"] == "-"
    assert parsed_rec["new_logon_account_name"] == "test106$"
    assert parsed_rec["process_information_process_id"] == "0x0"
    assert parsed_rec["failure_information_sub_status"] == ""
    assert parsed_rec["new_logon_security_id"] == "test.comest"
    assert parsed_rec["network_information_source_network_address"] == "100.00.100.1"
    assert parsed_rec["detailed_authentication_information_transited_services"] == "-"
    assert parsed_rec["new_logon_account_domain"] == "test.com"
    assert parsed_rec["subject_account_domain"] == "-"
    assert parsed_rec["detailed_authentication_information_logon_process"] == "kerberos"
    assert parsed_rec["account_for_which_logon_failed_account_domain"] == ""
    assert parsed_rec["account_for_which_logon_failed_account_name"] == ""
    assert parsed_rec["network_information_workstation_name"] == ""
    assert parsed_rec["network_information_source_port"] == "39028"
    assert parsed_rec["application_information_process_id"] == ""
    assert parsed_rec["application_information_application_name"] == ""
    assert parsed_rec["network_information_direction"] == ""
    assert parsed_rec["network_information_source_address"] == ""
    assert parsed_rec["network_information_destination_address"] == ""
    assert parsed_rec["network_information_destination_port"] == ""
    assert parsed_rec["network_information_protocol"] == ""
    assert parsed_rec["filter_information_filter_run_time_id"] == ""
    assert parsed_rec["filter_information_layer_name"] == ""
    assert parsed_rec["filter_information_layer_run_time_id"] == ""


def validate_4625(parsed_rec):
    assert parsed_rec["time"] == "04/03/2019 05:57:33 am"
    assert parsed_rec["id"] == "cf4876f3-716c-415c-994e-84acda054c9c"
    assert parsed_rec["eventcode"] == "4625"
    assert (
        parsed_rec["detailed_authentication_information_authentication_package"]
        == "ntlm"
    )
    assert parsed_rec["new_logon_logon_guid"] == ""
    assert (
        parsed_rec["failure_information_failure_reason"]
        == "unknown user name or bad password."
    )
    assert parsed_rec["failure_information_status"] == "0xc000006d"
    assert parsed_rec["computername"] == "abc.test.com"
    assert parsed_rec["new_logon_logon_id"] == ""
    assert parsed_rec["subject_security_id"] == "null sid"
    assert (
        parsed_rec["detailed_authentication_information_package_name_ntlm_only"] == "-"
    )
    assert parsed_rec["logon_type"] == "3"
    assert parsed_rec["account_for_which_logon_failed_security_id"] == "null sid"
    assert parsed_rec["detailed_authentication_information_key_length"] == "0"
    assert parsed_rec["subject_logon_id"] == "0x0"
    assert parsed_rec["process_information_caller_process_name"] == "-"
    assert parsed_rec["process_information_caller_process_id"] == "0x0"
    assert parsed_rec["subject_account_name"] == "-"
    assert parsed_rec["process_information_process_name"] == ""
    assert parsed_rec["new_logon_account_name"] == ""
    assert parsed_rec["process_information_process_id"] == ""
    assert parsed_rec["failure_information_sub_status"] == "0xc0000064"
    assert parsed_rec["new_logon_security_id"] == ""
    assert parsed_rec["network_information_source_network_address"] == "10.10.100.20"
    assert parsed_rec["detailed_authentication_information_transited_services"] == "-"
    assert parsed_rec["new_logon_account_domain"] == ""
    assert parsed_rec["subject_account_domain"] == "-"
    assert parsed_rec["detailed_authentication_information_logon_process"] == "ntlmssp"
    assert parsed_rec["account_for_which_logon_failed_account_domain"] == "hxyz"
    assert parsed_rec["account_for_which_logon_failed_account_name"] == "hxyz"
    assert parsed_rec["network_information_workstation_name"] == "hxyz-pc1"
    assert parsed_rec["network_information_source_port"] == "53662"
    assert parsed_rec["application_information_process_id"] == ""
    assert parsed_rec["application_information_application_name"] == ""
    assert parsed_rec["network_information_direction"] == ""
    assert parsed_rec["network_information_source_address"] == ""
    assert parsed_rec["network_information_destination_address"] == ""
    assert parsed_rec["network_information_destination_port"] == ""
    assert parsed_rec["network_information_protocol"] == ""
    assert parsed_rec["filter_information_filter_run_time_id"] == ""
    assert parsed_rec["filter_information_layer_name"] == ""
    assert parsed_rec["filter_information_layer_run_time_id"] == ""


def validate_5156(parsed_rec):
    assert parsed_rec["time"] == "04/03/2019 11:58:59 am"
    assert parsed_rec["id"] == "c3f48bba-90a1-4999-84a6-4da9d964d31d"
    assert parsed_rec["eventcode"] == "5156"
    assert (
        parsed_rec["detailed_authentication_information_authentication_package"] == ""
    )
    assert parsed_rec["new_logon_logon_guid"] == ""
    assert parsed_rec["failure_information_failure_reason"] == ""
    assert parsed_rec["failure_information_status"] == ""
    assert parsed_rec["computername"] == ""
    assert parsed_rec["new_logon_logon_id"] == ""
    assert parsed_rec["subject_security_id"] == ""
    assert (
        parsed_rec["detailed_authentication_information_package_name_ntlm_only"] == ""
    )
    assert parsed_rec["logon_type"] == ""
    assert parsed_rec["account_for_which_logon_failed_security_id"] == ""
    assert parsed_rec["detailed_authentication_information_key_length"] == ""
    assert parsed_rec["subject_logon_id"] == ""
    assert parsed_rec["process_information_caller_process_name"] == ""
    assert parsed_rec["process_information_caller_process_id"] == ""
    assert parsed_rec["subject_account_name"] == ""
    assert parsed_rec["process_information_process_name"] == ""
    assert parsed_rec["new_logon_account_name"] == ""
    assert parsed_rec["process_information_process_id"] == ""
    assert parsed_rec["failure_information_sub_status"] == ""
    assert parsed_rec["new_logon_security_id"] == ""
    assert parsed_rec["network_information_source_network_address"] == ""
    assert parsed_rec["detailed_authentication_information_transited_services"] == ""
    assert parsed_rec["new_logon_account_domain"] == ""
    assert parsed_rec["subject_account_domain"] == ""
    assert parsed_rec["detailed_authentication_information_logon_process"] == ""
    assert parsed_rec["account_for_which_logon_failed_account_domain"] == ""
    assert parsed_rec["account_for_which_logon_failed_account_name"] == ""
    assert parsed_rec["network_information_workstation_name"] == ""
    assert parsed_rec["network_information_source_port"] == "138"
    assert parsed_rec["application_information_process_id"] == "4"
    assert parsed_rec["application_information_application_name"] == "system"
    assert parsed_rec["network_information_direction"] == "inbound"
    assert parsed_rec["network_information_source_address"] == "100.20.100.20"
    assert parsed_rec["network_information_destination_address"] == "100.20.100.30"
    assert parsed_rec["network_information_destination_port"] == "138"
    assert parsed_rec["network_information_protocol"] == "17"
    assert parsed_rec["filter_information_filter_run_time_id"] == "0"
    assert parsed_rec["filter_information_layer_name"] == "receive/accept"
    assert parsed_rec["filter_information_layer_run_time_id"] == "44"


def validate_5157(parsed_rec):
    assert parsed_rec["time"] == "04/03/2019 11:58:59 am"
    assert parsed_rec["id"] == "565beda9-346a-46a3-9f1f-25eab8d3414d"
    assert parsed_rec["eventcode"] == "5157"
    assert (
        parsed_rec["detailed_authentication_information_authentication_package"] == ""
    )
    assert parsed_rec["new_logon_logon_guid"] == ""
    assert parsed_rec["failure_information_failure_reason"] == ""
    assert parsed_rec["failure_information_status"] == ""
    assert parsed_rec["computername"] == ""
    assert parsed_rec["new_logon_logon_id"] == ""
    assert parsed_rec["subject_security_id"] == ""
    assert (
        parsed_rec["detailed_authentication_information_package_name_ntlm_only"] == ""
    )
    assert parsed_rec["logon_type"] == ""
    assert parsed_rec["account_for_which_logon_failed_security_id"] == ""
    assert parsed_rec["detailed_authentication_information_key_length"] == ""
    assert parsed_rec["subject_logon_id"] == ""
    assert parsed_rec["process_information_caller_process_name"] == ""
    assert parsed_rec["process_information_caller_process_id"] == ""
    assert parsed_rec["subject_account_name"] == ""
    assert parsed_rec["process_information_process_name"] == ""
    assert parsed_rec["new_logon_account_name"] == ""
    assert parsed_rec["process_information_process_id"] == ""
    assert parsed_rec["failure_information_sub_status"] == ""
    assert parsed_rec["new_logon_security_id"] == ""
    assert parsed_rec["network_information_source_network_address"] == ""
    assert parsed_rec["detailed_authentication_information_transited_services"] == ""
    assert parsed_rec["new_logon_account_domain"] == ""
    assert parsed_rec["subject_account_domain"] == ""
    assert parsed_rec["detailed_authentication_information_logon_process"] == ""
    assert parsed_rec["account_for_which_logon_failed_account_domain"] == ""
    assert parsed_rec["account_for_which_logon_failed_account_name"] == ""
    assert parsed_rec["network_information_workstation_name"] == ""
    assert parsed_rec["network_information_source_port"] == "137"
    assert parsed_rec["application_information_process_id"] == "1048"
    assert (
        parsed_rec["application_information_application_name"]
        == "\device\harddiskvolume1\windows\system32\svchost.exe"
    )
    assert parsed_rec["network_information_direction"] == "inbound"
    assert parsed_rec["network_information_source_address"] == "100.20.100.30"
    assert parsed_rec["network_information_destination_address"] == "100.20.100.20"
    assert parsed_rec["network_information_destination_port"] == "137"
    assert parsed_rec["network_information_protocol"] == "0"
    assert parsed_rec["filter_information_filter_run_time_id"] == "65595"
    assert parsed_rec["filter_information_layer_name"] == "receive/accept"
    assert parsed_rec["filter_information_layer_run_time_id"] == "44"


def validate_4798(parsed_rec):
    assert parsed_rec["time"] == "04/03/2019 05:57:33 am"
    assert parsed_rec["id"] == "cf4876f3-716c-415c-994e-84acda054c9c"
    assert parsed_rec["eventcode"] == "4798"
    assert parsed_rec["subject_security_id"] == "null sid"
    assert parsed_rec["subject_account_name"] == ""
    assert parsed_rec["subject_logon_id"] == "0x0"
    assert parsed_rec["user_security_id"] == "null sid"
    assert parsed_rec["user_account_name"] == "hxyz"
    assert parsed_rec["user_account_domain"] == "hxyz-pc1"
    assert parsed_rec["process_information_process_id"] == "0x0"
    assert parsed_rec["process_information_process_name"] == "-"


def validate_4769(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 04:45:36 am"
    assert parsed_rec["id"] == "cf4876f3-716c-415c-994e-84acda054c9c"
    assert parsed_rec["eventcode"] == "4769"
    assert parsed_rec["account_information_account_name"] == "user@localhost.com"
    assert parsed_rec["account_information_account_domain"] == "localhost.com"
    assert (
        parsed_rec["account_information_logon_guid"]
        == "{1f1d4c09-e154-4898-4eb8-e3a03e130d11}"
    )
    assert parsed_rec["service_information_service_name"] == "test.localhost.com"
    assert parsed_rec["service_information_service_id"] == "none_mapped"
    assert parsed_rec["network_information_client_address"] == "::ffff:100.10.100.20"
    assert parsed_rec["network_information_client_port"] == "26061"
    assert parsed_rec["additional_information_ticket_options"] == "0x40810000"
    assert parsed_rec["additional_information_ticket_encryption_type"] == "0x17"
    assert parsed_rec["additional_information_failure_code"] == "0x0"
    assert parsed_rec["additional_information_transited_services"] == "-"


def validate_4770(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 05:15:34 am"
    assert parsed_rec["id"] == "052b3a64-f1bd-4884-8e48-30b553bc495a"
    assert parsed_rec["eventcode"] == "4770"
    assert parsed_rec["account_information_account_name"] == "test@localhost.com"
    assert parsed_rec["account_information_account_domain"] == "localhost.com"
    assert parsed_rec["service_information_service_name"] == "user"
    assert parsed_rec["service_information_service_id"] == "localhost"
    assert parsed_rec["network_information_client_address"] == "::ffff:10.30.100.130"
    assert parsed_rec["network_information_client_port"] == "62133"
    assert parsed_rec["additional_information_ticket_options"] == "0x50800002"
    assert parsed_rec["additional_information_ticket_encryption_type"] == "0x12"


def validate_4771(parsed_rec):
    assert parsed_rec["time"] == "12/06/2018 06:52:05 am"
    assert parsed_rec["id"] == "cf4876f3-716c-415c-994e-84acda054c9c"
    assert parsed_rec["eventcode"] == "4771"
    assert parsed_rec["account_information_security_id"] == "localhost.com\lab"
    assert parsed_rec["account_information_account_name"] == "lab"
    assert parsed_rec["service_information_service_name"] == "user/localhost.com"
    assert parsed_rec["network_information_client_address"] == "100.20.1.70"
    assert parsed_rec["network_information_client_port"] == "60284"
    assert parsed_rec["additional_information_ticket_options"] == "0x40800000"
    assert parsed_rec["additional_information_failure_code"] == "0x18"
    assert parsed_rec["additional_information_pre_authentication_type"] == "2"
    assert parsed_rec["certificate_information_certificate_issuer_name"] == ""
    assert parsed_rec["certificate_information_certificate_serial_number"] == ""
    assert parsed_rec["certificate_information_certificate_thumbprint"] == ""


def validate_4781(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 05:15:34 am"
    assert parsed_rec["id"] == "052b3a64-f1bd-4884-8e48-30b553bc495a"
    assert parsed_rec["eventcode"] == "4781"
    assert parsed_rec["subject_security_id"] == "acme\\administrator"
    assert parsed_rec["subject_account_domain"] == "localhost.com"
    assert parsed_rec["subject_account_name"] == "test@localhost.com"
    assert parsed_rec["subject_logon_id"] == "0x1f40f"
    assert parsed_rec["target_account_security_id"] == "acme\\emp-nbonaparte"
    assert parsed_rec["target_account_account_domain"] == "acme"
    assert parsed_rec["target_account_old_account_name"] == "nbonaparte"
    assert parsed_rec["target_account_new_account_name"] == "emp-nbonaparte"
    assert parsed_rec["additional_information_privileges"] == "-"


def validate_4782(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 05:15:34 am"
    assert parsed_rec["id"] == "052b3a64-f1bd-4884-8e48-30b553bc495a"
    assert parsed_rec["eventcode"] == "4782"
    assert parsed_rec["subject_security_id"] == "acme\\administrator"
    assert parsed_rec["subject_account_domain"] == "localhost.com"
    assert parsed_rec["subject_account_name"] == "test@localhost.com"
    assert parsed_rec["subject_logon_id"] == "0x1f40f"
    assert parsed_rec["target_account_account_domain"] == "acme"
    assert parsed_rec["target_account_account_name"] == "nbonaparte"


def validate_4647(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 05:15:34 am"
    assert parsed_rec["id"] == "052b3a64-f1bd-4884-8e48-30b553bc495a"
    assert parsed_rec["eventcode"] == "4647"
    assert parsed_rec["subject_security_id"] == "anonymous logon"
    assert parsed_rec["subject_account_name"] == "appservice"
    assert parsed_rec["subject_account_domain"] == "domain001"
    assert parsed_rec["subject_logon_id"] == "0x27b9013"


def validate_4634(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 05:15:34 am"
    assert parsed_rec["id"] == "052b3a64-f1bd-4884-8e48-30b553bc495a"
    assert parsed_rec["eventcode"] == "4634"
    assert parsed_rec["subject_security_id"] == "anonymous logon"
    assert parsed_rec["subject_account_name"] == "appservice"
    assert parsed_rec["subject_account_domain"] == "domain001"
    assert parsed_rec["subject_logon_id"] == "0x27b9013"
    assert parsed_rec["logon_type"] == "3"


def validate_4648(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 05:15:34 am"
    assert parsed_rec["id"] == "052b3a64-f1bd-4884-8e48-30b553bc495a"
    assert parsed_rec["eventcode"] == "4648"
    assert parsed_rec["subject_account_name"] == "administrator"
    assert parsed_rec["subject_account_domain"] == "win-r9h529rio4y"
    assert parsed_rec["subject_logon_id"] == "0x1ba0e"
    assert parsed_rec["subject_logon_guid"] == "{00000000-0000-0000-0000-000000000000}"
    assert (
        parsed_rec["account_whose_credentials_were_used_account_name"]
        == "rsmith@mtg.com"
    )
    assert (
        parsed_rec["account_whose_credentials_were_used_account_domain"]
        == "win-r9h529rio4y"
    )
    assert (
        parsed_rec["account_whose_credentials_were_used_logon_guid"]
        == "{00000000-0000-0000-0000-000000000000}"
    )
    assert parsed_rec["target_server_target_server_name"] == "sp01.icemail.com"
    assert parsed_rec["target_server_additional_information"] == "sp01.icemail.com"
    assert parsed_rec["process_information_process_id"] == "0x77c"
    assert (
        parsed_rec["process_information_process_name"]
        == "c:\program files\internet explorer\iexplore.exe"
    )
    assert parsed_rec["network_information_network_address"] == "-"
    assert parsed_rec["network_information_port"] == "-"


def validate_4672(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 10:52:50 am"
    assert parsed_rec["id"] == "052b3a64-f1bd-4884-8e48-30b553bc495a"
    assert parsed_rec["eventcode"] == "4672"
    assert parsed_rec["subject_security_id"] == "devuser"
    assert parsed_rec["subject_account_name"] == "user"
    assert parsed_rec["subject_account_domain"] == "dev"
    assert parsed_rec["subject_logon_id"] == "0x800a513d"
    assert (
        parsed_rec["privileges"]
        == "sesecurityprivilege|sebackupprivilege|serestoreprivilege|setakeownershipprivilege|sedebugprivilege|sesystemenvironmentprivilege|seloaddriverprivilege|seimpersonateprivilege"
    )


def validate_4673(parsed_rec):
    assert parsed_rec["time"] == "04/30/2018 05:13:59 pm"
    assert parsed_rec["id"] == "sdgfhsdfhj-3245-dsf"
    assert parsed_rec["eventcode"] == "4673"
    assert parsed_rec["subject_security_id"] == "nt authority\\system"
    assert parsed_rec["subject_account_domain"] == "test.com"
    assert parsed_rec["subject_account_name"] == "fvjbvfjbvf$"
    assert parsed_rec["subject_logon_id"] == "0x3e7"
    assert (
        parsed_rec["service_server"]
        == "nt local security authority / authentication service"
    )
    assert parsed_rec["service_service_name"] == "lsaregisterlogonprocess()"
    assert parsed_rec["process_process_id"] == "0x234"
    assert parsed_rec["process_process_name"] == "c:\windows\system32\lsass.exe"
    assert parsed_rec["privileges"] == "setcbprivilege"


def validate_4722(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 09:56:10 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4722"
    assert parsed_rec["subject_security_id"] == "test.com\\dhgfckkcg"
    assert parsed_rec["subject_account_domain"] == "test.com"
    assert parsed_rec["subject_account_name"] == "dhgfckkcg"
    assert parsed_rec["subject_logon_id"] == "0x2d55e5ef7"
    assert parsed_rec["target_account_security_id"] == "test.com\\hgcghjj"
    assert parsed_rec["target_account_account_domain"] == "test.com"
    assert parsed_rec["target_account_account_name"] == "hgcghjj"


def validate_4720(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 09:56:10 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4720"
    assert parsed_rec["subject_security_id"] == "acme-fr\administrator"
    assert parsed_rec["subject_account_domain"] == "acme-fr"
    assert parsed_rec["subject_account_name"] == "administrator"
    assert parsed_rec["subject_logon_id"] == "0x20f9d"
    assert parsed_rec["new_account_security_id"] == "acme-fr\john.lockeaccount"
    assert parsed_rec["new_account_account_name"] == "john.locke"
    assert parsed_rec["new_account_domain_name"] == "acme-fr"
    assert parsed_rec["attributes_sam_account_name"] == "john.locke"
    assert parsed_rec["attributes_display_name"] == "john locke"
    assert parsed_rec["attributes_user_principal_name"] == "john.locke@acme-fr.local"
    assert parsed_rec["attributes_home_directory"] == "-"
    assert parsed_rec["attributes_home_drive"] == "-"
    assert parsed_rec["attributes_script_path"] == "-"
    assert parsed_rec["attributes_profile_path"] == "-"
    assert parsed_rec["attributes_user_workstations"] == "-"
    assert parsed_rec["attributes_password_last_set"] == "<never>"
    assert parsed_rec["attributes_account_expires"] == "<never>"
    assert parsed_rec["attributes_primary_group_id"] == "513"
    assert parsed_rec["attributes_allowed_to_delegate_to"] == "-"
    assert parsed_rec["attributes_old_uac_value"] == "0x0"
    assert parsed_rec["attributes_new_uac_value"] == "0x15"
    assert (
        parsed_rec["attributes_user_account_control"]
        == "account disabled|'password not required' - enabled|'normal account' - enable"
    )
    assert parsed_rec["attributes_user_parameters"] == "-"
    assert parsed_rec["attributes_sid_history"] == "-"
    assert parsed_rec["attributes_logon_hours"] == "<value not set>"
    assert parsed_rec["additional_information_privileges"] == "-"


def validate_4723(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 10:24:34 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4723"
    assert parsed_rec["subject_security_id"] == "test.com\\auser"
    assert parsed_rec["subject_account_domain"] == "test.com"
    assert parsed_rec["subject_account_name"] == "auser"
    assert parsed_rec["subject_logon_id"] == "0x258440926"
    assert parsed_rec["target_account_security_id"] == "test.com\\auser"
    assert parsed_rec["target_account_account_domain"] == "test.com"
    assert parsed_rec["target_account_account_name"] == "auser"
    assert parsed_rec["additional_information_privileges"] == "-"


def validate_4724(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 10:24:34 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4724"
    assert parsed_rec["subject_security_id"] == "test.com\\auser"
    assert parsed_rec["subject_account_domain"] == "test.com"
    assert parsed_rec["subject_account_name"] == "auser"
    assert parsed_rec["subject_logon_id"] == "0x258440926"
    assert parsed_rec["target_account_security_id"] == "test.com\\auser"
    assert parsed_rec["target_account_account_domain"] == "test.com"
    assert parsed_rec["target_account_account_name"] == "auser"


def validate_4725(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 10:24:34 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4725"
    assert parsed_rec["subject_security_id"] == "test.com\\auser"
    assert parsed_rec["subject_account_domain"] == "test.com"
    assert parsed_rec["subject_account_name"] == "auser"
    assert parsed_rec["subject_logon_id"] == "0x258440926"
    assert parsed_rec["target_account_security_id"] == "test.com\\auser"
    assert parsed_rec["target_account_account_domain"] == "test.com"
    assert parsed_rec["target_account_account_name"] == "auser"


def validate_4726(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 10:24:34 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4726"
    assert parsed_rec["subject_security_id"] == "test.com\\auser"
    assert parsed_rec["subject_account_domain"] == "test.com"
    assert parsed_rec["subject_account_name"] == "auser"
    assert parsed_rec["subject_logon_id"] == "0x258440926"
    assert parsed_rec["target_account_security_id"] == "test.com\\auser"
    assert parsed_rec["target_account_account_domain"] == "test.com"
    assert parsed_rec["target_account_account_name"] == "auser"
    assert parsed_rec["additional_information_privileges"] == "-"


def validate_4732(parsed_rec):
    assert parsed_rec["time"] == "09/19/2018 06:18:24 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4732"
    assert parsed_rec["subject_security_id"] == "nt authority\system"
    assert parsed_rec["subject_account_domain"] == "localhost.com"
    assert parsed_rec["subject_account_name"] == "testuser$"
    assert parsed_rec["subject_logon_id"] == "0x3e7"
    assert parsed_rec["member_security_id"] == "testuser\offer"
    assert parsed_rec["member_account_name"] == "-"
    assert parsed_rec["group_security_id"] == "testuser\offer remote assistance helpers"
    assert parsed_rec["group_group_name"] == "offer remote assistance helpers"
    assert parsed_rec["group_group_domain"] == "testuser"
    assert parsed_rec["additional_information_privileges"] == "-"


def validate_4738(parsed_rec):
    assert parsed_rec["time"] == "05/01/2018 05:41:37 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4738"
    assert parsed_rec["subject_security_id"] == "nt authority\system"
    assert parsed_rec["subject_account_domain"] == "prod"
    assert parsed_rec["subject_account_name"] == "esfdhf06$"
    assert parsed_rec["subject_logon_id"] == "0x3e7"
    assert parsed_rec["target_account_security_id"] == "esfdhf06\\atble"
    assert parsed_rec["target_account_account_domain"] == "esfdhf06"
    assert parsed_rec["target_account_account_name"] == "atble"
    assert parsed_rec["additional_information_privileges"] == "-"
    assert parsed_rec["changed_attributes_sam_account_name"] == "atble"
    assert parsed_rec["changed_attributes_home_directory"] == "<value not set>"
    assert parsed_rec["changed_attributes_primary_group_id"] == "513"
    assert parsed_rec["changed_attributes_user_principal_name"] == "-"
    assert parsed_rec["changed_attributes_profile_path"] == "<value not set>"
    assert parsed_rec["changed_attributes_user_workstations"] == "<value not set>"
    assert parsed_rec["changed_attributes_user_parameters"] == "-"
    assert parsed_rec["changed_attributes_script_path"] == "<value not set>"
    assert parsed_rec["changed_attributes_display_name"] == "mike atble"
    assert parsed_rec["changed_attributes_home_drive"] == "<value not set>"
    assert parsed_rec["changed_attributes_new_uac_value"] == "0x210"
    assert parsed_rec["changed_attributes_logon_hours"] == "all"
    assert parsed_rec["changed_attributes_account_expires"] == "<never>"
    assert parsed_rec["changed_attributes_old_uac_value"] == "0x210"
    assert parsed_rec["changed_attributes_password_last_set"] == "5/1/2018 5:41:37 am"
    assert parsed_rec["changed_attributes_allowedtodelegateto"] == "-"
    assert parsed_rec["changed_attributes_user_account_control"] == "-"
    assert parsed_rec["changed_attributes_sid_history"] == "-"


def validate_4740(parsed_rec):
    assert parsed_rec["time"] == "09/28/2018 01:53:37 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4740"
    assert parsed_rec["subject_security_id"] == "nt authority\system"
    assert parsed_rec["subject_account_domain"] == "nvdmz"
    assert parsed_rec["subject_account_name"] == "sdgbjsd02$"
    assert parsed_rec["subject_logon_id"] == "0x3e7"
    assert parsed_rec["account_locked_out_security_id"] == "sdgbjsd02\guest"
    assert parsed_rec["account_locked_out_account_name"] == "guest"
    assert parsed_rec["additional_information_caller_computer_name"] == "sdgbjsd01"


def validate_4743(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 10:24:34 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4743"
    assert parsed_rec["subject_security_id"] == "test.com\\auser"
    assert parsed_rec["subject_account_domain"] == "test.com"
    assert parsed_rec["subject_account_name"] == "auser"
    assert parsed_rec["subject_logon_id"] == "0x258440926"
    assert parsed_rec["target_account_security_id"] == "test.com\\auser"
    assert parsed_rec["target_account_account_domain"] == "test.com"
    assert parsed_rec["target_account_account_name"] == "auser"
    assert parsed_rec["additional_information_privileges"] == "-"


def validate_4756(parsed_rec):
    assert parsed_rec["time"] == "09/19/2018 06:18:24 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4756"
    assert parsed_rec["subject_security_id"] == "nt authority\\system"
    assert parsed_rec["subject_account_domain"] == "localhost.com"
    assert parsed_rec["subject_account_name"] == "testuser$"
    assert parsed_rec["subject_logon_id"] == "0x3e7"
    assert parsed_rec["member_security_id"] == "testuser\offer"
    assert parsed_rec["member_account_name"] == "-"
    assert parsed_rec["group_security_id"] == "testuser\offer remote assistance helpers"
    assert parsed_rec["group_group_name"] == "offer remote assistance helpers"
    assert parsed_rec["group_group_domain"] == "testuser"
    assert parsed_rec["additional_information_privileges"] == "-"


def validate_4767(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 10:24:34 am"
    assert parsed_rec["id"] == "tdr5d-fjfgg-687bv-klhk"
    assert parsed_rec["eventcode"] == "4767"
    assert parsed_rec["subject_security_id"] == "test.com\\auser"
    assert parsed_rec["subject_account_domain"] == "test.com"
    assert parsed_rec["subject_account_name"] == "auser"
    assert parsed_rec["subject_logon_id"] == "0x258440926"
    assert parsed_rec["target_account_security_id"] == "test.com\\auser"
    assert parsed_rec["target_account_account_domain"] == "test.com"
    assert parsed_rec["target_account_account_name"] == "auser"


def validate_4768(parsed_rec):
    assert parsed_rec["time"] == "09/27/2018 09:08:02 am"
    assert parsed_rec["id"] == "asd-eter-34235-fgd-346"
    assert parsed_rec["eventcode"] == "4768"
    assert parsed_rec["network_information_client_address"] == "::ffff:10.20.90.30"
    assert parsed_rec["network_information_client_port"] == "6349"
    assert parsed_rec["service_information_service_name"] == "asdfgrvk"
    assert parsed_rec["service_information_service_id"] == "localhost.com\\asdfgrvk"
    assert parsed_rec["account_information_account_name"] == "healthmailbox06ca30c"
    assert parsed_rec["account_information_supplied_realm_name"] == "localhost.com"
    assert (
        parsed_rec["account_information_user_id"]
        == "localhost.com\healthmailbox06ca30c"
    )
    assert parsed_rec["additional_information_result_code"] == "0x0"
    assert parsed_rec["additional_information_ticket_options"] == "0x40810010"
    assert parsed_rec["additional_information_ticket_encryption_type"] == "0x12"
    assert parsed_rec["additional_information_pre_authentication_type"] == "2"
    assert parsed_rec["certificate_information_certificate_issuer_name"] == ""
    assert parsed_rec["certificate_information_certificate_serial_number"] == ""
    assert parsed_rec["certificate_information_certificate_thumbprint"] == ""


def unknown_record_type(parsed_rec):
    raise Exception("Unknown eventcode appeared")


# Dispatch table: event code -> validator for that record type.
VALIDATE_DICT = {
    "4624": validate_4624,
    "4625": validate_4625,
    "4634": validate_4634,
    "4647": validate_4647,
    "4648": validate_4648,
    "4672": validate_4672,
    "4673": validate_4673,
    "4720": validate_4720,
    "4722": validate_4722,
    "4723": validate_4723,
    "4724": validate_4724,
    "4725": validate_4725,
    "4726": validate_4726,
    "4732": validate_4732,
    "4738": validate_4738,
    "4740": validate_4740,
    "4743": validate_4743,
    "4756": validate_4756,
    "4767": validate_4767,
    "4768": validate_4768,
    "4769": validate_4769,
    "4770": validate_4770,
    "4771": validate_4771,
    "4781": validate_4781,
    "4782": validate_4782,
    "4798": validate_4798,
    "5156": validate_5156,
    "5157": validate_5157,
}


def test_windows_event_parser():
    wep = WindowsEventParser()
    test_input_df = cudf.DataFrame()
    raw_colname = "_raw"
    test_input_df[raw_colname] = TEST_DATA
    test_output_df = wep.parse(test_input_df, raw_colname)
    # Every parsed record is checked by the validator for its event code.
    for parsed_rec in test_output_df.to_records():
        eventcode = parsed_rec["eventcode"]
        validate_func = VALIDATE_DICT.get(eventcode, unknown_record_type)
        validate_func(parsed_rec)


def test2_windows_event_parser():
    wep = WindowsEventParser(interested_eventcodes=["5156"])
    test_input_df = cudf.DataFrame()
    raw_colname = "_raw"
    test_input_df[raw_colname] = TEST_DATA
    test_output_df = wep.parse(test_input_df, raw_colname)
    parsed_rec = test_output_df.to_records()[0]
    assert parsed_rec["time"] == "04/03/2019 11:58:59 am"
    assert parsed_rec["id"] == "c3f48bba-90a1-4999-84a6-4da9d964d31d"
    assert parsed_rec["eventcode"] == "5156"
    assert parsed_rec["application_information_process_id"] == "4"
    assert parsed_rec["application_information_application_name"] == "system"
    assert parsed_rec["network_information_direction"] == "inbound"
    assert parsed_rec["network_information_source_address"] == "100.20.100.20"
    assert parsed_rec["network_information_source_port"] == "138"
    assert parsed_rec["network_information_destination_address"] == "100.20.100.30"
    assert parsed_rec["network_information_destination_port"] == "138"
    assert parsed_rec["network_information_protocol"] == "17"
    assert parsed_rec["filter_information_filter_run_time_id"] == "0"
    assert parsed_rec["filter_information_layer_name"] == "receive/accept"
    assert parsed_rec["filter_information_layer_run_time_id"] == "44"


def test3_windows_event_parser():
    expected_error = KeyError(
        "Regex for eventcode 24 is not available in the config file. Please choose from ['4624', '4625', '4634', '4647', '4648', '4672', '4673', '4720', '4722', '4723', '4724', '4725', '4726', '4732', '4738', '4740', '4743', '4756', '4767', '4768', '4769', '4770', '4771', '4781', '4782', '4798', '5156', '5157']"
    )
    # Requesting an unsupported event code raises KeyError at construction time.
    with pytest.raises(KeyError) as actual_error:
        WindowsEventParser(interested_eventcodes=["5156", "24"])
        assert actual_error == expected_error
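The test functions above are the closest thing this module has to usage documentation for WindowsEventParser, so a standalone sketch of that usage is worth spelling out. The snippet below is a minimal illustration, not part of the upstream file: it only relies on calls the tests themselves exercise (the constructor, the interested_eventcodes keyword, and parse over a raw-string column). The import path is an assumption based on the clx package layout, and the helper name is hypothetical.

# Minimal usage sketch (illustrative; not part of the original test module).
# Assumption: WindowsEventParser is importable from
# clx.parsers.windows_event_parser; the parse() call is taken directly from
# test_windows_event_parser above.
import cudf

from clx.parsers.windows_event_parser import WindowsEventParser


def parse_raw_windows_events(raw_events, eventcodes=None):
    # Restrict parsing to selected event codes when given; otherwise the
    # parser handles every event code it ships a regex config for.
    if eventcodes is None:
        wep = WindowsEventParser()
    else:
        wep = WindowsEventParser(interested_eventcodes=eventcodes)
    input_df = cudf.DataFrame()
    input_df["_raw"] = raw_events
    # Returns one structured record per input string, keyed by lowercase
    # field names such as "eventcode" and "subject_account_name".
    return wep.parse(input_df, "_raw")


# Example: parse only Windows Filtering Platform permit events (5156).
# filtered_df = parse_raw_windows_events(TEST_DATA, eventcodes=["5156"])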
80,401
109.139726
5,743
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_periodicity_detection.py
import cudf import cupy as cp from clx.analytics import periodicity_detection as pdd def test_to_periodogram(): expected_periodogram = cp.array( [ 2.14782297e-30, 8.83086404e-02, 5.23325583e-02, 1.99054116e-01, 9.58452790e-01, 5.40114641e00, 1.04142106e00, 2.46821568e-01, 5.06332729e-02, 3.44875313e-02, 2.97378597e-01, 7.47935264e-02, 3.87987331e-02, 6.56637625e-02, 1.34893777e-01, 1.13015864e00, 7.79747216e-03, 1.14757856e-01, 4.10151947e-01, 2.84306210e-01, 1.25890800e-02, 2.56152419e-01, 4.40248947e-01, 2.64140790e-01, 8.26499055e-01, 5.82104062e-01, 2.04041628e00, 4.24631265e00, 1.53295952e-01, 1.36986604e00, 6.93053951e00, 3.77611060e00, 3.79886075e00, 4.40471582e-01, 3.98427502e-01, 8.63914848e00, 1.13520190e-01, 7.77541742e-01, 1.65678473e00, 1.60364982e00, 2.53134486e00, 4.42140629e-01, 1.15635914e-01, 7.41331357e-01, 1.91152360e-01, 1.17622857e-01, 2.08266982e-01, 2.38361680e-02, 1.18239068e00, 1.03731817e00, 1.29349009e-01, 1.28179689e00, 1.91976049e-01, 1.17875358e-01, 1.10296708e-01, 7.84909233e-01, 1.34339221e-01, 6.32343429e-02, 8.14424044e-01, 3.22720512e-01, 3.22720512e-01, 8.14424044e-01, 6.32343429e-02, 1.34339221e-01, 7.84909233e-01, 1.10296708e-01, 1.17875358e-01, 1.91976049e-01, 1.28179689e00, 1.29349009e-01, 1.03731817e00, 1.18239068e00, 2.38361680e-02, 2.08266982e-01, 1.17622857e-01, 1.91152360e-01, 7.41331357e-01, 1.15635914e-01, 4.42140629e-01, 2.53134486e00, 1.60364982e00, 1.65678473e00, 7.77541742e-01, 1.13520190e-01, 8.63914848e00, 3.98427502e-01, 4.40471582e-01, 3.79886075e00, 3.77611060e00, 6.93053951e00, 1.36986604e00, 1.53295952e-01, 4.24631265e00, 2.04041628e00, 5.82104062e-01, 8.26499055e-01, 2.64140790e-01, 4.40248947e-01, 2.56152419e-01, 1.25890800e-02, 2.84306210e-01, 4.10151947e-01, 1.14757856e-01, 7.79747216e-03, 1.13015864e00, 1.34893777e-01, 6.56637625e-02, 3.87987331e-02, 7.47935264e-02, 2.97378597e-01, 3.44875313e-02, 5.06332729e-02, 2.46821568e-01, 1.04142106e00, 5.40114641e00, 9.58452790e-01, 1.99054116e-01, 5.23325583e-02, 8.83086404e-02, ] ) signal = cudf.Series( [ 3274342, 3426017, 3758781, 3050763, 3765678, 3864117, 3287878, 3397645, 3509973, 3844070, 3725934, 3287715, 3373505, 3909898, 3630503, 3070180, 3528452, 3801183, 3277141, 3625685, 3142354, 3140470, 3829668, 3623178, 3129990, 3549270, 3928100, 3331894, 3599137, 3978103, 3471284, 3220011, 3654968, 3789411, 3584702, 3512986, 3401678, 3774912, 3461276, 3549195, 3320150, 3655766, 3562267, 3525937, 3267010, 3441179, 3596828, 3208453, 3167370, 4036471, 3358863, 3169950, 3341009, 4010556, 3317385, 3132360, 3753407, 3808679, 3499711, 3248874, 3945531, 3837029, 3400068, 3625813, 3612960, 3523530, 3427957, 3749848, 3475452, 3289964, 3238560, 3428817, 3489523, 3429917, 3557773, 3432514, 3459938, 3440332, 3296710, 3711087, 3729805, 3447954, 3773181, 3855161, 3955022, 3252652, 3599792, 3769181, 3809061, 3495044, 3396623, 3680456, 3358306, 3368779, 3469016, 3169477, 3449529, 3738450, 3293116, 3303107, 3522923, 3746871, 3436093, 3124102, 3679797, 3829441, 3641894, 3654410, 3588528, 3628979, 3738718, 3737379, 3370349, 3583376, 3694398, 3559319, 3464402, 3421738, 3265208, ] ) actual_periodgram = pdd.to_periodogram(signal) assert cp.allclose(actual_periodgram, expected_periodogram) def test_filter_periodogram(): periodogram = cp.array( [ 2.14782297e-30, 8.83086404e-02, 5.23325583e-02, 1.99054116e-01, 9.58452790e-01, 5.40114641e00, 1.04142106e00, 2.46821568e-01, 5.06332729e-02, 3.44875313e-02, 2.97378597e-01, 7.47935264e-02, 3.87987331e-02, 6.56637625e-02, 1.34893777e-01, 1.13015864e00, 7.79747216e-03, 
1.14757856e-01, 4.10151947e-01, 2.84306210e-01, 1.25890800e-02, 2.56152419e-01, 4.40248947e-01, 2.64140790e-01, 8.26499055e-01, 5.82104062e-01, 2.04041628e00, 4.24631265e00, 1.53295952e-01, 1.36986604e00, 6.93053951e00, 3.77611060e00, 3.79886075e00, 4.40471582e-01, 3.98427502e-01, 8.63914848e00, 1.13520190e-01, 7.77541742e-01, 1.65678473e00, 1.60364982e00, 2.53134486e00, 4.42140629e-01, 1.15635914e-01, 7.41331357e-01, 1.91152360e-01, 1.17622857e-01, 2.08266982e-01, 2.38361680e-02, 1.18239068e00, 1.03731817e00, 1.29349009e-01, 1.28179689e00, 1.91976049e-01, 1.17875358e-01, 1.10296708e-01, 7.84909233e-01, 1.34339221e-01, 6.32343429e-02, 8.14424044e-01, ] ) expected_filtered = cp.array( [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.93053951, 0.0, 0.0, 0.0, 0.0, 8.63914848, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ) actual_filtered = pdd.filter_periodogram(periodogram, 0.001) assert cp.allclose(actual_filtered, expected_filtered) def test_to_domain(): periodogram = cp.array( [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.93053951, 0.0, 0.0, 0.0, 0.0, 8.63914848, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ) expected_signal = cp.array( [ 0.26389302, 0.25470949, 0.22783294, 0.18526557, 0.13035481, 0.06865704, 0.02978938, 0.08107234, 0.14211096, 0.19489424, 0.23454643, 0.25800256, 0.26352351, 0.25070444, 0.22048766, 0.17513786, 0.11830457, 0.05657488, 0.03572989, 0.09357597, 0.15352238, 0.20399289, 0.24060791, 0.26057394, 0.26241606, 0.24599927, 0.21253286, 0.16454466, 0.10602009, 0.04528412, 0.04528412, 0.10602009, 0.16454466, 0.21253286, 0.24599927, 0.26241606, 0.26057394, 0.24060791, 0.20399289, 0.15352238, 0.09357597, 0.03572989, 0.05657488, 0.11830457, 0.17513786, 0.22048766, 0.25070444, 0.26352351, 0.25800256, 0.23454643, 0.19489424, 0.14211096, 0.08107234, 0.02978938, 0.06865704, 0.13035481, 0.18526557, 0.22783294, 0.25470949, ] ) actual_signal = pdd.to_time_domain(periodogram) assert cp.allclose(actual_signal, expected_signal)
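# Illustrative pipeline sketch (not part of the original test file): the
# three tests above exercise the stages separately; chained, they recover the
# dominant periodic component of a signal. The half-spectrum slice mirrors
# the tests, where filter_periodogram and to_time_domain operate on the first
# len(signal) // 2 periodogram values (the spectrum above is symmetric), and
# 0.001 is the same filter threshold the filter test uses.
def example_periodicity_pipeline(signal):
    periodogram = pdd.to_periodogram(signal)
    half_spectrum = periodogram[: len(signal) // 2]
    filtered = pdd.filter_periodogram(half_spectrum, 0.001)
    return pdd.to_time_domain(filtered)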
11,756
21.183019
64
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_workflow.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import os
import pytest
import yaml
from clx.workflow.workflow import Workflow
from mockito import spy, verify, when
from cudf import DataFrame

input_df = cudf.DataFrame(
    {
        "firstname": ["Emma", "Ava", "Sophia"],
        "lastname": ["Olivia", "Isabella", "Charlotte"],
        "gender": ["F", "F", "F"],
    }
)
empty_df = DataFrame()


class TestWorkflowImpl(Workflow):
    def __init__(self, name, source=None, destination=None, custom_workflow_param=None):
        self.custom_workflow_param = custom_workflow_param
        Workflow.__init__(self, name, source, destination)

    def workflow(self, dataframe):
        dataframe["enriched"] = "enriched"
        return dataframe


dirname = os.path.split(os.path.abspath(__file__))[0]


@pytest.fixture
def set_workflow_config():
    """Sets the workflow config dictionary used for the unit tests"""
    source = {
        "type": "fs",
        "input_format": "csv",
        "input_path": "/path/to/input",
        "names": ["firstname", "lastname", "gender"],
        "delimiter": ",",
        "usecols": ["firstname", "lastname", "gender"],
        "dtype": ["str", "str", "str"],
        "header": 0,
    }
    destination = {
        "type": "fs",
        "output_format": "csv",
        "output_path": "/path/to/output",
        "index": False,
    }
    workflow_config = {
        "source": source,
        "destination": destination,
        "custom_workflow_param": "param_value",
    }
    return workflow_config, source, destination


@pytest.fixture
def mock_env_home(monkeypatch):
    dirname, filename = os.path.split(os.path.abspath(__file__))
    monkeypatch.setenv("HOME", dirname)


@pytest.mark.parametrize("input_df", [input_df])
def test_workflow_parameters(tmpdir, mock_env_home, set_workflow_config, input_df):
    """Tests the initialization and running of a workflow with passed in parameters"""
    source = set_workflow_config[1]
    destination = set_workflow_config[2]
    test_dir = tmpdir.mkdir("tmp_test_workflow")
    input_path = str(test_dir.join("person.csv"))
    input_df.to_csv(input_path, index=False)
    output_path = str(test_dir.join("output_parameters.csv"))
    source["input_path"] = input_path
    destination["output_path"] = output_path
    # Create new workflow with source and destination configurations
    test_workflow = TestWorkflowImpl(
        source=source,
        destination=destination,
        name="test-workflow",
        custom_workflow_param="test_param",
    )
    test_workflow.run_workflow()
    expected_df = input_df
    expected_df["enriched"] = "enriched"
    result_df = cudf.read_csv(output_path)
    assert result_df.equals(expected_df)
    assert test_workflow.custom_workflow_param == "test_param"


def test_workflow_config(tmpdir, mock_env_home, set_workflow_config):
    """Tests the initialization and running of a workflow with a configuration yaml file"""
    test_dir = tmpdir.mkdir("tmp_test_workflow")
    input_path = str(test_dir.join("person.csv"))
    input_df.to_csv(input_path, index=False)
    output_path = str(test_dir.join("output_config.csv"))
    # Write workflow.yaml file
    workflow_name = "test-workflow-config"
    workflow_config = set_workflow_config[0]
    workflow_config["destination"]["output_path"] = output_path
    workflow_config["destination"]["index"] = False
workflow_config["source"]["input_path"] = input_path workflow_config["custom_workflow_param"] = "param_value" write_config_file(workflow_config, workflow_name) # Run workflow test_workflow = TestWorkflowImpl(workflow_name) test_workflow.run_workflow() expected_df = input_df expected_df["enriched"] = "enriched" result_df = cudf.read_csv(output_path) assert result_df.equals(expected_df) # Check that custom workflow parameter was set from config file assert test_workflow.custom_workflow_param == "param_value" def test_workflow_config_error(mock_env_home, set_workflow_config): """Tests the error handling on incomplete workflow.yaml configuration file""" workflow_name = "test-workflow-error" test_config = {} test_config["source"] = set_workflow_config[1] write_config_file(test_config, workflow_name) with pytest.raises(Exception): TestWorkflowImpl(workflow_name) test_config = {} test_config["destination"] = set_workflow_config[2] write_config_file(test_config, workflow_name) with pytest.raises(Exception): TestWorkflowImpl(workflow_name) def test_workflow_no_data(tmpdir, mock_env_home, set_workflow_config): """ Test confirms that workflow is not run and output not written if no data is returned from the workflow io_reader """ # Create source and destination configurations source = set_workflow_config[1] destination = set_workflow_config[2] test_dir = tmpdir.mkdir("tmp_test_workflow") input_path = str(test_dir.join("input_empty.csv")) empty_df.to_csv(input_path) output_path = str(test_dir.join("output_empty.csv")) source["input_path"] = input_path destination["output_path"] = output_path # Create new workflow with source and destination configurations test_workflow = spy(TestWorkflowImpl( source=source, destination=destination, name="test-workflow-no-data", custom_workflow_param="test_param" )) test_workflow.run_workflow() # Verify workflow not run verify(test_workflow, times=0).workflow(...) # Verify that no output file created. assert not os.path.exists(output_path) def test_workflow_no_enriched_data(tmpdir, mock_env_home, set_workflow_config): """ Test confirms that if workflow produces no enriched data that no output file is created """ # Create source and destination configurations source = set_workflow_config[1] destination = set_workflow_config[2] test_dir = tmpdir.mkdir("tmp_test_workflow") input_path = str(test_dir.join("person.csv")) input_df.to_csv(input_path, index=False) output_path = str(test_dir.join("output_empty.csv")) source["input_path"] = input_path destination["output_path"] = output_path # Create new workflow with source and destination configurations test_workflow = spy(TestWorkflowImpl( source=source, destination=destination, name="test-workflow-no-data", custom_workflow_param="test_param" )) io_writer = spy(test_workflow._io_writer) # Return empty dataframe when workflow runs when(test_workflow).workflow(...).thenReturn(DataFrame()) # Verify io_writer does not write data verify(io_writer, times=0).write_data(...) # Verify that no output file created. 
    assert not os.path.exists(output_path)


def test_benchmark_decorator(tmpdir, mock_env_home, set_workflow_config):
    # Dummy function
    def func(self):
        return DataFrame()

    benchmarked_func = Workflow.benchmark(func)
    source = set_workflow_config[1]
    destination = set_workflow_config[2]
    test_dir = tmpdir.mkdir("tmp_test_workflow")
    input_path = str(test_dir.join("person.csv"))
    input_df.to_csv(input_path, index=False)
    output_path = str(test_dir.join("output_benchmark.csv"))
    source["input_path"] = input_path
    destination["output_path"] = output_path
    # Create new workflow with source and destination configurations
    tb = spy(
        TestWorkflowImpl(source=source, destination=destination, name="test-workflow")
    )
    benchmarked_func(tb.run_workflow)
    # Verify that run_workflow was not called, instead expect that benchmark
    # wrapper function will be called
    verify(tb, times=0).run_workflow(...)


def write_config_file(workflow_config, workflow_name):
    """Helper function to write workflow.yaml configuration file"""
    workflow_dir = "{0}/.config/clx/{1}".format(dirname, workflow_name)
    if not os.path.exists(workflow_dir):
        os.makedirs(workflow_dir)
    with open(workflow_dir + "/workflow.yaml", "w") as f:
        yaml.dump(workflow_config, f)
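# Illustrative sketch (not part of the original test file): the minimal
# pattern the tests above exercise. A Workflow subclass implements only
# workflow(self, dataframe); run_workflow() then reads from the configured
# source, applies the enrichment, and writes to the destination. The source
# and destination dicts follow the shape of the set_workflow_config fixture.
def example_workflow_usage(source, destination):
    class ExampleEnrichment(Workflow):
        def workflow(self, dataframe):
            dataframe["enriched"] = "enriched"
            return dataframe

    wf = ExampleEnrichment(
        name="example-enrichment", source=source, destination=destination
    )
    wf.run_workflow()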
8,619
33.618474
120
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_splunk_alert_workflow.py
# Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cudf import pytest from clx.workflow.splunk_alert_workflow import SplunkAlertWorkflow @pytest.mark.parametrize("threshold", [2.0]) @pytest.mark.parametrize("interval", ["day"]) @pytest.mark.parametrize("window", [7]) def test_splunk_alert_workflow(threshold, interval, window): """Tests the splunk alert analysis workflow""" sa_workflow = SplunkAlertWorkflow( "splunk-alert-workflow", threshold=threshold, interval=interval, window=window, raw_data_col_name="raw", ) TEST_DATA = [ '1515699589, search_name="Test Search Name", orig_time="1515699589", info_max_time="1566346500.000000000", info_min_time="1566345300.000000000", info_search_time="1566305689.361160000", message.description="Test Message Description", message.hostname="msg.test.hostname", message.ip="100.100.100.123", message.user_name="user@test.com", severity="info", urgency="medium"', '1515705792, search_name="Test Search Name 2", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", src_ip="10.01.01.123", src_ip="10.01.01.1, count="19", info_max_time="1548772200.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"', '1515867169, search_name="Test Search Name 3", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", count="19", info_max_time="1548234811.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan 
cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"', '1515943943, search_name="Endpoint - Brute Force against Known User - Rule", orig_source="100.20.2.21", orig_source="FEDEX-MA", orig_source="localhost.com", failure="1104", first="Pattrick", identity="pjame", info_max_time="1546382400.000000000", info_min_time="1546378800.000000000", info_search_time="1546382850.589570000", success="8", user="pjame', '1515983612, search_name="Manual Notable Event - Rule", _time="1554290847", app="SplunkEnterpriseSecuritySuite", creator="test@nvidia.com", info_max_time="+Infinity", info_min_time="0.000", info_search_time="1554290847.423961000", owner="test@nvidia.com", rule_description="FireEye NX alert for Incident Review with Major Severity", rule_title="FireEye NX alert for Incident Review(Majr)", security_domain="endpoint", status="0", urgency="medium"', '1516034744, search_name="Endpoint - FireEye NX alert for Incident Review (Minor) - Rule", category="riskware-callback", dest_ip="10.15.90.150", occurred="Mar 09 2019 02:36:00 UTC", signature="Android.Adware.Batmobil", src_ip="10.15.90.151", dest_port="80", src_port="40472", orig_time="1552098960", info_max_time="1552099380.000000000", info_min_time="1552098780.000000000", info_search_time="1552052094.393543000", severity="minr", src_host="ip-10.5.13.compute.internal"', '1516112793, search_name=\\"Endpoint - Host With Malware Detected (Quarantined or Waived) - Rule\\", count=\\"1\\", dest=\\"TEST-01\\", dest_priority=\\"medium\\", info_max_time=\\"1511389440.000000000\\", info_min_time=\\"1511388840.000000000\\", info_search_time=\\"1511389197.841039000\\", info_sid=\\"rt_scheduler_dGNhcnJvbGxAbnZpZGlhLmNvbQ__SplunkEnterpriseSecuritySuite__RMD5c5145919d43bdffc_at_1511389196_22323\\", lastTime=\\"1511388996.202094\\"', '1516238826, search_name="Test Search Name", orig_time="1516238826", info_max_time="1566346500.000000000", info_min_time="1566345300.000000000", info_search_time="1566305689.361160000", message.description="Test Message Description", message.hostname="msg.test.hostname", message.ip="100.100.100.123", message.user_name="user@test.com", severity="info", urgency="medium"', '1516381833, search_name="Test Search Name 2", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", src_ip="10.01.01.123", src_ip="10.01.01.1, count="19", info_max_time="1548772200.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp 
cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"', '1516515000, search_name="Test Search Name 3", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", count="19", info_max_time="1548234811.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"', '1516618560, search_name="Endpoint - Brute Force against Known User - Rule", orig_source="100.20.2.21", orig_source="FEDEX-MA", orig_source="localhost.com", failure="1104", first="Pattrick", identity="pjame", info_max_time="1546382400.000000000", info_min_time="1546378800.000000000", info_search_time="1546382850.589570000", success="8", user="pjame', '1516797485, search_name="Manual Notable Event - Rule", _time="1554290847", app="SplunkEnterpriseSecuritySuite", creator="test@nvidia.com", info_max_time="+Infinity", info_min_time="0.000", info_search_time="1554290847.423961000", owner="test@nvidia.com", rule_description="FireEye NX alert for Incident Review with Major Severity", rule_title="FireEye NX alert for Incident Review(Majr)", security_domain="endpoint", status="0", urgency="medium"', '1516988701, search_name="Endpoint - FireEye NX alert for Incident Review (Minor) - Rule", category="riskware-callback", dest_ip="10.15.90.150", occurred="Mar 09 2019 02:36:00 UTC", signature="Android.Adware.Batmobil", src_ip="10.15.90.151", dest_port="80", src_port="40472", orig_time="1552098960", info_max_time="1552099380.000000000", info_min_time="1552098780.000000000", info_search_time="1552052094.393543000", severity="minr", src_host="ip-10.5.13.compute.internal"', '1517106577, search_name=\\"Endpoint - Host With Malware Detected (Quarantined or Waived) - Rule\\", count=\\"1\\", dest=\\"TEST-01\\", dest_priority=\\"medium\\", info_max_time=\\"1511389440.000000000\\", info_min_time=\\"1511388840.000000000\\", info_search_time=\\"1511389197.841039000\\", info_sid=\\"rt_scheduler_dGNhcnJvbGxAbnZpZGlhLmNvbQ__SplunkEnterpriseSecuritySuite__RMD5c5145919d43bdffc_at_1511389196_22323\\", lastTime=\\"1511388996.202094\\"', '1517236429, search_name="Test Search Name", 
orig_time="1517236429", info_max_time="1566346500.000000000", info_min_time="1566345300.000000000", info_search_time="1566305689.361160000", message.description="Test Message Description", message.hostname="msg.test.hostname", message.ip="100.100.100.123", message.user_name="user@test.com", severity="info", urgency="medium"', '1517304151, search_name="Test Search Name 2", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", src_ip="10.01.01.123", src_ip="10.01.01.1, count="19", info_max_time="1548772200.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"', '1517627976, search_name="Test Search Name 3", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", count="19", info_max_time="1548234811.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"', '1517772505, search_name="Endpoint - Brute Force against Known User - Rule", orig_source="100.20.2.21", orig_source="FEDEX-MA", orig_source="localhost.com", failure="1104", first="Pattrick", identity="pjame", info_max_time="1546382400.000000000", info_min_time="1546378800.000000000", info_search_time="1546382850.589570000", success="8", user="pjame', '1517798946, search_name="Manual Notable Event - Rule", _time="1554290847", app="SplunkEnterpriseSecuritySuite", creator="test@nvidia.com", info_max_time="+Infinity", info_min_time="0.000", 
info_search_time="1554290847.423961000", owner="test@nvidia.com", rule_description="FireEye NX alert for Incident Review with Major Severity", rule_title="FireEye NX alert for Incident Review(Majr)", security_domain="endpoint", status="0", urgency="medium"', '1517811562, search_name="Endpoint - FireEye NX alert for Incident Review (Minor) - Rule", category="riskware-callback", dest_ip="10.15.90.150", occurred="Mar 09 2019 02:36:00 UTC", signature="Android.Adware.Batmobil", src_ip="10.15.90.151", dest_port="80", src_port="40472", orig_time="1552098960", info_max_time="1552099380.000000000", info_min_time="1552098780.000000000", info_search_time="1552052094.393543000", severity="minr", src_host="ip-10.5.13.compute.internal"', '1518083921, search_name=\\"Endpoint - Host With Malware Detected (Quarantined or Waived) - Rule\\", count=\\"1\\", dest=\\"TEST-01\\", dest_priority=\\"medium\\", info_max_time=\\"1511389440.000000000\\", info_min_time=\\"1511388840.000000000\\", info_search_time=\\"1511389197.841039000\\", info_sid=\\"rt_scheduler_dGNhcnJvbGxAbnZpZGlhLmNvbQ__SplunkEnterpriseSecuritySuite__RMD5c5145919d43bdffc_at_1511389196_22323\\", lastTime=\\"1511388996.202094\\"', '1518119960, search_name="Test Search Name 3", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", count="19", info_max_time="1548234811.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"', '1518157223, search_name="Endpoint - Brute Force against Known User - Rule", orig_source="100.20.2.21", orig_source="FEDEX-MA", orig_source="localhost.com", failure="1104", first="Pattrick", identity="pjame", info_max_time="1546382400.000000000", info_min_time="1546378800.000000000", info_search_time="1546382850.589570000", success="8", user="pjame', '1518261255, search_name="Manual Notable Event - Rule", _time="1554290847", app="SplunkEnterpriseSecuritySuite", creator="test@nvidia.com", info_max_time="+Infinity", info_min_time="0.000", info_search_time="1554290847.423961000", owner="test@nvidia.com", rule_description="FireEye NX alert for Incident Review with Major Severity", rule_title="FireEye NX alert for Incident Review(Majr)", security_domain="endpoint", status="0", urgency="medium"', ] raw_df = cudf.DataFrame({"raw": TEST_DATA}) actual_df = sa_workflow.workflow(raw_df) expected_df = cudf.DataFrame() expected_df["time"] = [ 1517702400, 1516924800, 1517097600, 1517788800, 1517184000, 1517270400, 1517616000 ] expected_df["rule"] = [ 
"Endpoint - Brute Force against Known User - Rule", "Endpoint - FireEye NX alert for Incident Review (Minor) - Rule", "Endpoint - Host With Malware Detected (Quarantined or Waived) - Rule", "Manual Notable Event - Rule", "Test Search Name", "Test Search Name 2", "Test Search Name 3", ] for col in expected_df.columns: assert expected_df[col].equals(actual_df[col]) @pytest.mark.parametrize("threshold", [1.5, 3.05, 1.0]) @pytest.mark.parametrize("interval", ["hour"]) @pytest.mark.parametrize("window", [24, 48]) def test_splunk_alert_workflow_hour(threshold, interval, window): SplunkAlertWorkflow( "splunk-alert-workflow", threshold=threshold, interval=interval ) @pytest.mark.parametrize("threshold", [2.0]) @pytest.mark.parametrize("interval", ["minute"]) def test_splunk_alert_workflow_min(threshold, interval): with pytest.raises(Exception): SplunkAlertWorkflow( "splunk-alert-workflow", threshold=threshold, interval=interval )
19,130
192.242424
1,358
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_whois.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
import datetime
import whois
from clx.osi.whois import WhoIsLookupClient
from mockito import when

domains = ["nvidia.com"]
datetime_1 = datetime.datetime(2020, 5, 17)
datetime_2 = datetime.datetime(2020, 5, 18)
client = WhoIsLookupClient()

response = {
    "domain_name": "NVIDIA.COM",
    "registrar": "Safenames Ltd",
    "emails": [
        "abuse@safenames.net",
        "wadmpfvzi5ei@idp.email",
        "hostmaster@safenames.net",
    ],
    "updated_date": [datetime_1, datetime_2],
}


@pytest.mark.parametrize("client", [client])
@pytest.mark.parametrize("domains", [domains])
def test_whois(client, domains):
    expected_output = [
        {
            "domain_name": "NVIDIA.COM",
            "registrar": "Safenames Ltd",
            "emails": "abuse@safenames.net,wadmpfvzi5ei@idp.email,hostmaster@safenames.net",
            "updated_date": "05-17-2020 00:00:00,05-18-2020 00:00:00",
        }
    ]
    when(whois).whois(...).thenReturn(response)
    actual_output = client.whois(domains)
    assert actual_output[0]["domain_name"] == "NVIDIA.COM"
    assert len(actual_output) == len(domains)
    assert actual_output == expected_output
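# Illustrative sketch (not part of the original test file): per the test
# above, whois() returns one dict per input domain, with list-valued fields
# flattened to comma-separated strings and datetimes rendered as
# "MM-DD-YYYY HH:MM:SS" strings.
def example_whois_lookup():
    lookup_client = WhoIsLookupClient()
    results = lookup_client.whois(["nvidia.com"])
    return results[0]["registrar"]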
1,727
31.603774
88
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_port_heuristic.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
from clx.heuristics import ports


def test_major_ports():
    input_addr_col = cudf.Series(
        ["10.0.75.1", "10.0.75.1", "10.0.75.1", "10.0.75.255", "10.110.104.107"]
    )
    input_port_col = cudf.Series([137, 137, 7680, 137, 7680])
    expected = cudf.DataFrame()
    expected["addr"] = ["10.0.75.1", "10.0.75.255", "10.110.104.107"]
    expected["port"] = [137, 137, 7680]
    expected["service"] = ["netbios-ns", "netbios-ns", "pando-pub"]
    expected["conns"] = [2, 1, 1]
    actual = ports.major_ports(input_addr_col, input_port_col)
    assert actual.equals(expected)


def test_major_ports_ephemeral():
    input_addr_col = cudf.Series(["10.0.75.1", "10.0.75.2", "10.0.75.3", "10.0.75.4"])
    input_port_col = cudf.Series([50000, 60000, 20000, 80])
    expected = cudf.DataFrame()
    expected["addr"] = ["10.0.75.1", "10.0.75.2", "10.0.75.3", "10.0.75.4"]
    expected["port"] = [50000, 60000, 20000, 80]
    expected["service"] = ["ephemeral", "ephemeral", "dnp", "http"]
    expected["conns"] = [1, 1, 1, 1]
    actual = ports.major_ports(input_addr_col, input_port_col, eph_min=50000)
    assert actual.equals(expected)


def test_major_ports_min_conns():
    input_addr_col = cudf.Series(
        ["10.0.75.1", "10.0.75.1", "10.0.75.1", "10.0.75.255", "10.110.104.107"]
    )
    input_port_col = cudf.Series([137, 137, 7680, 137, 7680])
    expected = cudf.DataFrame()
    expected["addr"] = ["10.0.75.1"]
    expected["port"] = [137]
    expected["service"] = ["netbios-ns"]
    expected["conns"] = [2]
    actual = ports.major_ports(input_addr_col, input_port_col, min_conns=2)
    assert actual.equals(expected)


def test_major_ports_all_params():
    input_addr_col = cudf.Series(
        ["10.0.75.1", "10.0.75.1", "10.0.75.1", "10.0.75.255",
         "10.110.104.107", "10.110.104.107"]
    )
    input_port_col = cudf.Series([137, 137, 7680, 137, 7680, 7680])
    expected = cudf.DataFrame()
    expected["addr"] = ["10.0.75.1", "10.110.104.107"]
    expected["port"] = [137, 7680]
    expected["service"] = ["netbios-ns", "ephemeral"]
    expected["conns"] = [2, 2]
    actual = ports.major_ports(
        input_addr_col, input_port_col, min_conns=2, eph_min=7000
    )
    assert actual.equals(expected)
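# Illustrative sketch (not part of the original test file): per the tests
# above, major_ports counts connections per (addr, port) pair and labels the
# service; eph_min collapses ports at or above the cutoff to "ephemeral" and
# min_conns drops low-volume pairs. The cutoff values below are arbitrary.
def example_major_ports(addr_col, port_col):
    return ports.major_ports(addr_col, port_col, min_conns=2, eph_min=10000)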
2,786
35.194805
124
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_farsight.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
import requests
from mockito import when, mock
from clx.osi.farsight import FarsightLookupClient

ip = "100.0.0.1"
server = "https://localhost"
apikey = "dummy-api-key"

ip_response = mock(
    {
        "status_code": 200,
        "raise_for_status": lambda: None,
        "text": '{"count":69435,"time_first":1428433465,"time_last":1538014110,"rrname":"io.","rrtype":"A","rdata":"100.0.0.1"}',
    },
    spec=requests.Response,
)
rrset_response = mock(
    {
        "status_code": 200,
        "raise_for_status": lambda: None,
        "text": '{"count":81556,"time_first":1374184718,"time_last":1564909243,"rrname":"www.dnsdb.info.","rrtype":"CNAME","bailiwick":"dnsdb.info.","rdata":["dnsdb.info."]}',
    },
    spec=requests.Response,
)
rdata_name_response = mock(
    {
        "status_code": 200,
        "raise_for_status": lambda: None,
        "text": '{"count":497,"time_first":1386638408,"time_last":1561176503,"rrname":"81.64-26.140.160.66.in-addr.arpa.","rrtype":"PTR","rdata":"www.farsightsecurity.com."}',
    },
    spec=requests.Response,
)


@pytest.mark.parametrize("server", [server])
@pytest.mark.parametrize("apikey", [apikey])
@pytest.mark.parametrize("ip", [ip])
@pytest.mark.parametrize("ip_response", [ip_response])
def test_query_rdata_ip(server, apikey, ip, ip_response):
    client = FarsightLookupClient(server, apikey, limit=1)
    when(requests).get(...).thenReturn(ip_response)
    result = client.query_rdata_ip(ip)
    assert len(result) == 1


@pytest.mark.parametrize("server", [server])
@pytest.mark.parametrize("apikey", [apikey])
@pytest.mark.parametrize("ip", [ip])
@pytest.mark.parametrize("ip_response", [ip_response])
def test_query_rdata_ip2(server, apikey, ip, ip_response):
    client = FarsightLookupClient(server, apikey, limit=1)
    when(requests).get(...).thenReturn(ip_response)
    result = client.query_rdata_ip(ip, before=1428433465, after=1538014110)
    assert len(result) == 1


@pytest.mark.parametrize("server", [server])
@pytest.mark.parametrize("apikey", [apikey])
@pytest.mark.parametrize("rrset_response", [rrset_response])
def test_query_rrset(server, apikey, rrset_response):
    client = FarsightLookupClient(server, apikey)
    when(requests).get(...).thenReturn(rrset_response)
    result = client.query_rrset("www.dnsdb.info")
    result = result[0]
    assert "count" in result
    assert "time_first" in result
    assert "time_last" in result
    assert "rrname" in result
    assert "rrtype" in result
    assert "bailiwick" in result
    assert "rdata" in result
    assert result["bailiwick"] == "dnsdb.info."


@pytest.mark.parametrize("server", [server])
@pytest.mark.parametrize("apikey", [apikey])
@pytest.mark.parametrize("rrset_response", [rrset_response])
def test_query_rrset2(server, apikey, rrset_response):
    client = FarsightLookupClient(server, apikey)
    when(requests).get(...).thenReturn(rrset_response)
    result = client.query_rrset(
        "www.dnsdb.info",
        rrtype="CNAME",
        bailiwick="dnsdb.info.",
        before=1374184718,
        after=1564909243,
    )
    result = result[0]
    assert "count" in result
    assert "time_first" in result
    assert "time_last" in result
    assert "rrname" in result
    assert "rrtype" in result
    assert "bailiwick" in result
    assert "rdata" in result
    assert result["bailiwick"] == "dnsdb.info."


@pytest.mark.parametrize("server", [server])
@pytest.mark.parametrize("apikey", [apikey])
@pytest.mark.parametrize("rdata_name_response", [rdata_name_response])
def test_query_rdata_name(server, apikey, rdata_name_response):
    client = FarsightLookupClient(server, apikey)
    when(requests).get(...).thenReturn(rdata_name_response)
    result = client.query_rdata_name("www.farsightsecurity.com")
    result = result[0]
    assert "count" in result
    assert "time_first" in result
    assert "time_last" in result
    assert "rrname" in result
    assert "rrtype" in result
    assert result["rdata"] == "www.farsightsecurity.com."


@pytest.mark.parametrize("server", [server])
@pytest.mark.parametrize("apikey", [apikey])
@pytest.mark.parametrize("rdata_name_response", [rdata_name_response])
def test_query_rdata_name2(server, apikey, rdata_name_response):
    client = FarsightLookupClient(server, apikey)
    when(requests).get(...).thenReturn(rdata_name_response)
    result = client.query_rdata_name(
        "www.farsightsecurity.com", rrtype="PTR", before=1386638408, after=1561176503
    )
    result = result[0]
    assert "count" in result
    assert "time_first" in result
    assert "time_last" in result
    assert "rrname" in result
    assert "rrtype" in result
    assert result["rdata"] == "www.farsightsecurity.com."
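# Illustrative sketch (not part of the original test file): the three lookup
# styles exercised above, here without the mocked HTTP responses. The server
# URL and API key are placeholders.
def example_farsight_lookups():
    client = FarsightLookupClient("https://localhost", "dummy-api-key", limit=1)
    by_ip = client.query_rdata_ip("100.0.0.1")
    by_rrset = client.query_rrset("www.dnsdb.info", rrtype="CNAME")
    by_name = client.query_rdata_name("www.farsightsecurity.com", rrtype="PTR")
    return by_ip, by_rrset, by_name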
5,317
34.932432
175
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_zeek.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import cudf
import clx.parsers.zeek as zeek


def test_parse_log_file(tmpdir):
    header = "#separator\t\\x09\n\
#set_separator\t,\n\
#empty_field\t(empty)\n\
#unset_field\t-\n\
#path\tconn\n\
#open\t2015-01-24-16-49-04\n\
#fields\tts\tuid\tid.orig_h\tid.orig_p\tid.resp_h\tid.resp_p\tproto\tservice\tduration\torig_bytes\tresp_bytes\tconn_state\tlocal_orig\tmissed_bytes\thistory\torig_pkts\torig_ip_bytes\tresp_pkts\tresp_ip_bytes\ttunnel_parents\n\
#types\ttime\tstring\taddr\tport\taddr\tport\tenum\tstring\tinterval\tcount\tcount\tstring\tbool\tcount\tstring\tcount\tcount\tcount\tcount\tset[string]\n"
    actual = cudf.DataFrame()
    actual["ts"] = [1421927450.370337, 1421927658.777193]
    actual["ts"] = actual["ts"].astype("float64")
    actual["uid"] = ["CFlyqZgM1g71BYPB6", "CnKVxKIj403JsAK5k"]
    actual["id.orig_h"] = ["175.45.176.3", "175.45.176.1"]
    actual["id.orig_p"] = [7177, 24809]
    actual["id.orig_p"] = actual["id.orig_p"].astype("int64")
    actual["id.resp_h"] = ["149.171.126.16", "149.171.126.14"]
    actual["id.resp_p"] = [80, 443]
    actual["id.resp_p"] = actual["id.resp_p"].astype("int64")
    actual["proto"] = ["tcp", "tcp"]
    actual["service"] = ["http", "http"]
    actual["duration"] = [0.214392, 2.37679]
    actual["duration"] = actual["duration"].astype("float64")
    actual["orig_bytes"] = [194, 188]
    actual["orig_bytes"] = actual["orig_bytes"].astype("int64")
    actual["resp_bytes"] = [12282, 0]
    actual["resp_bytes"] = actual["resp_bytes"].astype("int64")
    actual["conn_state"] = ["SF", "SF"]
    actual["local_orig"] = [False, False]
    actual["missed_bytes"] = [12282, 0]
    actual["missed_bytes"] = actual["missed_bytes"].astype("int64")
    actual["history"] = ["ShADdFfa", "ShADFfa"]
    actual["orig_pkts"] = [12, 14]
    actual["orig_pkts"] = actual["orig_pkts"].astype("int64")
    actual["orig_ip_bytes"] = [900, 1344]
    actual["orig_ip_bytes"] = actual["orig_ip_bytes"].astype("int64")
    actual["resp_pkts"] = [24, 6]
    actual["resp_pkts"] = actual["resp_pkts"].astype("int64")
    actual["resp_ip_bytes"] = [25540, 256]
    actual["resp_ip_bytes"] = actual["resp_ip_bytes"].astype("int64")
    actual["tunnel_parents"] = ["(empty)", "(empty)"]
    footer = "#close^I2015-01-24-16-50-35"
    fname = tmpdir.mkdir("tmp_clx_zeek_test").join("tst_zeek_conn_log.csv")
    actual.to_csv(fname, sep="\t", index=False, header=False)
    with open(fname, "r+") as f:
        content = f.read()
        f.seek(0, 0)
        f.write(header + content + footer)
    parsed = zeek.parse_log_file(fname)
    assert np.allclose(parsed["ts"].values_host, actual["ts"].values_host)
    assert parsed["uid"].equals(actual["uid"])
    assert parsed["id.orig_h"].equals(actual["id.orig_h"])
    assert parsed["id.orig_p"].equals(actual["id.orig_p"])
    assert parsed["id.resp_h"].equals(actual["id.resp_h"])
    assert parsed["id.resp_p"].equals(actual["id.resp_p"])
    assert parsed["proto"].equals(actual["proto"])
    assert parsed["service"].equals(actual["service"])
    assert np.allclose(parsed["duration"].values_host, actual["duration"].values_host)
    assert parsed["orig_bytes"].equals(actual["orig_bytes"])
    assert parsed["resp_bytes"].equals(actual["resp_bytes"])
    assert parsed["conn_state"].equals(actual["conn_state"])
    assert parsed["local_orig"].equals(actual["local_orig"])
    assert parsed["missed_bytes"].equals(actual["missed_bytes"])
    assert parsed["history"].equals(actual["history"])
    assert parsed["orig_pkts"].equals(actual["orig_pkts"])
    assert parsed["orig_ip_bytes"].equals(actual["orig_ip_bytes"])
    assert parsed["resp_pkts"].equals(actual["resp_pkts"])
    assert parsed["resp_ip_bytes"].equals(actual["resp_ip_bytes"])
    assert parsed["tunnel_parents"].equals(actual["tunnel_parents"])
4,483
45.708333
236
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_multiclass_sequence_classifier.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
from os import path
import cudf
import torch
import transformers
from cuml.model_selection import train_test_split
from faker import Faker
from clx.analytics.multiclass_sequence_classifier import MulticlassSequenceClassifier

sc = MulticlassSequenceClassifier()
if torch.cuda.is_available():
    sc.init_model("bert-base-uncased", num_labels=3)


def test_train_model():
    if torch.cuda.is_available():
        fake = Faker()
        email_col = [fake.text() for _ in range(200)]
        label_col = [random.randint(0, 2) for _ in range(200)]
        emails_gdf = cudf.DataFrame(
            list(zip(email_col, label_col)), columns=["email", "label"]
        )
        X_train, X_test, y_train, y_test = train_test_split(
            emails_gdf, "label", train_size=0.8, random_state=10
        )
        sc.train_model(
            X_train["email"],
            y_train,
            learning_rate=3e-5,
            max_seq_len=128,
            batch_size=6,
            epochs=1,
        )
        assert isinstance(
            sc._model.module,
            transformers.models.bert.modeling_bert.BertForSequenceClassification,
        )


def test_evaluate_model():
    if torch.cuda.is_available():
        X_test = cudf.Series(["email 1", "email 2"])
        y_test = cudf.Series([0, 0])
        accuracy = sc.evaluate_model(X_test, y_test, max_seq_len=128, batch_size=32)
        assert accuracy >= 0.0 and accuracy <= 1.0


def test_predict():
    if torch.cuda.is_available():
        X_test = cudf.Series(["email 1", "email 2"])
        preds = sc.predict(X_test, max_seq_len=128)
        assert preds.isin([0, 1, 2]).equals(cudf.Series([True, True]))


def test_save_model(tmpdir):
    if torch.cuda.is_available():
        sc.save_model(tmpdir)
        assert path.exists(str(tmpdir.join("config.json")))
        assert path.exists(str(tmpdir.join("pytorch_model.bin")))


def test_save_checkpoint(tmpdir):
    if torch.cuda.is_available():
        fname = str(
            tmpdir.mkdir("tmp_test_sequence_classifier").join("sc_checkpoint.tar")
        )
        sc.save_checkpoint(fname)
        assert path.exists(fname)


def test_load_checkpoint(tmpdir):
    if torch.cuda.is_available():
        fname = str(
            tmpdir.mkdir("tmp_test_sequence_classifier").join("sc_checkpoint.tar")
        )
        sc.save_checkpoint(fname)
        assert path.exists(fname)
        sc.load_checkpoint(fname)
        assert isinstance(
            sc._model.module,
            transformers.models.bert.modeling_bert.BertForSequenceClassification,
        )
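# Illustrative end-to-end sketch (not part of the original test file): the
# init / train / predict sequence mirrors the calls above; hyperparameter
# values are the ones used in the tests, not tuned recommendations. Assumes
# a CUDA-capable GPU, as the tests do.
def example_train_and_predict(train_texts, train_labels, new_texts):
    clf = MulticlassSequenceClassifier()
    clf.init_model("bert-base-uncased", num_labels=3)
    clf.train_model(
        train_texts,
        train_labels,
        learning_rate=3e-5,
        max_seq_len=128,
        batch_size=6,
        epochs=1,
    )
    return clf.predict(new_texts, max_seq_len=128)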
3,132
32.329787
96
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_kafka_writer.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import pytest
from mockito import when, mock, verify
from clx.io.writer.kafka_writer import KafkaWriter

input_df = cudf.DataFrame(
    {
        "firstname": ["Emma", "Ava", "Sophia"],
        "lastname": ["Olivia", "Isabella", "Charlotte"],
        "gender": ["F", "F", "F"],
    }
)
kafka_topic = "publisher_topic_t1"
batch_size = 100
delimiter = ","
producer = mock()


@pytest.mark.parametrize("kafka_topic", [kafka_topic])
@pytest.mark.parametrize("batch_size", [batch_size])
@pytest.mark.parametrize("delimiter", [delimiter])
@pytest.mark.parametrize("producer", [producer])
@pytest.mark.parametrize("input_df", [input_df])
def test_write_data(kafka_topic, batch_size, delimiter, producer, input_df):
    writer = KafkaWriter(kafka_topic, batch_size, delimiter, producer)
    when(writer.producer).__len__().thenReturn(1)
    writer.write_data(input_df)
    verify(writer.producer, times=3).produce(...)
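# Illustrative sketch (not part of the original test file): the test above
# passes a mockito mock as the producer, so the concrete producer type is an
# assumption here; in real use, any object exposing the produce()/len()
# interface KafkaWriter drives would be supplied. Arguments are positional,
# matching the test: topic, batch size, delimiter, producer.
def example_kafka_write(producer, df):
    writer = KafkaWriter("publisher_topic_t1", 100, ",", producer)
    writer.write_data(df)  # one produce() call per row, per the verification above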
1,512
34.186047
76
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_cybert.py
# Copyright (c) 2020, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cudf import cupy import pandas as pd import numpy as np import torch import s3fs import transformers from clx.analytics.cybert import Cybert S3_BASE_PATH = "models.huggingface.co/bert/raykallen/cybert_apache_parser" CONFIG_FILENAME = "config.json" MODEL_FILENAME = "pytorch_model.bin" fs = s3fs.S3FileSystem(anon=True) fs.get(S3_BASE_PATH + "/" + MODEL_FILENAME, MODEL_FILENAME) fs.get(S3_BASE_PATH + "/" + CONFIG_FILENAME, CONFIG_FILENAME) cyparse = Cybert() input_logs = cudf.Series(['109.169.248.247 - -', 'POST /administrator/index.php HTTP/1.1 200 4494']) def get_expected_preprocess(): tokens = torch.tensor( [[11523, 119, 20065, 119, 27672, 119, 26049, 118, 118, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [153, 9025, 1942, 120, 11065, 120, 7448, 119, 185, 16194, 145, 20174, 2101, 120, 122, 119, 122, 2363, 3140, 1580, 1527, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], device='cuda:0' ) masks = torch.tensor( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], device='cuda:0' ) metadata = cupy.array([[0, 0, 8], [1, 0, 20]], dtype='uint32') return tokens, masks, metadata def get_expected_inference(): expected_parsed_df = pd.DataFrame({ 'remote_host': ['109.169.248.247', np.NaN], 'other': ['-', np.NaN], 'request_method': [np.NaN, 'POST'], 'request_url': [np.NaN, "/administrator/index.php"], 'request_http_ver': [np.NaN, 'HTTP/1.1'], 'status': [np.NaN, '200'], 'response_bytes_clf': [np.NaN, '449'] }) expected_confidence_df = pd.DataFrame({ 'remote_host': [0.999628, np.NaN], 'other': [0.999579, np.NaN], 'request_method': [np.NaN, 0.99822], 'request_url': [np.NaN, 0.999629], 'request_http_ver': [np.NaN, 0.999936], 'status': 
[np.NaN, 0.999866], 'response_bytes_clf': [np.NaN, 0.999751] }) return expected_parsed_df, expected_confidence_df def test_load_model(): cyparse.load_model(MODEL_FILENAME, CONFIG_FILENAME) assert isinstance(cyparse._label_map, dict) assert isinstance(cyparse._model.module, transformers.models.bert.modeling_bert.BertForTokenClassification) def test_preprocess(): expected_tokens, expected_masks, expected_metadata = get_expected_preprocess() actual_tokens, actual_masks, actual_metadata = cyparse.preprocess(input_logs) assert actual_tokens.equal(expected_tokens) assert actual_masks.equal(expected_masks) assert cupy.equal(actual_metadata, expected_metadata).all() def test_inference(): if torch.cuda.is_available(): expected_parsed_df, expected_confidence_df = get_expected_inference() actual_parsed_df, actual_confidence_df = cyparse.inference(input_logs) pd._testing.assert_frame_equal(actual_parsed_df, expected_parsed_df) pd._testing.assert_frame_equal(actual_confidence_df, expected_confidence_df)
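# Illustrative sketch (not part of the original test file): per the tests
# above, a fine-tuned token-classification checkpoint and its config are
# loaded once, after which inference() returns a parsed-fields DataFrame and
# a matching per-field confidence DataFrame for raw log lines.
def example_cybert_parse(raw_logs):
    parser = Cybert()
    parser.load_model(MODEL_FILENAME, CONFIG_FILENAME)
    parsed_df, confidence_df = parser.inference(raw_logs)
    return parsed_df, confidence_df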
5,066
43.447368
88
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_fs_writer.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cudf
import pytest
from clx.io.writer.fs_writer import FileSystemWriter

expected_df = cudf.DataFrame(
    {
        "firstname": ["Emma", "Ava", "Sophia"],
        "lastname": ["Olivia", "Isabella", "Charlotte"],
        "gender": ["F", "F", "F"],
    }
)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_write_data_csv(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_writer").join("person.csv"))
    config = {
        "type": "fs",
        "output_path": fname,
        "output_format": "csv",
        "index": False,
    }
    writer = FileSystemWriter(config)
    writer.write_data(expected_df)
    result_df = cudf.read_csv(fname)
    assert result_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_write_data_parquet(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_writer").join("person.parquet"))
    config = {"type": "fs", "output_path": fname, "output_format": "parquet"}
    writer = FileSystemWriter(config)
    writer.write_data(expected_df)
    result_df = cudf.read_parquet(fname)
    assert result_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_write_data_orc(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_writer").join("person.orc"))
    config = {"type": "fs", "output_path": fname, "output_format": "orc"}
    writer = FileSystemWriter(config)
    writer.write_data(expected_df)
    result_df = cudf.read_orc(fname)
    assert result_df.equals(expected_df)


@pytest.mark.parametrize("expected_df", [expected_df])
def test_write_data_json(tmpdir, expected_df):
    fname = str(tmpdir.mkdir("tmp_test_fs_writer").join("person.json"))
    config = {
        "type": "fs",
        "output_path": fname,
        "output_format": "json",
        "orient": "records",
    }
    writer = FileSystemWriter(config)
    writer.write_data(expected_df)
    result_df = cudf.read_json(fname, orient="records")
    assert result_df.equals(expected_df)
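# Illustrative sketch (not part of the original test file): the writer is
# driven entirely by its config dict; per the tests above, "output_format"
# selects csv/parquet/orc/json and extra keys such as "index" or "orient"
# flow through to the corresponding cudf writer. The output path is a
# placeholder.
def example_write_parquet(df):
    writer = FileSystemWriter(
        {"type": "fs", "output_path": "out.parquet", "output_format": "parquet"}
    )
    writer.write_data(df)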
2,667
28.644444
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_splunk_notable_parser.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
from clx.parsers.splunk_notable_parser import SplunkNotableParser

TEST_DATA = '1566345812.924, search_name="Test Search Name", orig_time="1566345812.924", info_max_time="1566346500.000000000", info_min_time="1566345300.000000000", info_search_time="1566305689.361160000", message.description="Test Message Description", message.hostname="msg.test.hostname", message.ip="100.100.100.123", message.user_name="user@test.com", severity="info", urgency="medium"'
TEST_DATA2 = '1548772230, search_name="Test Search Name 2", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", src_ip="10.01.01.123", src_ip="10.01.01.1, count="19", info_max_time="1548772200.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"'
TEST_DATA3 = '1548234811, search_name="Test Search Name 3", signature="Android.Adware.Batmobil", signature="Android.Adware.Dlv", signature="Android.PUP.Downloader", src="10.01.01.123", src="10.01.01.1", count="19", info_max_time="1548234811.000000000", info_min_time="1548599400.000000000", info_search_time="1548772206.179561000", info_sid="test-info-sid", lastTime="1548771235", orig_raw="<164>fenotify-113908.warning: CEF:0|FireEye|MPS|1.2.3.123|RC|riskware-callback|1|rt=Jan 29 2019 14:13:55 UTC end=Jan 29 2019 14:13:55 UTC src=10.01.01.123 dest="10.01.01.122" request=http://test.com/test.php cs1Label=sname cs1=Android.PUP.Downloader act=notified dvc=10.01.01.2 dvchost=fireeye.ban2-in smac=1a:2b:3c:4d:5e:6f dmac=1a:2b:3c:4d:5e:7g spt=49458 dpt=80 cn1Label=vlan cn1=0 externalId=123456 devicePayloadId=123abc msg=risk ware detected:57007 proto=tcp cs4Label=link cs4=https://fireeye.test/notification_url/test cs6Label=channel cs6=POST /multiadctrol.php HTTP/1.1::~~Content-type: application/json::~~User-Agent: Dalvik/2.1.0 (Linux; U; Android 8.0.0; SM-G611F Build/R16NW)::~~Host: test.hostname::~~Connection: Keep-Alive::~~Accept-Encoding: gzip::~~Content-Length: 85::~~::~~[{"android_id":"123abc","isnew":0,"m_ch":"123","s_ch":"1","ver_c":"342\\"}] \n\\\\x00", orig_sourcetype="source", src_subnet="12.34.56"'
TEST_DATA4 = '1566345700, search_name="Endpoint - Brute Force against Known User - Rule", orig_source="100.20.2.21", orig_source="FEDEX-MA", orig_source="localhost.com", failure="1104", first="Pattrick", identity="pjame", info_max_time="1546382400.000000000", info_min_time="1546378800.000000000", info_search_time="1546382850.589570000", success="8", user="pjame'
TEST_DATA5 = '1566345700, search_name="Manual Notable Event - Rule", _time="1554290847", app="SplunkEnterpriseSecuritySuite", creator="test@nvidia.com", info_max_time="+Infinity", info_min_time="0.000", info_search_time="1554290847.423961000", owner="test@nvidia.com", rule_description="FireEye NX alert for Incident Review with Major Severity", rule_title="FireEye NX alert for Incident Review(Majr)", security_domain="endpoint", status="0", urgency="medium"'
TEST_DATA6 = '1566345700, search_name="Endpoint - FireEye NX alert for Incident Review (Minor) - Rule", category="riskware-callback", dest_ip="10.15.90.150", occurred="Mar 09 2019 02:36:00 UTC", signature="Android.Adware.Batmobil", src_ip="10.15.90.151", dest_port="80", src_port="40472", orig_time="1552098960", info_max_time="1552099380.000000000", info_min_time="1552098780.000000000", info_search_time="1552052094.393543000", severity="minr", src_host="ip-10.5.13.compute.internal"'
TEST_DATA7 = '1566345700, search_name=\\"Endpoint - Host With Malware Detected (Quarantined or Waived) - Rule\\", count=\\"1\\", dest=\\"TEST-01\\", dest_priority=\\"medium\\", info_max_time=\\"1511389440.000000000\\", info_min_time=\\"1511388840.000000000\\", info_search_time=\\"1511389197.841039000\\", info_sid=\\"rt_scheduler_dGNhcnJvbGxAbnZpZGlhLmNvbQ__SplunkEnterpriseSecuritySuite__RMD5c5145919d43bdffc_at_1511389196_22323\\", lastTime=\\"1511388996.202094\\"'


def test_splunk_notable_parser():
    """Test splunk notable parsing"""
    snp = SplunkNotableParser()
    test_input_df = cudf.DataFrame()
    raw_colname = "_raw"
    test_input_df[raw_colname] = [TEST_DATA]
    test_output_df = snp.parse(test_input_df, raw_colname)
    assert len(test_output_df.columns) == 23
    assert test_output_df["time"][0] == "1566345812.924"
    assert test_output_df["search_name"][0] == "Test Search Name"
    assert test_output_df["orig_time"][0] == "1566345812.924"
    assert test_output_df["urgency"][0] == "medium"
    assert test_output_df["user"][0] == ""
    assert test_output_df["owner"][0] == ""
    assert test_output_df["security_domain"][0] == ""
    assert test_output_df["severity"][0] == "info"
    assert test_output_df["src_ip"][0] == ""
    assert test_output_df["src_mac"][0] == ""
    assert test_output_df["src_port"][0] == ""
    assert test_output_df["dest_ip"][0] == ""
    assert test_output_df["dest_port"][0] == ""
    assert test_output_df["dest_mac"][0] == ""
    assert test_output_df["dest_priority"][0] == ""
    assert test_output_df["device_name"][0] == ""
    assert test_output_df["event_name"][0] == ""
    assert test_output_df["event_type"][0] == ""
    assert test_output_df["ip_address"][0] == ""
    assert test_output_df["message_ip"][0] == "100.100.100.123"
    assert test_output_df["message_username"][0] == "user@test.com"
    assert test_output_df["message_hostname"][0] == "msg.test.hostname"
    assert test_output_df["message_description"][0] == "Test Message Description"

    test_input_df2 = cudf.DataFrame()
    test_input_df2[raw_colname] = [TEST_DATA2]
    test_output_df2 = snp.parse(test_input_df2, raw_colname)
    assert len(test_output_df2.columns) == 23
    assert test_output_df2["time"][0] == "1548772230"
    assert test_output_df2["search_name"][0] == "Test Search Name 2"
    assert test_output_df2["orig_time"][0] == ""
    assert test_output_df2["urgency"][0] == ""
    assert test_output_df2["user"][0] == ""
    assert test_output_df2["owner"][0] == ""
    assert test_output_df2["security_domain"][0] == ""
    assert test_output_df2["severity"][0] == ""
    assert test_output_df2["src_ip"][0] == "10.01.01.123"
    assert test_output_df2["src_mac"][0] == "1a:2b:3c:4d:5e:6f"
    assert test_output_df2["src_port"][0] == ""
    # dest_ip falls back to the dest attribute, since this data has no dest_ip.
    assert test_output_df2["dest_ip"][0] == "10.01.01.122"
    assert test_output_df2["dest_mac"][0] == "1a:2b:3c:4d:5e:7g"
    assert test_output_df2["dest_port"][0] == ""
    assert test_output_df2["dest_priority"][0] == ""
    assert test_output_df2["device_name"][0] == ""
    assert test_output_df2["event_name"][0] == ""
    assert test_output_df2["event_type"][0] == ""
    assert test_output_df2["ip_address"][0] == ""
    assert test_output_df2["message_ip"][0] == ""
    assert test_output_df2["message_username"][0] == ""
    assert test_output_df2["message_hostname"][0] == ""
    assert test_output_df2["message_description"][0] == ""

    test_input_df3 = cudf.DataFrame()
    test_input_df3[raw_colname] = [TEST_DATA3]
    test_output_df3 = snp.parse(test_input_df3, raw_colname)
    assert len(test_output_df3.columns) == 23
    assert test_output_df3["time"][0] == "1548234811"
    assert test_output_df3["search_name"][0] == "Test Search Name 3"
    assert test_output_df3["orig_time"][0] == ""
    assert test_output_df3["urgency"][0] == ""
    assert test_output_df3["user"][0] == ""
    assert test_output_df3["owner"][0] == ""
    assert test_output_df3["security_domain"][0] == ""
    assert test_output_df3["severity"][0] == ""
    # src_ip falls back to the src attribute, since this data has no src_ip.
    assert test_output_df3["src_ip"][0] == "10.01.01.123"
    assert test_output_df3["src_mac"][0] == "1a:2b:3c:4d:5e:6f"
    assert test_output_df3["src_port"][0] == ""
    # dest_ip falls back to the dest attribute, since this data has no dest_ip.
    assert test_output_df3["dest_ip"][0] == "10.01.01.122"
    assert test_output_df3["dest_mac"][0] == "1a:2b:3c:4d:5e:7g"
    assert test_output_df3["dest_port"][0] == ""
    assert test_output_df3["dest_priority"][0] == ""
    assert test_output_df3["device_name"][0] == ""
    assert test_output_df3["event_name"][0] == ""
    assert test_output_df3["event_type"][0] == ""
    assert test_output_df3["ip_address"][0] == ""
    assert test_output_df3["message_ip"][0] == ""
    assert test_output_df3["message_username"][0] == ""
    assert test_output_df3["message_hostname"][0] == ""
    assert test_output_df3["message_description"][0] == ""

    test_input_df4 = cudf.DataFrame()
    test_input_df4[raw_colname] = [TEST_DATA4]
    test_output_df4 = snp.parse(test_input_df4, raw_colname)
    assert len(test_output_df4.columns) == 23
    assert test_output_df4["time"][0] == "1566345700"
    assert (
        test_output_df4["search_name"][0]
        == "Endpoint - Brute Force against Known User - Rule"
    )
    assert test_output_df4["orig_time"][0] == ""
    assert test_output_df4["urgency"][0] == ""
    assert test_output_df4["user"][0] == "pjame"
    assert test_output_df4["owner"][0] == ""
    assert test_output_df4["security_domain"][0] == ""
    assert test_output_df4["severity"][0] == ""
    assert test_output_df4["src_ip"][0] == ""
    assert test_output_df4["src_mac"][0] == ""
    assert test_output_df4["src_port"][0] == ""
    assert test_output_df4["dest_ip"][0] == ""
    assert test_output_df4["dest_mac"][0] == ""
    assert test_output_df4["dest_port"][0] == ""
    assert test_output_df4["dest_priority"][0] == ""
    assert test_output_df4["device_name"][0] == ""
    assert test_output_df4["event_name"][0] == ""
    assert test_output_df4["event_type"][0] == ""
    assert test_output_df4["ip_address"][0] == ""
    assert test_output_df4["message_ip"][0] == ""
    assert test_output_df4["message_username"][0] == ""
    assert test_output_df4["message_hostname"][0] == ""
    assert test_output_df4["message_description"][0] == ""

    test_input_df5 = cudf.DataFrame()
    test_input_df5[raw_colname] = [TEST_DATA5]
    test_output_df5 = snp.parse(test_input_df5, raw_colname)
    assert len(test_output_df5.columns) == 23
    assert test_output_df5["time"][0] == "1566345700"
    assert test_output_df5["search_name"][0] == "Manual Notable Event - Rule"
    assert test_output_df5["orig_time"][0] == ""
    assert test_output_df5["urgency"][0] == "medium"
    assert test_output_df5["user"][0] == ""
    assert test_output_df5["owner"][0] == "test@nvidia.com"
    assert test_output_df5["security_domain"][0] == "endpoint"
    assert test_output_df5["severity"][0] == ""
    assert test_output_df5["src_ip"][0] == ""
    assert test_output_df5["src_mac"][0] == ""
    assert test_output_df5["src_port"][0] == ""
    assert test_output_df5["dest_ip"][0] == ""
    assert test_output_df5["dest_mac"][0] == ""
    assert test_output_df5["dest_port"][0] == ""
    assert test_output_df5["dest_priority"][0] == ""
    assert test_output_df5["device_name"][0] == ""
    assert test_output_df5["event_name"][0] == ""
    assert test_output_df5["event_type"][0] == ""
    assert test_output_df5["ip_address"][0] == ""
    assert test_output_df5["message_ip"][0] == ""
    assert test_output_df5["message_username"][0] == ""
    assert test_output_df5["message_hostname"][0] == ""
    assert test_output_df5["message_description"][0] == ""

    test_input_df6 = cudf.DataFrame()
    test_input_df6[raw_colname] = [TEST_DATA6]
    test_output_df6 = snp.parse(test_input_df6, raw_colname)
    assert len(test_output_df6.columns) == 23
    assert test_output_df6["time"][0] == "1566345700"
    assert (
        test_output_df6["search_name"][0]
        == "Endpoint - FireEye NX alert for Incident Review (Minor) - Rule"
    )
    assert test_output_df6["orig_time"][0] == ""
    assert test_output_df6["urgency"][0] == ""
    assert test_output_df6["user"][0] == ""
    assert test_output_df6["owner"][0] == ""
    assert test_output_df6["security_domain"][0] == ""
    assert test_output_df6["severity"][0] == "minr"
    assert test_output_df6["src_ip"][0] == "10.15.90.151"
    assert test_output_df6["src_mac"][0] == ""
    assert test_output_df6["src_port"][0] == "40472"
    assert test_output_df6["dest_ip"][0] == "10.15.90.150"
    assert test_output_df6["dest_mac"][0] == ""
    assert test_output_df6["dest_port"][0] == "80"
    assert test_output_df6["dest_priority"][0] == ""
    assert test_output_df6["device_name"][0] == ""
    assert test_output_df6["event_name"][0] == ""
    assert test_output_df6["event_type"][0] == ""
    assert test_output_df6["ip_address"][0] == ""
    assert test_output_df6["message_ip"][0] == ""
    assert test_output_df6["message_username"][0] == ""
    assert test_output_df6["message_hostname"][0] == ""
    assert test_output_df6["message_description"][0] == ""

    test_input_df7 = cudf.DataFrame()
    test_input_df7[raw_colname] = [TEST_DATA7]
    test_output_df7 = snp.parse(test_input_df7, raw_colname)
    assert len(test_output_df7.columns) == 23
    assert test_output_df7["time"][0] == "1566345700"
    assert (
        test_output_df7["search_name"][0]
        == "Endpoint - Host With Malware Detected (Quarantined or Waived) - Rule"
    )
    assert test_output_df7["orig_time"][0] == ""
    assert test_output_df7["urgency"][0] == ""
    assert test_output_df7["user"][0] == ""
    assert test_output_df7["owner"][0] == ""
    assert test_output_df7["security_domain"][0] == ""
    assert test_output_df7["severity"][0] == ""
    assert test_output_df7["src_ip"][0] == ""
    assert test_output_df7["src_mac"][0] == ""
    assert test_output_df7["src_port"][0] == ""
    assert test_output_df7["dest_ip"][0] == "TEST-01"
    assert test_output_df7["dest_mac"][0] == ""
    assert test_output_df7["dest_port"][0] == ""
    assert test_output_df7["dest_priority"][0] == "medium"
    assert test_output_df7["device_name"][0] == ""
    assert test_output_df7["event_name"][0] == ""
    assert test_output_df7["event_type"][0] == ""
    assert test_output_df7["ip_address"][0] == ""
    assert test_output_df7["message_ip"][0] == ""
    assert test_output_df7["message_username"][0] == ""
    assert test_output_df7["message_hostname"][0] == ""
    assert test_output_df7["message_description"][0] == ""
15,887
66.037975
1,362
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_dataloader.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
from clx.utils.data.dataset import Dataset
from clx.utils.data.dataloader import DataLoader

test_batchsize = 2
test_df = cudf.DataFrame(
    {
        "domain": [
            "studytour.com.tw",
            "cnn.com",
            "bakercityherald.com",
            "bankmobile.com",
        ],
        "type": [1, 1, 0, 1],
    }
)
expected_part_df1 = cudf.DataFrame(
    {
        "domain": [
            "studytour.com.tw",
            "cnn.com",
        ],
        "type": [1, 1],
    }
)
expected_part_df2 = cudf.DataFrame(
    {
        "domain": [
            "bakercityherald.com",
            "bankmobile.com",
        ],
        "type": [0, 1],
    }
)
dataset = Dataset(test_df)
dataloader = DataLoader(dataset, batchsize=test_batchsize)


def test_get_chunks():
    df_parts = []
    for df_part in dataloader.get_chunks():
        df_parts.append(df_part)
    assert len(df_parts) == 2
    assert df_parts[0].reset_index(drop=True).equals(expected_part_df1)
    assert df_parts[1].reset_index(drop=True).equals(expected_part_df2)
1,644
26.416667
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_stats.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import clx.analytics.stats
import cudf
import cupy as cp


def test_rzscore():
    sequence = [
        3, 4, 5, 6, 1, 10, 34, 2, 1, 11, 45, 34,
        2, 9, 19, 43, 24, 13, 23, 10, 98, 84, 10,
    ]
    series = cudf.Series(sequence)
    zscores_df = cudf.DataFrame()
    zscores_df["zscore"] = clx.analytics.stats.rzscore(series, 7)

    expected_zscores_arr = [
        float(0),
        float(0),
        float(0),
        float(0),
        float(0),
        float(0),
        2.374423424,
        -0.645941275,
        -0.683973734,
        0.158832461,
        1.847751909,
        0.880026019,
        -0.950835449,
        -0.360593742,
        0.111407599,
        1.228914145,
        -0.074966331,
        -0.570321249,
        0.327849973,
        -0.934372308,
        2.296828498,
        1.282966989,
        -0.795223674,
    ]
    expected_zscores_df = cudf.DataFrame()
    expected_zscores_df["zscore"] = expected_zscores_arr

    # Check that columns are equal
    zscores_df["zscore"] = zscores_df["zscore"].fillna(0)
    assert cp.allclose(expected_zscores_df["zscore"], zscores_df["zscore"])
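As a cross-check on the expected values, rzscore here behaves like a rolling z-score over a 7-point window using the population (ddof=0) standard deviation. A pandas sketch, not part of the clx API:

import pandas as pd

s = pd.Series([3, 4, 5, 6, 1, 10, 34, 2, 1, 11, 45, 34,
               2, 9, 19, 43, 24, 13, 23, 10, 98, 84, 10])
# Deviation of each point from the mean of the window ending at it,
# scaled by that window's population standard deviation.
z = (s - s.rolling(7).mean()) / s.rolling(7).std(ddof=0)
# z.iloc[6] is (34 - 9) / sqrt(776 / 7), roughly 2.374423424,
# which matches expected_zscores_arr[6]; the first 6 entries are NaN.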
1,871
22.4
75
py
clx-branch-23.04
clx-branch-23.04/python/clx/tests/test_utils.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
from clx.utils.data import utils

test_domains_len = 2
test_input_df = cudf.DataFrame(
    {"domain": ["studytour.com.tw", "cnn.com"], "type": [1, 1]}
)
expected_output_df = cudf.DataFrame(
    {
        0: [115, 99],
        1: [116, 110],
        2: [117, 110],
        3: [100, 46],
        4: [121, 99],
        5: [116, 111],
        6: [111, 109],
        7: [117, 0],
        8: [114, 0],
        9: [46, 0],
        10: [99, 0],
        11: [111, 0],
        12: [109, 0],
        13: [46, 0],
        14: [116, 0],
        15: [119, 0],
        "len": [16, 7],
    },
    dtype="int32",
)
expected_output_df["type"] = [1, 1]
expected_output_df["domain"] = ["studytour.com.tw", "cnn.com"]


def test_str2ascii():
    actual_output_df = utils.str2ascii(test_input_df, 'domain')
    assert actual_output_df.equals(expected_output_df)
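The expected frame above is simply each domain's characters mapped to their ASCII code points, right-padded with zeros to the longest domain, plus a 'len' column. A plain-Python sketch of the same transform (illustrative only, not the cudf implementation):

domains = ["studytour.com.tw", "cnn.com"]
width = max(len(d) for d in domains)
rows = [[ord(c) for c in d] + [0] * (width - len(d)) for d in domains]
lengths = [len(d) for d in domains]
# rows[0][:4] == [115, 116, 117, 100], i.e. 's', 't', 'u', 'd'
# lengths == [16, 7], matching the 'len' column above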
1,439
27.235294
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/analytics/dga_dataset.py
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from clx.utils.data.dataset import Dataset
from clx.utils.data import utils

log = logging.getLogger(__name__)


class DGADataset(Dataset):
    """Constructor to create DGADataset instance.

    :param df: Input dataframe.
    :type df: cudf.DataFrame
    :param truncate: Truncate string to n number of characters.
    :type truncate: int
    """

    def __init__(self, df, truncate):
        df = self.__preprocess(df, truncate)
        super().__init__(df)

    def __preprocess(self, df, truncate):
        df['domain'] = df['domain'].str.slice_replace(truncate, repl='')
        df = utils.str2ascii(df, 'domain')
        return df
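A minimal usage sketch, assuming a cudf frame shaped like the ones in the tests above (the truncate value and batch size are arbitrary choices for illustration):

import cudf
from clx.analytics.dga_dataset import DGADataset
from clx.utils.data.dataloader import DataLoader

df = cudf.DataFrame({"domain": ["studytour.com.tw", "cnn.com"], "type": [1, 1]})
dataset = DGADataset(df, truncate=100)  # domains longer than 100 chars are cut
loader = DataLoader(dataset, batchsize=2)
for chunk in loader.get_chunks():
    pass  # each chunk is a cudf.DataFrame of ASCII-encoded domains plus 'len' and 'type'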
1,258
31.282051
74
py
clx-branch-23.04
clx-branch-23.04/python/clx/analytics/asset_classification.py
import cudf
from cuml.model_selection import train_test_split
import torch
import torch.optim as torch_optim
import torch.nn.functional as F
import logging
from torch.utils.dlpack import from_dlpack
from clx.analytics.model.tabular_model import TabularModel

log = logging.getLogger(__name__)


class AssetClassification:
    """
    Supervised asset classification on tabular data containing categorical and/or continuous features.

    :param layers: sizes of the linear layers that follow the input layer
    :param drops: dropout percentage for each linear layer
    :param emb_drop: dropout percentage at the embedding layers
    :param is_reg: is regression
    :param is_multi: is multi-class classification
    :param use_bn: use batch normalization
    """

    def __init__(self, layers=[200, 100], drops=[0.001, 0.01], emb_drop=0.04, is_reg=False, is_multi=True, use_bn=True):
        self._layers = layers
        self._drops = drops
        self._emb_drop = emb_drop
        self._is_reg = is_reg
        self._is_multi = is_multi
        self._use_bn = use_bn
        self._model = None
        self._optimizer = None
        self._cat_cols = None
        self._cont_cols = None
        self._device = torch.device('cuda')

    def train_model(self, train_gdf, cat_cols, cont_cols, label_col, batch_size, epochs, lr=0.01, wd=0.0):
        """
        This function is used for training fastai tabular model with a given training dataset.

        :param train_gdf: training dataset with categorized and/or continuous feature columns
        :type train_gdf: cudf.DataFrame
        :param cat_cols: array of categorical column names in train_gdf
        :type cat_cols: array
        :param cont_cols: array of continuous column names in train_gdf
        :type cont_cols: array
        :param label_col: column name of label column in train_gdf
        :type label_col: str
        :param batch_size: train_gdf will be partitioned into multiple dataframes of this size
        :type batch_size: int
        :param epochs: number of epochs to be adjusted depending on convergence for a specific dataset
        :type epochs: int
        :param lr: learning rate
        :type lr: float
        :param wd: weight decay
        :type wd: float

        Examples
        --------
        >>> from clx.analytics.asset_classification import AssetClassification
        >>> ac = AssetClassification()
        >>> cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
        >>> cont_cols = ["10"]
        >>> ac.train_model(X_train, cat_cols, cont_cols, "label", batch_size, epochs, lr=0.01, wd=0.0)
        """
        self._cat_cols = cat_cols
        self._cont_cols = cont_cols

        # train/test split
        X, val_X, Y, val_Y = train_test_split(train_gdf, label_col, train_size=0.9)
        val_X.index = val_Y.index
        X.index = Y.index

        embedded_cols = {}
        for col in cat_cols:
            if col != label_col:
                categories_cnt = X[col].max() + 2
                if categories_cnt > 1:
                    embedded_cols[col] = categories_cnt
        X[label_col] = Y
        val_X[label_col] = val_Y

        # Embedding
        embedding_sizes = [(n_categories, min(100, (n_categories + 1) // 2)) for _, n_categories in embedded_cols.items()]

        n_cont = len(cont_cols)
        out_sz = train_gdf[label_col].nunique()

        # Partition dataframes
        train_part_dfs = self._get_partitioned_dfs(X, batch_size)
        val_part_dfs = self._get_partitioned_dfs(val_X, batch_size)

        self._model = TabularModel(embedding_sizes, n_cont, out_sz, self._layers, self._drops, self._emb_drop, self._is_reg, self._is_multi, self._use_bn)
        self._to_device(self._model, self._device)
        # Pass the caller-supplied learning rate and weight decay to the optimizer.
        self._config_optimizer(lr, wd)
        for i in range(epochs):
            loss = self._train(self._model, self._optimizer, train_part_dfs, cat_cols, cont_cols, label_col)
            print("training loss: ", loss)
            self._val_loss(self._model, val_part_dfs, cat_cols, cont_cols, label_col)

    def predict(self, gdf, cat_cols, cont_cols):
        """
        Predict the class with the trained model

        :param gdf: prediction input dataset with categorized int16 feature columns
        :type gdf: cudf.DataFrame
        :param cat_cols: array of categorical column names in gdf
        :type cat_cols: array
        :param cont_cols: array of continuous column names in gdf
        :type cont_cols: array

        Examples
        --------
        >>> cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
        >>> cont_cols = ["10"]
        >>> ac.predict(X_test, cat_cols, cont_cols).values_host
        0       0
        1       0
        2       0
        3       0
        4       2
               ..
        8204    0
        8205    4
        8206    0
        8207    3
        8208    0
        Length: 8209, dtype: int64
        """
        cat_set = torch.zeros(0, 0)
        xb_cont_tensor = torch.zeros(0, 0)

        if cat_cols:
            cat_set = gdf[self._cat_cols].to_dlpack()
            cat_set = from_dlpack(cat_set).long()
        if cont_cols:
            xb_cont_tensor = gdf[self._cont_cols].to_dlpack()
            xb_cont_tensor = from_dlpack(xb_cont_tensor).float()

        out = self._model(cat_set, xb_cont_tensor)
        preds = torch.max(out, 1)[1].view(-1).tolist()

        return cudf.Series(preds)

    def save_model(self, fname):
        """
        Save trained model

        :param fname: directory path to save model
        :type fname: str

        Examples
        --------
        >>> from clx.analytics.asset_classification import AssetClassification
        >>> ac = AssetClassification()
        >>> cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
        >>> cont_cols = ["10"]
        >>> ac.train_model(X_train, cat_cols, cont_cols, "label", batch_size, epochs, lr=0.01, wd=0.0)
        >>> ac.save_model("ac.mdl")
        """
        torch.save(self._model, fname)

    def load_model(self, fname):
        """
        Load a saved model.

        :param fname: directory path to model
        :type fname: str

        Examples
        --------
        >>> from clx.analytics.asset_classification import AssetClassification
        >>> ac = AssetClassification()
        >>> ac.load_model("ac.mdl")
        """
        self._model = torch.load(fname)

    def _config_optimizer(self, lr=0.001, wd=0.0):
        parameters = filter(lambda p: p.requires_grad, self._model.parameters())
        self._optimizer = torch_optim.Adam(parameters, lr=lr, weight_decay=wd)

    def _get_partitioned_dfs(self, df, batch_size):
        dataset_len = df.shape[0]
        prev_chunk_offset = 0
        partitioned_dfs = []
        while prev_chunk_offset < dataset_len:
            curr_chunk_offset = prev_chunk_offset + batch_size
            chunk = df.iloc[prev_chunk_offset:curr_chunk_offset:1]
            partitioned_dfs.append(chunk)
            prev_chunk_offset = curr_chunk_offset
        return partitioned_dfs

    def _train(self, model, optim, dfs, cat_cols, cont_cols, label_col):
        self._model.train()
        total = 0
        sum_loss = 0
        cat_set = torch.zeros(0, 0)
        xb_cont_tensor = torch.zeros(0, 0)

        for df in dfs:
            batch = df.shape[0]
            if cat_cols:
                cat_set = df[cat_cols].to_dlpack()
                cat_set = from_dlpack(cat_set).long()
            if cont_cols:
                xb_cont_tensor = df[cont_cols].to_dlpack()
                xb_cont_tensor = from_dlpack(xb_cont_tensor).float()

            output = self._model(cat_set, xb_cont_tensor)

            train_label = df[label_col].to_dlpack()
            train_label = from_dlpack(train_label).long()

            loss = F.cross_entropy(output, train_label)
            optim.zero_grad()
            loss.backward()
            optim.step()
            total += batch
            sum_loss += batch * (loss.item())

        return sum_loss / total

    def _val_loss(self, model, dfs, cat_cols, cont_cols, label_col):
        self._model.eval()
        total = 0
        sum_loss = 0
        correct = 0
        val_set = torch.zeros(0, 0)
        xb_cont_tensor = torch.zeros(0, 0)

        for df in dfs:
            current_batch_size = df.shape[0]
            if cat_cols:
                val_set = df[cat_cols].to_dlpack()
                val_set = from_dlpack(val_set).long()
            if cont_cols:
                xb_cont_tensor = df[cont_cols].to_dlpack()
                xb_cont_tensor = from_dlpack(xb_cont_tensor).float()

            out = self._model(val_set, xb_cont_tensor)

            val_label = df[label_col].to_dlpack()
            val_label = from_dlpack(val_label).long()

            loss = F.cross_entropy(out, val_label)
            sum_loss += current_batch_size * (loss.item())
            total += current_batch_size

            pred = torch.max(out, 1)[1]
            correct += (pred == val_label).float().sum().item()

        print("valid loss %.3f and accuracy %.3f" % (sum_loss / total, correct / total))
        return sum_loss / total, correct / total

    def _to_device(self, data, device):
        """Move tensor(s) to chosen device"""
        if isinstance(data, (list, tuple)):
            return [self._to_device(x, device) for x in data]
        return data.to(device, non_blocking=True)
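The recurring pattern in _train, _val_loss, and predict is a zero-copy handoff of cudf columns to torch via DLPack. In isolation, as a hedged sketch (the column names are placeholders):

import cudf
import torch
from torch.utils.dlpack import from_dlpack

gdf = cudf.DataFrame({"f1": [1, 2, 3], "f2": [0.5, 0.1, 0.9]})
# cudf -> DLPack capsule -> torch tensor; the data stays on the GPU throughout.
cat_tensor = from_dlpack(gdf[["f1"]].to_dlpack()).long()    # integer (categorical) features
cont_tensor = from_dlpack(gdf[["f2"]].to_dlpack()).float()  # continuous features
assert cat_tensor.is_cuda and cont_tensor.is_cuda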
9,402
34.217228
154
py
clx-branch-23.04
clx-branch-23.04/python/clx/analytics/binary_sequence_classifier.py
import logging

import cudf
from cudf.core.subword_tokenizer import SubwordTokenizer
import torch
import torch.nn as nn
from torch.utils.dlpack import to_dlpack

from clx.analytics.sequence_classifier import SequenceClassifier
from clx.utils.data.dataloader import DataLoader
from clx.utils.data.dataset import Dataset
from transformers import AutoModelForSequenceClassification

log = logging.getLogger(__name__)


class BinarySequenceClassifier(SequenceClassifier):
    """
    Sequence Classifier using BERT. This class provides methods for training/loading BERT models, evaluation and prediction.
    """

    def init_model(self, model_or_path):
        """
        Load model from huggingface or locally saved model.

        :param model_or_path: huggingface pretrained model name or directory path to model
        :type model_or_path: str

        Examples
        --------
        >>> from clx.analytics.binary_sequence_classifier import BinarySequenceClassifier
        >>> sc = BinarySequenceClassifier()
        >>> sc.init_model("bert-base-uncased")  # huggingface pre-trained model
        >>> sc.init_model(model_path)  # locally saved model
        """
        self._model = AutoModelForSequenceClassification.from_pretrained(model_or_path)

        if torch.cuda.is_available():
            self._device = torch.device("cuda")
            self._model.cuda()
            self._model = nn.DataParallel(self._model)
        else:
            self._device = torch.device("cpu")

        self._tokenizer = SubwordTokenizer(self._hashpath, do_lower_case=True)

    def predict(self, input_data, max_seq_len=128, batch_size=32, threshold=0.5):
        """
        Predict the class with the trained model

        :param input_data: input text data for prediction
        :type input_data: cudf.Series
        :param max_seq_len: Limits the length of the sequence returned by tokenizer. If tokenized sentence is shorter than max_seq_len, output will be padded with 0s. If the tokenized sentence is longer than max_seq_len it will be truncated to max_seq_len.
        :type max_seq_len: int
        :param batch_size: batch size
        :type batch_size: int
        :param threshold: results with probabilities higher than this will be labeled as positive
        :type threshold: float
        :return: predictions, probabilities: predictions are labels (0 or 1) based on minimum threshold
        :rtype: cudf.Series, cudf.Series

        Examples
        --------
        >>> from cuml.preprocessing.model_selection import train_test_split
        >>> emails_train, emails_test, labels_train, labels_test = train_test_split(train_emails_df, 'label', train_size=0.8)
        >>> sc.train_model(emails_train, labels_train)
        >>> predictions = sc.predict(emails_test, threshold=0.8)
        """
        predict_gdf = cudf.DataFrame()
        predict_gdf["text"] = input_data

        predict_dataset = Dataset(predict_gdf)
        predict_dataloader = DataLoader(predict_dataset, batchsize=batch_size)

        preds_l = []
        probs_l = []

        self._model.eval()
        for df in predict_dataloader.get_chunks():
            b_input_ids, b_input_mask = self._bert_uncased_tokenize(df["text"], max_seq_len)
            with torch.no_grad():
                logits = self._model(
                    b_input_ids, token_type_ids=None, attention_mask=b_input_mask
                )[0]
                b_probs = torch.sigmoid(logits[:, 1])
                b_preds = b_probs.ge(threshold).type(torch.int8)

            b_probs = cudf.io.from_dlpack(to_dlpack(b_probs))
            b_preds = cudf.io.from_dlpack(to_dlpack(b_preds)).astype("boolean")
            preds_l.append(b_preds)
            probs_l.append(b_probs)

        preds = cudf.concat(preds_l)
        probs = cudf.concat(probs_l)

        return preds, probs
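End to end, the classifier is used as in its docstrings. A condensed sketch assuming a labeled cudf frame train_emails_df with text and a binary 'label' column (names taken from the docstring examples; train_model is inherited from SequenceClassifier):

from cuml.preprocessing.model_selection import train_test_split
from clx.analytics.binary_sequence_classifier import BinarySequenceClassifier

sc = BinarySequenceClassifier()
sc.init_model("bert-base-uncased")  # or a path to a locally saved model
emails_train, emails_test, labels_train, labels_test = train_test_split(train_emails_df, "label", train_size=0.8)
sc.train_model(emails_train, labels_train)
preds, probs = sc.predict(emails_test, threshold=0.8)  # boolean labels plus sigmoid probabilities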
3,848
37.878788
256
py